diff --git a/.github/workflows/config/node-release.yaml b/.github/workflows/config/node-release.yaml index 19dce2fd3fd7..bd013794d85e 100644 --- a/.github/workflows/config/node-release.yaml +++ b/.github/workflows/config/node-release.yaml @@ -1,11 +1,11 @@ release: branching: execution: - time: "17:00:00" + time: "21:00:00" schedule: - - on: "2024-09-30" - name: release/0.55 + - on: "2024-10-25" + name: release/0.56 initial-tag: - create: false - name: v0.55.0-alpha.2 + create: true + name: v0.56.0 diff --git a/.github/workflows/flow-artifact-determinism.yaml b/.github/workflows/flow-artifact-determinism.yaml index a67316fb04a8..f638b1c915bd 100644 --- a/.github/workflows/flow-artifact-determinism.yaml +++ b/.github/workflows/flow-artifact-determinism.yaml @@ -32,7 +32,7 @@ on: description: "Java JDK Version:" type: string required: false - default: "21" + default: "21.0.4" push: branches: - develop @@ -55,7 +55,7 @@ jobs: with: ref: ${{ github.event.inputs.ref || '' }} java-distribution: ${{ inputs.java-distribution || 'temurin' }} - java-version: ${{ inputs.java-version || '21' }} + java-version: ${{ inputs.java-version || '21.0.4' }} secrets: gradle-cache-username: ${{ secrets.GRADLE_CACHE_USERNAME }} gradle-cache-password: ${{ secrets.GRADLE_CACHE_PASSWORD }} @@ -66,7 +66,7 @@ jobs: with: ref: ${{ github.event.inputs.ref || '' }} java-distribution: ${{ inputs.java-distribution || 'temurin' }} - java-version: ${{ inputs.java-version || '21' }} + java-version: ${{ inputs.java-version || '21.0.4' }} secrets: gradle-cache-username: ${{ secrets.GRADLE_CACHE_USERNAME }} gradle-cache-password: ${{ secrets.GRADLE_CACHE_PASSWORD }} diff --git a/.github/workflows/flow-node-performance-tests.yaml b/.github/workflows/flow-node-performance-tests.yaml index 3d630eed6b2f..c8809fea476f 100644 --- a/.github/workflows/flow-node-performance-tests.yaml +++ b/.github/workflows/flow-node-performance-tests.yaml @@ -53,10 +53,10 @@ jobs: egress-policy: audit - name: Checkout Code - 
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Authenticate to Google Cloud - uses: google-github-actions/auth@55bd3a7c6e2ae7cf1877fd1ccb9d54c0503c457c # v2.1.2 + uses: google-github-actions/auth@8254fb75a33b976a221574d287e93919e6a36f70 # v2.1.6 with: workload_identity_provider: "projects/235822363393/locations/global/workloadIdentityPools/hedera-builds-pool/providers/hedera-builds-gh-actions" service_account: "hedera-artifact-builds@devops-1-254919.iam.gserviceaccount.com" diff --git a/.github/workflows/flow-pull-request-formatting.yaml b/.github/workflows/flow-pull-request-formatting.yaml index 616398aa783b..e063de41e66b 100644 --- a/.github/workflows/flow-pull-request-formatting.yaml +++ b/.github/workflows/flow-pull-request-formatting.yaml @@ -52,7 +52,7 @@ jobs: egress-policy: audit - name: Check PR Title - uses: step-security/conventional-pr-title-action@0eae74515f5a79f8773fa04142dd746df76666ac # v1.0.0 + uses: step-security/conventional-pr-title-action@19fb561b33015fd2184055a05ce5a3bcf2ba3f54 # v3.2.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/node-flow-build-application.yaml b/.github/workflows/node-flow-build-application.yaml index 714d54ce8b3c..56e2a63bad61 100644 --- a/.github/workflows/node-flow-build-application.yaml +++ b/.github/workflows/node-flow-build-application.yaml @@ -42,7 +42,7 @@ on: description: "Java JDK Version:" type: string required: false - default: "21" + default: "21.0.4" java-distribution: description: "Java JDK Distribution:" type: string @@ -63,7 +63,7 @@ jobs: name: Code uses: ./.github/workflows/node-zxc-compile-application-code.yaml with: - java-version: ${{ github.event.inputs.java-version || '21' }} + java-version: ${{ github.event.inputs.java-version || '21.0.4' }} java-distribution: ${{ github.event.inputs.java-distribution || 'temurin' }} enable-unit-tests: ${{ github.event_name == 
'push' || github.event.inputs.enable-unit-tests == 'true' }} enable-hapi-tests-misc: ${{ github.event.inputs.enable-hapi-tests == 'true' }} @@ -83,3 +83,24 @@ jobs: gradle-cache-username: ${{ secrets.GRADLE_CACHE_USERNAME }} gradle-cache-password: ${{ secrets.GRADLE_CACHE_PASSWORD }} codecov-token: ${{ secrets.CODECOV_TOKEN }} + + deploy-ci-trigger: + name: Trigger CI Flows + runs-on: network-node-linux-medium + needs: code + if: ${{ needs.code.result == 'success' }} + steps: + - name: Harden Runner + uses: step-security/harden-runner@f086349bfa2bd1361f7909c78558e816508cdc10 # v2.8.0 + with: + egress-policy: audit + + - name: Trigger ZXF Deploy Production Release + uses: step-security/workflow-dispatch@4d1049025980f72b1327cbfdeecb07fe7a20f577 # v1.2.4 + with: + workflow: .github/workflows/node-flow-deploy-release-artifact.yaml + repo: hashgraph/hedera-services # ensure we are executing in the hashgraph org + ref: develop # ensure we are always using the workflow definition from the develop branch + token: ${{ secrets.GH_ACCESS_TOKEN }} + inputs: '{ "ref": "${{ github.ref }}" }' + #inputs: '{ "event": "${{ toJSON(github.event) }}", "ref": "${{ github.ref }}" }' diff --git a/.github/workflows/node-flow-deploy-adhoc-artifact.yaml b/.github/workflows/node-flow-deploy-adhoc-artifact.yaml index 2ff197d2a551..a313968dee40 100644 --- a/.github/workflows/node-flow-deploy-adhoc-artifact.yaml +++ b/.github/workflows/node-flow-deploy-adhoc-artifact.yaml @@ -27,7 +27,7 @@ on: description: "Java JDK Version:" type: string required: false - default: "21" + default: "21.0.4" java-distribution: description: "Java JDK Distribution:" type: string @@ -53,7 +53,7 @@ jobs: trigger-env-deploy: none release-profile: AdhocCommit dry-run-enabled: ${{ github.event.inputs.dry-run-enabled == 'true' }} - java-version: ${{ github.event.inputs.java-version || '21' }} + java-version: ${{ github.event.inputs.java-version || '21.0.4' }} java-distribution: ${{ github.event.inputs.java-distribution || 
'temurin' }} gradle-version: ${{ github.event.inputs.gradle-version || 'wrapper' }} diff --git a/.github/workflows/node-flow-deploy-release-artifact.yaml b/.github/workflows/node-flow-deploy-release-artifact.yaml index 3e565adf4844..b6dc60c3ea7a 100644 --- a/.github/workflows/node-flow-deploy-release-artifact.yaml +++ b/.github/workflows/node-flow-deploy-release-artifact.yaml @@ -19,14 +19,14 @@ on: push: tags: - "v[0-9]+.[0-9]+.[0-9]+-?*" - workflow_run: - workflows: - - "Node: Build Application" - branches: - - develop - types: - - completed - + workflow_dispatch: + inputs: + ref: + required: true + description: "The github ref that triggered the workflow" +# event: +# required: true +# description: "The github event of the triggering workflow" defaults: run: @@ -103,7 +103,7 @@ jobs: release-branch: name: Release [Branch] uses: ./.github/workflows/node-zxc-build-release-artifact.yaml - if: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success'}} + if: ${{ github.event_name == 'workflow_dispatch' }} with: version-policy: branch-commit trigger-env-deploy: integration @@ -125,3 +125,41 @@ jobs: jf-docker-registry: ${{ vars.JF_DOCKER_REGISTRY }} jf-user-name: ${{ vars.JF_USER_NAME }} jf-access-token: ${{ secrets.JF_ACCESS_TOKEN }} + + deploy-ci-trigger: + name: Trigger CI Flows + runs-on: network-node-linux-medium + needs: + - release-branch + steps: + - name: Harden Runner + uses: step-security/harden-runner@f086349bfa2bd1361f7909c78558e816508cdc10 # v2.8.0 + with: + egress-policy: audit + + - name: Checkout Code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + fetch-depth: '0' + ref: develop + token: ${{ secrets.GH_ACCESS_TOKEN }} + + - name: Trigger ZXF Prepare Extended Test Suite + if: ${{ needs.release-branch.result == 'success' }} + uses: step-security/workflow-dispatch@4d1049025980f72b1327cbfdeecb07fe7a20f577 # v1.2.4 + with: + workflow: 
.github/workflows/zxf-prepare-extended-test-suite.yaml + repo: hashgraph/hedera-services # ensure we are executing in the hashgraph org + ref: develop # ensure we are always using the workflow definition from the develop branch + token: ${{ secrets.GH_ACCESS_TOKEN }} + inputs: '{ "ref": "${{ inputs.ref }}" }' + +# - name: Trigger ZXF Deploy Integration +# if: ${{ needs.release-branch.result == 'success' }} +# uses: step-security/workflow-dispatch@4d1049025980f72b1327cbfdeecb07fe7a20f577 # v1.2.4 +# with: +# workflow: .github/workflows/node-zxf-deploy-integration.yaml +# repo: hashgraph/hedera-services # ensure we are executing in the hashgraph org +# ref: develop # ensure we are always using the workflow definition from the develop branch +# token: ${{ secrets.GH_ACCESS_TOKEN }} +# inputs: '{ "event": "${{ inputs.event }}" }' diff --git a/.github/workflows/node-flow-fsts-custom-regression.yaml b/.github/workflows/node-flow-fsts-custom-regression.yaml index 415cf26c1ada..1ca07c14ae9d 100644 --- a/.github/workflows/node-flow-fsts-custom-regression.yaml +++ b/.github/workflows/node-flow-fsts-custom-regression.yaml @@ -37,7 +37,7 @@ on: description: "Java JDK Version:" type: string required: false - default: "21" + default: "21.0.4" java-distribution: description: "Java JDK Distribution:" type: string @@ -67,7 +67,7 @@ jobs: branch-name: ${{ github.ref_name }} slack-results-channel: ${{ github.event.inputs.slack-results-channel }} slack-summary-channel: ${{ github.event.inputs.slack-summary-channel }} - java-version: ${{ github.event.inputs.java-version || '21' }} + java-version: ${{ github.event.inputs.java-version || '21.0.4' }} java-distribution: ${{ github.event.inputs.java-distribution || 'temurin' }} gradle-version: ${{ github.event.inputs.gradle-version || 'wrapper' }} use-branch-for-slack-channel: false diff --git a/.github/workflows/node-flow-pull-request-checks.yaml b/.github/workflows/node-flow-pull-request-checks.yaml index b47b864e35fc..c71a9c216bb9 100644 
--- a/.github/workflows/node-flow-pull-request-checks.yaml +++ b/.github/workflows/node-flow-pull-request-checks.yaml @@ -300,7 +300,7 @@ jobs: with: ref: ${{ github.event.inputs.ref || '' }} java-distribution: temurin - java-version: 21 + java-version: 21.0.4 secrets: gradle-cache-username: ${{ secrets.GRADLE_CACHE_USERNAME }} gradle-cache-password: ${{ secrets.GRADLE_CACHE_PASSWORD }} @@ -316,7 +316,7 @@ jobs: with: ref: ${{ github.event.inputs.ref || '' }} java-distribution: temurin - java-version: 21 + java-version: 21.0.4 secrets: gradle-cache-username: ${{ secrets.GRADLE_CACHE_USERNAME }} gradle-cache-password: ${{ secrets.GRADLE_CACHE_PASSWORD }} diff --git a/.github/workflows/node-zxc-build-release-artifact.yaml b/.github/workflows/node-zxc-build-release-artifact.yaml index 417fb7fb2ff3..97574d979c66 100644 --- a/.github/workflows/node-zxc-build-release-artifact.yaml +++ b/.github/workflows/node-zxc-build-release-artifact.yaml @@ -59,7 +59,7 @@ on: description: "Java JDK Version:" type: string required: false - default: "21" + default: "21.0.4" gradle-version: description: "Gradle Version:" type: string @@ -175,10 +175,10 @@ jobs: fi - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Setup Java - uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0 + uses: actions/setup-java@8df1039502a15bceb9433410b1a100fbe190c53b # v4.5.0 with: distribution: ${{ inputs.java-distribution }} java-version: ${{ inputs.java-version }} @@ -218,7 +218,7 @@ jobs: echo "prerelease=${PRERELEASE}" >>"${GITHUB_OUTPUT}" - name: Cache Build Version - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 with: path: version.txt key: node-build-version-${{ steps.effective-version.outputs.number }}-${{ github.sha }} @@ -275,10 +275,10 @@ jobs: echo 
"::endgroup::" - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Authenticate to Google Cloud - uses: google-github-actions/auth@55bd3a7c6e2ae7cf1877fd1ccb9d54c0503c457c # v2.1.2 + uses: google-github-actions/auth@8254fb75a33b976a221574d287e93919e6a36f70 # v2.1.6 if: ${{ inputs.dry-run-enabled != true && !cancelled() && !failure() }} with: workload_identity_provider: "projects/235822363393/locations/global/workloadIdentityPools/hedera-builds-pool/providers/hedera-builds-gh-actions" @@ -289,7 +289,7 @@ jobs: if: ${{ inputs.dry-run-enabled != true && !cancelled() && !failure() }} - name: Setup Java - uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0 + uses: actions/setup-java@8df1039502a15bceb9433410b1a100fbe190c53b # v4.5.0 with: distribution: ${{ inputs.java-distribution }} java-version: ${{ inputs.java-version }} @@ -300,14 +300,14 @@ jobs: gradle-version: ${{ inputs.gradle-version }} - name: Restore Build Version - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 with: fail-on-cache-miss: true path: version.txt key: node-build-version-${{ needs.validate.outputs.version }}-${{ github.sha }} - name: Cache Build Artifacts - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 with: path: ~/artifact-build key: node-build-artifacts-${{ needs.validate.outputs.version }}-${{ github.sha }} @@ -407,11 +407,11 @@ jobs: egress-policy: audit - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Authenticate to Google Cloud id: google-auth - uses: google-github-actions/auth@55bd3a7c6e2ae7cf1877fd1ccb9d54c0503c457c # 
v2.1.2 + uses: google-github-actions/auth@8254fb75a33b976a221574d287e93919e6a36f70 # v2.1.6 if: ${{ inputs.dry-run-enabled != true && !cancelled() && !failure() }} with: token_format: 'access_token' @@ -438,7 +438,7 @@ jobs: uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0 - name: Setup Docker Buildx Support - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 with: version: v0.16.2 driver-opts: network=host @@ -451,7 +451,7 @@ jobs: run: docker run -d -p 5000:5000 --restart=always --name registry registry:latest - name: Docker Login - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 if: ${{ inputs.dry-run-enabled != true && !cancelled() && !failure() }} with: registry: ${{ steps.set-registry.outputs.docker-registry }} @@ -459,7 +459,7 @@ jobs: password: ${{ steps.google-auth.outputs.access_token }} - name: Restore Build Artifacts - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 with: fail-on-cache-miss: true path: ~/artifact-build @@ -471,7 +471,7 @@ jobs: cp -rvf ~/artifact-build/* hedera-node/infrastructure/docker/containers/local-node/main-network-node/sdk/ - name: Build Haveged Image - uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 + uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0 with: push: true platforms: linux/amd64,linux/arm64 @@ -479,7 +479,7 @@ jobs: tags: ${{ steps.set-registry.outputs.docker-tag-base }}/network-node-haveged:${{ needs.validate.outputs.version }} - name: Build Base Image - uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 + uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # 
v6.9.0 with: push: true platforms: linux/amd64,linux/arm64 @@ -487,7 +487,7 @@ jobs: tags: ${{ steps.set-registry.outputs.docker-tag-base }}/network-node-base:${{ needs.validate.outputs.version }} - name: Build Network Node Image - uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 + uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0 with: push: true platforms: linux/amd64,linux/arm64 @@ -587,11 +587,11 @@ jobs: egress-policy: audit - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Authenticate to Google Cloud id: google-auth - uses: google-github-actions/auth@55bd3a7c6e2ae7cf1877fd1ccb9d54c0503c457c # v2.1.2 + uses: google-github-actions/auth@8254fb75a33b976a221574d287e93919e6a36f70 # v2.1.6 with: token_format: 'access_token' workload_identity_provider: "projects/235822363393/locations/global/workloadIdentityPools/hedera-builds-pool/providers/hedera-builds-gh-actions" @@ -610,14 +610,14 @@ jobs: run: jfrog rt ping - name: Docker Login (GCP) - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ${{ needs.gcp-production-image.outputs.docker-registry }} username: oauth2accesstoken password: ${{ steps.google-auth.outputs.access_token }} - name: Docker Login (JFrog) - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ${{ needs.jfr-production-image.outputs.docker-registry }} username: ${{ secrets.jf-user-name }} @@ -700,7 +700,7 @@ jobs: egress-policy: audit - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install GnuPG 
Tools if: ${{ inputs.dry-run-enabled != true }} @@ -727,7 +727,7 @@ jobs: - name: Authenticate to Google Cloud id: google-auth - uses: google-github-actions/auth@55bd3a7c6e2ae7cf1877fd1ccb9d54c0503c457c # v2.1.2 + uses: google-github-actions/auth@8254fb75a33b976a221574d287e93919e6a36f70 # v2.1.6 with: workload_identity_provider: "projects/229164983194/locations/global/workloadIdentityPools/registry-identity-pool/providers/gh-provider" service_account: "artifact-deployer@swirlds-registry.iam.gserviceaccount.com" @@ -737,7 +737,7 @@ jobs: if: ${{ inputs.dry-run-enabled != true && !cancelled() && !failure() }} - name: Setup Java - uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0 + uses: actions/setup-java@8df1039502a15bceb9433410b1a100fbe190c53b # v4.5.0 with: distribution: ${{ inputs.java-distribution }} java-version: ${{ inputs.java-version }} @@ -748,7 +748,7 @@ jobs: gradle-version: ${{ inputs.gradle-version }} - name: Restore Build Version - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 with: fail-on-cache-miss: true path: version.txt @@ -849,7 +849,7 @@ jobs: arguments: "release${{ inputs.release-profile }} -PpublishingPackageGroup=com.swirlds -Ps01SonatypeHost=true -PpublishSigningEnabled=true --scan --no-configuration-cache" - name: Gradle Publish Services to ${{ inputs.version-policy == 'specified' && 'Maven Central' || 'Google Artifact Registry' }} (${{ inputs.release-profile }}) - uses: gradle/gradle-build-action@29c0906b64b8fc82467890bfb7a0a7ef34bda89e # v3.1.0 + uses: gradle/gradle-build-action@ac2d340dc04d9e1113182899e983b5400c17cda1 # v3.5.0 if: ${{ inputs.dry-run-enabled != true && inputs.release-profile != 'none' && !cancelled() && !failure() }} env: NEXUS_USERNAME: ${{ secrets.svcs-ossrh-username }} diff --git a/.github/workflows/node-zxc-compile-application-code.yaml b/.github/workflows/node-zxc-compile-application-code.yaml 
index 5ecde8b155da..0251954cdaac 100644 --- a/.github/workflows/node-zxc-compile-application-code.yaml +++ b/.github/workflows/node-zxc-compile-application-code.yaml @@ -102,7 +102,7 @@ on: description: "Java JDK Version:" type: string required: false - default: "21" + default: "21.0.4" node-version: description: "NodeJS Version:" type: string @@ -168,7 +168,7 @@ jobs: egress-policy: audit - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: ref: ${{ inputs.ref || '' }} @@ -177,7 +177,7 @@ jobs: run: git fetch --unshallow --no-recurse-submodules - name: Setup Java - uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0 + uses: actions/setup-java@8df1039502a15bceb9433410b1a100fbe190c53b # v4.5.0 with: distribution: ${{ inputs.java-distribution }} java-version: ${{ inputs.java-version }} diff --git a/.github/workflows/node-zxc-deploy-preview.yaml b/.github/workflows/node-zxc-deploy-preview.yaml index 346c7c167d59..a1953a7bd9bf 100644 --- a/.github/workflows/node-zxc-deploy-preview.yaml +++ b/.github/workflows/node-zxc-deploy-preview.yaml @@ -119,7 +119,7 @@ jobs: fi - name: Authenticate to Google Cloud - uses: google-github-actions/auth@55bd3a7c6e2ae7cf1877fd1ccb9d54c0503c457c # v2.1.2 + uses: google-github-actions/auth@8254fb75a33b976a221574d287e93919e6a36f70 # v2.1.6 if: ${{ inputs.dry-run-enabled != true && !cancelled() && !failure() }} with: workload_identity_provider: "projects/235822363393/locations/global/workloadIdentityPools/hedera-builds-pool/providers/hedera-builds-gh-actions" @@ -144,7 +144,7 @@ jobs: - name: Notify Jenkins of Release (Preview Network) id: jenkins-preview - uses: fjogeleit/http-request-action@0bd00a33db6f82063a3c6befd41f232f61d66583 # v1.15.2 + uses: fjogeleit/http-request-action@bf78da14118941f7e940279dd58f67e863cbeff6 # v1.16.3 if: ${{ inputs.dry-run-enabled != true && !cancelled() && 
!failure() }} with: url: ${{ secrets.jenkins-preview-url }} diff --git a/.github/workflows/node-zxcron-release-branching.yaml b/.github/workflows/node-zxcron-release-branching.yaml index a8b9f59cb53f..3b8c28e34b9a 100644 --- a/.github/workflows/node-zxcron-release-branching.yaml +++ b/.github/workflows/node-zxcron-release-branching.yaml @@ -46,7 +46,7 @@ jobs: egress-policy: audit - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Read Trigger Time id: time @@ -101,7 +101,7 @@ jobs: egress-policy: audit - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Branch Creation Check id: branch-creation @@ -124,7 +124,7 @@ jobs: egress-policy: audit - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: 0 token: ${{ secrets.GH_ACCESS_TOKEN }} @@ -219,7 +219,7 @@ jobs: egress-policy: audit - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: ref: ${{ needs.check-branch.outputs.branch-name }} fetch-depth: 0 diff --git a/.github/workflows/node-zxcron-release-fsts-regression.yaml b/.github/workflows/node-zxcron-release-fsts-regression.yaml index 8514474cf6b6..7a68ca127512 100644 --- a/.github/workflows/node-zxcron-release-fsts-regression.yaml +++ b/.github/workflows/node-zxcron-release-fsts-regression.yaml @@ -25,6 +25,9 @@ defaults: env: BRANCH_LIST_FILE: "${{ github.workspace }}/branches.lst" +permissions: + contents: read + jobs: cron: name: Cron / Launch Workflows @@ -36,7 +39,7 @@ jobs: egress-policy: audit - name: Checkout Code - uses: 
actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: 0 @@ -54,7 +57,7 @@ jobs: major="${BASH_REMATCH[1]}" minor="${BASH_REMATCH[2]}" - if [[ "${major}" -eq 0 && "${minor}" -lt 53 ]]; then + if [[ "${major}" -eq 0 && "${minor}" -lt 55 ]]; then continue fi diff --git a/.github/workflows/node-zxf-deploy-integration.yaml b/.github/workflows/node-zxf-deploy-integration.yaml index 447b0bfdeb56..681fa06f050b 100644 --- a/.github/workflows/node-zxf-deploy-integration.yaml +++ b/.github/workflows/node-zxf-deploy-integration.yaml @@ -17,21 +17,18 @@ name: "ZXF: [Node] Deploy Integration Network Release" on: workflow_dispatch: + inputs: + event: + description: JSON representation of the triggering GitHub event + required: true - workflow_run: - workflows: - - "ZXC: [Node] Deploy Release Artifacts" - types: - - completed - branches: - - develop +permissions: + contents: read jobs: jenkins-checks: name: Build Artifact runs-on: network-node-linux-medium - if: ${{ false }} - steps: - name: Harden Runner uses: step-security/harden-runner@f086349bfa2bd1361f7909c78558e816508cdc10 # v2.8.0 @@ -40,10 +37,10 @@ jobs: - name: Notify Jenkins of Release (Integration) id: jenkins-integration - uses: fjogeleit/http-request-action@0bd00a33db6f82063a3c6befd41f232f61d66583 # v1.15.2 + uses: fjogeleit/http-request-action@bf78da14118941f7e940279dd58f67e863cbeff6 # v1.16.3 with: url: ${{ secrets.RELEASE_JENKINS_INTEGRATION_URL }} - data: ${{ toJSON(github.event) }} + data: ${{ inputs.event }} - name: Display Jenkins Payload env: diff --git a/.github/workflows/node-zxf-snyk-monitor.yaml b/.github/workflows/node-zxf-snyk-monitor.yaml index af41861bc274..86662b116c40 100644 --- a/.github/workflows/node-zxf-snyk-monitor.yaml +++ b/.github/workflows/node-zxf-snyk-monitor.yaml @@ -37,10 +37,10 @@ jobs: egress-policy: audit - name: Checkout - uses: 
actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Setup Java - uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0 + uses: actions/setup-java@8df1039502a15bceb9433410b1a100fbe190c53b # v4.5.0 with: distribution: temurin java-version: 21 diff --git a/.github/workflows/platform-pull-request-extended-checks.yaml b/.github/workflows/platform-pull-request-extended-checks.yaml index 4af44bf9dff8..44c82ae01d68 100644 --- a/.github/workflows/platform-pull-request-extended-checks.yaml +++ b/.github/workflows/platform-pull-request-extended-checks.yaml @@ -31,7 +31,7 @@ on: description: "Java JDK Version:" type: string required: false - default: "21" + default: "21.0.4" java-distribution: description: "Java JDK Distribution:" type: string @@ -61,7 +61,7 @@ jobs: branch-name: ${{ github.ref_name }} slack-results-channel: ${{ github.event.inputs.slack-results-channel }} slack-summary-channel: ${{ github.event.inputs.slack-summary-channel }} - java-version: ${{ github.event.inputs.java-version || '21' }} + java-version: ${{ github.event.inputs.java-version || '21.0.4' }} java-distribution: ${{ github.event.inputs.java-distribution || 'temurin' }} gradle-version: ${{ github.event.inputs.gradle-version || 'wrapper' }} use-branch-for-slack-channel: false @@ -86,7 +86,7 @@ jobs: branch-name: ${{ github.ref_name }} slack-results-channel: ${{ github.event.inputs.slack-results-channel }} slack-summary-channel: ${{ github.event.inputs.slack-summary-channel }} - java-version: ${{ github.event.inputs.java-version || '21' }} + java-version: ${{ github.event.inputs.java-version || '21.0.4' }} java-distribution: ${{ github.event.inputs.java-distribution || 'temurin' }} gradle-version: ${{ github.event.inputs.gradle-version || 'wrapper' }} use-branch-for-slack-channel: false @@ -111,7 +111,7 @@ jobs: branch-name: ${{ github.ref_name }} slack-results-channel: ${{ 
github.event.inputs.slack-results-channel }} slack-summary-channel: ${{ github.event.inputs.slack-summary-channel }} - java-version: ${{ github.event.inputs.java-version || '21' }} + java-version: ${{ github.event.inputs.java-version || '21.0.4' }} java-distribution: ${{ github.event.inputs.java-distribution || 'temurin' }} gradle-version: ${{ github.event.inputs.gradle-version || 'wrapper' }} use-branch-for-slack-channel: false @@ -136,7 +136,7 @@ jobs: branch-name: ${{ github.ref_name }} slack-results-channel: ${{ github.event.inputs.slack-results-channel }} slack-summary-channel: ${{ github.event.inputs.slack-summary-channel }} - java-version: ${{ github.event.inputs.java-version || '21' }} + java-version: ${{ github.event.inputs.java-version || '21.0.4' }} java-distribution: ${{ github.event.inputs.java-distribution || 'temurin' }} gradle-version: ${{ github.event.inputs.gradle-version || 'wrapper' }} use-branch-for-slack-channel: false diff --git a/.github/workflows/platform-zxc-launch-jrs-workflow.yaml b/.github/workflows/platform-zxc-launch-jrs-workflow.yaml index 35f6850b538a..36088ea96446 100644 --- a/.github/workflows/platform-zxc-launch-jrs-workflow.yaml +++ b/.github/workflows/platform-zxc-launch-jrs-workflow.yaml @@ -43,6 +43,9 @@ on: description: "The Github access token used to checkout the repository, submodules, and make GitHub API calls." 
required: true +permissions: + contents: read + defaults: run: shell: bash diff --git a/.github/workflows/platform-zxcron-release-jrs-regression.yaml b/.github/workflows/platform-zxcron-release-jrs-regression.yaml index eb180e90c1da..6165a7ab222a 100644 --- a/.github/workflows/platform-zxcron-release-jrs-regression.yaml +++ b/.github/workflows/platform-zxcron-release-jrs-regression.yaml @@ -20,6 +20,9 @@ on: - cron: '0 9 * * *' workflow_dispatch: +permissions: + contents: read + defaults: run: shell: bash @@ -38,7 +41,7 @@ jobs: egress-policy: audit - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: 0 @@ -56,7 +59,7 @@ jobs: major="${BASH_REMATCH[1]}" minor="${BASH_REMATCH[2]}" - if [[ "${major}" -eq 0 && "${minor}" -lt 53 ]]; then + if [[ "${major}" -eq 0 && "${minor}" -lt 55 ]]; then continue fi diff --git a/.github/workflows/zxc-jrs-regression.yaml b/.github/workflows/zxc-jrs-regression.yaml index a26807e19d83..1a464ef5db41 100644 --- a/.github/workflows/zxc-jrs-regression.yaml +++ b/.github/workflows/zxc-jrs-regression.yaml @@ -193,7 +193,7 @@ jobs: egress-policy: audit - name: Checkout Platform Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: ref: ${{ inputs.ref || inputs.branch-name || '' }} fetch-depth: 0 @@ -216,7 +216,7 @@ jobs: echo "branch-name=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}" - name: Checkout Regression Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: path: platform-sdk/regression repository: swirlds/swirlds-platform-regression @@ -305,7 +305,7 @@ jobs: sudo chmod +x /usr/local/ping-exporter/ping_exporter - name: Execute Ping Exporter - uses: 
JarvusInnovations/background-action@313d37130873d82c33fc907b9b78e932aec8e990 # v1.0.5 + uses: JarvusInnovations/background-action@2428e7b970a846423095c79d43f759abf979a635 # v1.0.7 with: run: | sudo setcap cap_net_raw+ep /usr/local/ping-exporter/ping_exporter @@ -317,7 +317,7 @@ jobs: http://localhost:9427/metrics - name: Execute Grafana Agent - uses: JarvusInnovations/background-action@313d37130873d82c33fc907b9b78e932aec8e990 # v1.0.5 + uses: JarvusInnovations/background-action@2428e7b970a846423095c79d43f759abf979a635 # v1.0.7 env: GRAFANA_AGENT_USERNAME: ${{ secrets.grafana-agent-username }} GRAFANA_AGENT_PASSWORD: ${{ secrets.grafana-agent-password }} @@ -336,7 +336,7 @@ jobs: node-version: 18 - name: Setup Java - uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0 + uses: actions/setup-java@8df1039502a15bceb9433410b1a100fbe190c53b # v4.5.0 with: distribution: ${{ inputs.java-distribution }} java-version: ${{ inputs.java-version }} @@ -369,7 +369,7 @@ jobs: fi - name: Install SSH key - uses: shimataro/ssh-key-action@38b53cb2f445ea2e0eb8872407e366677c41dbc6 # v2.6.1 + uses: shimataro/ssh-key-action@d4fffb50872869abe2d9a9098a6d9c5aa7d16be4 # v2.7.0 with: name: jrs-ssh-keyfile key: ${{ secrets.jrs-ssh-key-file }} @@ -385,7 +385,7 @@ jobs: - name: Authenticate to Google Cloud id: google-auth - uses: google-github-actions/auth@55bd3a7c6e2ae7cf1877fd1ccb9d54c0503c457c # v2.1.2 + uses: google-github-actions/auth@8254fb75a33b976a221574d287e93919e6a36f70 # v2.1.6 with: workload_identity_provider: 'projects/785813846068/locations/global/workloadIdentityPools/jrs-identity-pool/providers/gh-provider' service_account: 'swirlds-automation@swirlds-regression.iam.gserviceaccount.com' diff --git a/.github/workflows/zxc-publish-production-image.yaml b/.github/workflows/zxc-publish-production-image.yaml index 9df7dec25e70..40c82e0f9db8 100644 --- a/.github/workflows/zxc-publish-production-image.yaml +++ b/.github/workflows/zxc-publish-production-image.yaml @@ 
-93,10 +93,10 @@ jobs: egress-policy: audit - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Build Artifacts - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 with: fail-on-cache-miss: true path: ~/artifact-build @@ -104,7 +104,7 @@ jobs: - name: Authenticate to Google Cloud id: google-auth - uses: google-github-actions/auth@55bd3a7c6e2ae7cf1877fd1ccb9d54c0503c457c # v2.1.2 + uses: google-github-actions/auth@8254fb75a33b976a221574d287e93919e6a36f70 # v2.1.6 if: ${{ inputs.dry-run-enabled != true && inputs.registry-name == 'gcp' && !cancelled() && !failure() }} with: token_format: "access_token" @@ -155,7 +155,7 @@ jobs: uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0 - name: Setup Docker Buildx Support - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 with: version: v0.16.2 driver-opts: network=host @@ -168,7 +168,7 @@ jobs: run: docker run -d -p 5000:5000 --restart=always --name registry registry:latest - name: Docker Login (GCP) - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 if: ${{ inputs.dry-run-enabled != true && inputs.registry-name == 'gcp' && !cancelled() && !failure() }} with: registry: ${{ steps.set-registry.outputs.docker-registry }} @@ -176,7 +176,7 @@ jobs: password: ${{ steps.google-auth.outputs.access_token }} - name: Docker Login (JFrog) - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 if: ${{ inputs.dry-run-enabled != true && inputs.registry-name == 'jfrog' 
&& !cancelled() && !failure() }} with: registry: ${{ steps.set-registry.outputs.docker-registry }} @@ -189,7 +189,7 @@ jobs: cp -rvf ~/artifact-build/* hedera-node/infrastructure/docker/containers/production-next/consensus-node/sdk/ - name: Build Consensus Node Image - uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 + uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0 env: SOURCE_DATE_EPOCH: ${{ steps.commit.outputs.source-date }} with: diff --git a/.github/workflows/zxc-verify-docker-build-determinism.yaml b/.github/workflows/zxc-verify-docker-build-determinism.yaml index 8f46eb8ae6a1..2426ece647d8 100644 --- a/.github/workflows/zxc-verify-docker-build-determinism.yaml +++ b/.github/workflows/zxc-verify-docker-build-determinism.yaml @@ -32,7 +32,7 @@ on: description: "Java JDK Version:" type: string required: false - default: "21" + default: "21.0.4" secrets: gradle-cache-username: @@ -79,13 +79,13 @@ jobs: egress-policy: audit - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: ref: ${{ inputs.ref }} - name: Authenticate to Google Cloud id: google-auth - uses: google-github-actions/auth@55bd3a7c6e2ae7cf1877fd1ccb9d54c0503c457c # v2.1.2 + uses: google-github-actions/auth@8254fb75a33b976a221574d287e93919e6a36f70 # v2.1.6 with: workload_identity_provider: "projects/235822363393/locations/global/workloadIdentityPools/hedera-builds-pool/providers/hedera-builds-gh-actions" service_account: "swirlds-automation@hedera-registry.iam.gserviceaccount.com" @@ -118,7 +118,7 @@ jobs: echo "file=${BASELINE_FILE}" >> "${GITHUB_OUTPUT}" - name: Setup Java - uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0 + uses: actions/setup-java@8df1039502a15bceb9433410b1a100fbe190c53b # v4.5.0 if: ${{ steps.baseline.outputs.exists == 'false' && !failure() && !cancelled() }} with: 
distribution: ${{ inputs.java-distribution }} @@ -197,7 +197,7 @@ jobs: if: ${{ steps.baseline.outputs.exists == 'false' && !failure() && !cancelled() }} - name: Setup Docker Buildx Support - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 if: ${{ steps.baseline.outputs.exists == 'false' && !failure() && !cancelled() }} with: version: v0.16.2 @@ -236,7 +236,7 @@ jobs: echo "::endgroup::" - name: Build Docker Image - uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 + uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0 env: SOURCE_DATE_EPOCH: ${{ steps.commit.outputs.source-date }} if: ${{ steps.baseline.outputs.exists == 'false' && !failure() && !cancelled() }} @@ -299,12 +299,12 @@ jobs: git config --global core.eol lf - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: ref: ${{ inputs.ref }} - name: Setup Python - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: 3.9 @@ -336,7 +336,7 @@ jobs: - name: Authenticate to Google Cloud id: google-auth - uses: google-github-actions/auth@55bd3a7c6e2ae7cf1877fd1ccb9d54c0503c457c # v2.1.2 + uses: google-github-actions/auth@8254fb75a33b976a221574d287e93919e6a36f70 # v2.1.6 with: workload_identity_provider: "projects/235822363393/locations/global/workloadIdentityPools/hedera-builds-pool/providers/hedera-builds-gh-actions" service_account: "swirlds-automation@hedera-registry.iam.gserviceaccount.com" @@ -422,7 +422,7 @@ jobs: uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0 - name: Setup Docker Buildx Support - uses: 
docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 with: version: v0.16.2 driver-opts: network=host @@ -440,7 +440,7 @@ jobs: run: docker info - name: Build Docker Image - uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 + uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0 env: SOURCE_DATE_EPOCH: ${{ needs.generate-baseline.outputs.source-date }} with: diff --git a/.github/workflows/zxc-verify-gradle-build-determinism.yaml b/.github/workflows/zxc-verify-gradle-build-determinism.yaml index 3c88169e9fc8..6b51b0d55a20 100644 --- a/.github/workflows/zxc-verify-gradle-build-determinism.yaml +++ b/.github/workflows/zxc-verify-gradle-build-determinism.yaml @@ -32,7 +32,7 @@ on: description: "Java JDK Version:" type: string required: false - default: "21" + default: "21.0.4" secrets: gradle-cache-username: @@ -73,12 +73,12 @@ jobs: egress-policy: audit - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: ref: ${{ inputs.ref }} - name: Setup Java - uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0 + uses: actions/setup-java@8df1039502a15bceb9433410b1a100fbe190c53b # v4.5.0 with: distribution: ${{ inputs.java-distribution }} java-version: ${{ inputs.java-version }} @@ -90,7 +90,7 @@ jobs: - name: Authenticate to Google Cloud id: google-auth - uses: google-github-actions/auth@55bd3a7c6e2ae7cf1877fd1ccb9d54c0503c457c # v2.1.2 + uses: google-github-actions/auth@8254fb75a33b976a221574d287e93919e6a36f70 # v2.1.6 with: workload_identity_provider: "projects/235822363393/locations/global/workloadIdentityPools/hedera-builds-pool/providers/hedera-builds-gh-actions" service_account: "swirlds-automation@hedera-registry.iam.gserviceaccount.com" @@ -165,17 +165,17 @@ 
jobs: git config --global core.eol lf - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: ref: ${{ inputs.ref }} - name: Setup Python - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: 3.9 - name: Setup Java - uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0 + uses: actions/setup-java@8df1039502a15bceb9433410b1a100fbe190c53b # v4.5.0 with: distribution: ${{ inputs.java-distribution }} java-version: ${{ inputs.java-version }} @@ -191,7 +191,7 @@ jobs: - name: Authenticate to Google Cloud id: google-auth - uses: google-github-actions/auth@55bd3a7c6e2ae7cf1877fd1ccb9d54c0503c457c # v2.1.2 + uses: google-github-actions/auth@8254fb75a33b976a221574d287e93919e6a36f70 # v2.1.6 with: workload_identity_provider: "projects/235822363393/locations/global/workloadIdentityPools/hedera-builds-pool/providers/hedera-builds-gh-actions" service_account: "swirlds-automation@hedera-registry.iam.gserviceaccount.com" diff --git a/.github/workflows/zxcron-extended-test-suite.yaml b/.github/workflows/zxcron-extended-test-suite.yaml index 7af193669325..3a4953d2256f 100644 --- a/.github/workflows/zxcron-extended-test-suite.yaml +++ b/.github/workflows/zxcron-extended-test-suite.yaml @@ -22,7 +22,12 @@ on: - cron: '0 */3 * * *' permissions: - contents: write + id-token: write + actions: write + pull-requests: write + statuses: write + checks: write + contents: read defaults: run: @@ -36,31 +41,58 @@ jobs: name: Fetch XTS Candidate Tag runs-on: network-node-linux-medium outputs: - xts_tag_exists: ${{ steps.check_tags_exist.outputs.xts_tag_exists }} - xts_tag_commit: ${{ steps.check_tags_exist.outputs.xts_tag_commit }} + xts-tag-exists: ${{ steps.check-tags-exist.outputs.xts-tag-exists }} + xts-tag-commit: ${{ 
steps.check-tags-exist.outputs.xts-tag-commit }} steps: + - name: Harden Runner + uses: step-security/harden-runner@f086349bfa2bd1361f7909c78558e816508cdc10 # v2.8.0 + with: + egress-policy: audit + # Checkout the latest from dev - name: Checkout Code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: '0' + ref: develop + token: ${{ secrets.GH_ACCESS_TOKEN }} # Check if the xts-candidate tag exists - # the command git branch --contains xts_tag_commit | grep -q - # will return an exit code of 1 if the tag commit is not found on the develop + # the command git branch --contains xts-tag-commit | grep --quiet + # will return an exit code of 1 if the tagged commit is not found on the develop # branch. - # TODO: Should we delete the tag as part of this job? Or should it occur after XTS passes? - name: Check for tags - id: check_tags_exist + id: check-tags-exist + env: + GH_TOKEN: ${{ github.token }} run: | - TAG=${XTS_CANDIDATE_TAG} - if [ $(git tag -l "${TAG}") ]; then - echo "xts_tag_exists=true" >> $GITHUB_OUTPUT - XTS_COMMIT=`git rev-list -n 1 ${XTS_CANDIDATE_TAG}` - git branch --contains ${XTS_COMMIT} | grep -q develop - echo "xts_tag_commit=`${XTS_COMMIT}`" >> $GITHUB_OUTPUT - git tag -d ${XTS_CANDIDATE_TAG} - git push --delete origin ${XTS_CANDIDATE_TAG} + # Check if the tag exists and if so grab its commit id + set +e + XTS_COMMIT=$(git rev-list -n 1 "${XTS_CANDIDATE_TAG}") >/dev/null 2>&1 + XTS_COMMIT_FOUND="${?}" + set -e + + # Cancel out if the tag does not exist + if [[ "${XTS_COMMIT_FOUND}" -ne 0 ]]; then + gh run cancel ${{ github.run_id }} + fi + + # Check if the tag exists on the develop branch + set +e + git branch --contains "${XTS_COMMIT}" | grep --quiet develop >/dev/null 2>&1 + BRANCH_ON_DEVELOP="${?}" + set -e + + # If the tag exists on the Develop Branch set the output variables as appropriate + # Otherwise cancel out + if [[ 
"${BRANCH_ON_DEVELOP}" -eq 0 ]]; then + echo "xts-tag-exists=true" >> $GITHUB_OUTPUT + echo "xts-tag-commit=${XTS_COMMIT}" >> $GITHUB_OUTPUT + echo "### XTS-Candidate commit found" >> $GITHUB_STEP_SUMMARY + echo "xts-tag-commit=${XTS_COMMIT}" >> $GITHUB_STEP_SUMMARY + + git push --delete origin "${XTS_CANDIDATE_TAG}" + git tag -d "${XTS_CANDIDATE_TAG}" else gh run cancel ${{ github.run_id }} fi @@ -78,7 +110,7 @@ jobs: enable-hammer-tests: true enable-hapi-tests-time-consuming: true enable-network-log-capture: true - ref: ${{ needs.fetch-xts-candidate.outputs.xts_tag_commit }} + ref: ${{ needs.fetch-xts-candidate.outputs.xts-tag-commit }} secrets: access-token: ${{ secrets.GITHUB_TOKEN }} gradle-cache-username: ${{ secrets.GRADLE_CACHE_USERNAME }} @@ -93,7 +125,7 @@ jobs: with: custom-job-name: "Platform SDK" panel-config: "configs/suites/GCP-PRCheck-Abbrev-4N.json" - ref: ${{ needs.fetch-xts-candidate.outputs.xts_tag_commit }} # pass the xts-candidate tag to the JRS panel for checkout + ref: ${{ needs.fetch-xts-candidate.outputs.xts-tag-commit }} # pass the xts-candidate tag to the JRS panel for checkout branch-name: ${{ github.head_ref || github.ref_name }} base-branch-name: ${{ github.base_ref || '' }} slack-results-channel: "regression-test" @@ -119,7 +151,7 @@ jobs: needs.fetch-xts-candidate.outputs.xts_tag_exists == 'true' }} with: custom-job-name: "Abbrev Update Test" - ref: ${{ needs.fetch-xts-candidate.outputs.xts_tag_commit }} # pass the xts-candidate tag to the JRS panel for checkout + ref: ${{ needs.fetch-xts-candidate.outputs.xts-tag-commit }} # pass the xts-candidate tag to the JRS panel for checkout branch-name: ${{ github.head_ref || github.ref_name }} hedera-tests-enabled: true use-branch-for-slack-channel: false @@ -155,21 +187,35 @@ jobs: - name: Checkout Tagged Code id: checkout_tagged_code - if: ${{ needs.fetch-xts-candidate.outputs.xts_tag_exists == 'true' }} - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + if: ${{ 
needs.fetch-xts-candidate.outputs.xts-tag-exists == 'true' }} + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + fetch-depth: '0' + ref: ${{ needs.fetch-xts-candidate.outputs.xts-tag-commit }} + token: ${{ secrets.GH_ACCESS_TOKEN }} + + - name: Import GPG Key + id: gpg_importer + uses: step-security/ghaction-import-gpg@6c8fe4d0126a59d57c21f87c9ae5dd3451fa3cca # v6.1.0 with: - ref: ${{ needs.fetch-xts-candidate.outputs.xts_tag_commit }} # this becomes an input to the reusable flow + git_commit_gpgsign: true + git_tag_gpgsign: true + git_user_signingkey: true + gpg_private_key: ${{ secrets.SVCS_GPG_KEY_CONTENTS }} + passphrase: ${{ secrets.SVCS_GPG_KEY_PASSPHRASE }} # Now that the XTS suite has run we should be able to tag for promotion - name: Tag for XTS promotion run: | - EPOCH_TIME=`date -j -f "%a %b %d %T %Z %Y" "\`LC_ALL=C date\`" "+%s"` + EPOCH_TIME=$(date +%s) TAG=xts-pass-${EPOCH_TIME} - git tag --annotate ${TAG} + git tag --annotate ${TAG} --message "chore: tagging commit for build candidate promotion" git push --set-upstream origin --tags + echo "### Commit Tagged for Promotion" >> $GITHUB_STEP_SUMMARY + echo "promotion-tag=${TAG}" >> $GITHUB_STEP_SUMMARY report-failure: - name: Report XTS preparation failure + name: Report XTS execution failure runs-on: network-node-linux-medium needs: - abbreviated-panel @@ -178,11 +224,12 @@ jobs: - hedera-node-jrs-panel - tag-for-promotion - if: ${{ needs.abbreviated-panel.result != 'success' || + if: ${{ (needs.abbreviated-panel.result != 'success' || needs.extended-test-suite.result != 'success' || needs.fetch-xts-candidate.result != 'success' || needs.hedera-node-jrs-panel.result != 'success' || - needs.tag-for-promotion.result != 'success' }} + needs.tag-for-promotion.result != 'success') && + !cancelled() && always() }} steps: - name: Harden Runner uses: step-security/harden-runner@f086349bfa2bd1361f7909c78558e816508cdc10 # v2.8.0 diff --git 
a/.github/workflows/zxcron-promote-build-candidate.yaml b/.github/workflows/zxcron-promote-build-candidate.yaml new file mode 100644 index 000000000000..9c96245c603b --- /dev/null +++ b/.github/workflows/zxcron-promote-build-candidate.yaml @@ -0,0 +1,207 @@ +## +# Copyright (C) 2023-2024 Hedera Hashgraph, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +name: "ZXCron: Promote Build Candidate" +on: + workflow_dispatch: + schedule: + # Runs Promote Build Candidate at 2000 hours + - cron: '0 20 * * *' + +permissions: + actions: write + contents: read + statuses: write + +defaults: + run: + shell: bash + +jobs: + determine-build-candidate: + name: Fetch Latest Build Candidate + runs-on: network-node-linux-medium + outputs: + build-candidate-exists: ${{ steps.find-build-candidates.outputs.build-candidate-exists }} + build-candidate-commit: ${{ steps.find-build-candidates.outputs.build-candidate-commit }} + steps: + - name: Harden Runner + uses: step-security/harden-runner@f086349bfa2bd1361f7909c78558e816508cdc10 # v2.8.0 + with: + egress-policy: audit + + # Checkout the latest from dev + - name: Checkout Code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + fetch-depth: '0' + ref: develop + token: ${{ secrets.GH_ACCESS_TOKEN }} + + - name: Find Build Candidates + id: find-build-candidates + env: + GH_TOKEN: ${{ github.token }} + run: | + TAG_PATTERN="xts-pass-*" + CANDIDATE_TAG="$(git tag --list --sort=-version:refname 
"${TAG_PATTERN}" | head --lines 1)" + if [[ -n "${CANDIDATE_TAG}" ]]; then + CANDIDATE_COMMIT=$(git rev-list --max-count 1 ${CANDIDATE_TAG}) + if git branch --contains "${CANDIDATE_COMMIT}" | grep --quiet develop >/dev/null 2>&1; then + git push --delete origin $(git tag --list "${TAG_PATTERN}") + git tag --delete $(git tag --list "${TAG_PATTERN}") + echo "build-candidate-exists=true" >> "${GITHUB_OUTPUT}" + echo "build-candidate-commit=${CANDIDATE_COMMIT}" >> "${GITHUB_OUTPUT}" + echo "### Build Candidate Found" >> "${GITHUB_STEP_SUMMARY}" + echo "build-candidate-commit=${CANDIDATE_COMMIT}" >> "${GITHUB_STEP_SUMMARY}" + echo "build_candidate_tag=${CANDIDATE_TAG}" >> "${GITHUB_STEP_SUMMARY}" + else + gh run cancel "${{ github.run_id }}" + fi + else + gh run cancel "${{ github.run_id }}" + fi + + promote-build-candidate: + name: Promote Build Candidate + runs-on: network-node-linux-medium + needs: determine-build-candidate + if: ${{ needs.determine-build-candidate.result == 'success' && needs.determine-build-candidate.outputs.build-candidate-exists == 'true' }} + steps: + - name: Harden Runner + uses: step-security/harden-runner@f086349bfa2bd1361f7909c78558e816508cdc10 # v2.8.0 + with: + egress-policy: audit + + - name: Checkout Tagged Code + id: checkout-tagged-code + if: ${{ needs.determine-build-candidate.outputs.build-candidate-exists == 'true' }} + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + fetch-depth: '0' + ref: ${{ needs.determine-build-candidate.outputs.build-candidate-commit }} + token: ${{ secrets.GH_ACCESS_TOKEN }} + + - name: Import GPG Key + id: gpg_importer + uses: step-security/ghaction-import-gpg@6c8fe4d0126a59d57c21f87c9ae5dd3451fa3cca # v6.1.0 + with: + git_commit_gpgsign: true + git_tag_gpgsign: true + git_user_signingkey: true + gpg_private_key: ${{ secrets.SVCS_GPG_KEY_CONTENTS }} + passphrase: ${{ secrets.SVCS_GPG_KEY_PASSPHRASE }} + + - name: Tag Build Candidate + env: + BUILD_INDEX: ${{ 
vars.XTS_BUILD_PROMOTION_INDEX }} + run: | + BUILD_TAG="$(printf "build-%05d" "${BUILD_INDEX}")" + git tag --annotate ${BUILD_TAG} --message "chore: tagging commit for build promotion" + git push --set-upstream origin --tags + echo "### Build Promotion Tag Information" >> "${GITHUB_STEP_SUMMARY}" + echo "build-tag=${BUILD_TAG}" >> "${GITHUB_STEP_SUMMARY}" + + - name: Increment Build Promotion Index + uses: action-pack/increment@14c9f7fbbf560e7518ccaeab781aeca7bff15069 # v2.12 + id: increment + with: + name: 'XTS_BUILD_PROMOTION_INDEX' + token: ${{ secrets.GH_ACCESS_TOKEN }} + + - name: Preview Next Build + env: + NEXT_BUILD_ID: ${{ steps.increment.outputs.value }} + run: | + NEXT_BUILD_TAG="$(printf "build-%05d" "${NEXT_BUILD_ID}")" + echo "### Preview Next Build Tag" >> "${GITHUB_STEP_SUMMARY}" + echo "Next build tag is: ${NEXT_BUILD_TAG}" >> "${GITHUB_STEP_SUMMARY}" + + report-failure: + name: Report XTS execution failure + runs-on: network-node-linux-medium + needs: + - determine-build-candidate + - promote-build-candidate + if: ${{ (needs.determine-build-candidate.result != 'success' || needs.promote-build-candidate.result != 'success') && !cancelled() && always() }} + steps: + - name: Harden Runner + uses: step-security/harden-runner@f086349bfa2bd1361f7909c78558e816508cdc10 # v2.8.0 + with: + egress-policy: audit + + - name: Report failure (slack) + uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 + env: + SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_CITR_WEBHOOK }} + with: + payload: | + { + "attachments": [ + { + "color": "#7647cd", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": ":grey_exclamation: Hedera Services - Build Candidate Promotion Error Report", + "emoji": true + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Build Candidate Promotion Job Resulted in failure. 
See status below.*" + }, + "fields": [ + { + "type": "plain_text", + "text": "Fetch Latest Build Candidate" + }, + { + "type": "plain_text", + "text": "${{ needs.determine-build-candidate.result }}" + }, + { + "type": "plain_text", + "text": "Promote Build Candidate" + }, + { + "type": "plain_text", + "text": "${{ needs.promote-build-candidate.result }}" + } + ] + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Source Commit*: \n<${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}>" + } + } + ] + } + ] + } diff --git a/.github/workflows/zxf-prepare-extended-test-suite.yaml b/.github/workflows/zxf-prepare-extended-test-suite.yaml index 42158896471a..9a21e473c2f7 100644 --- a/.github/workflows/zxf-prepare-extended-test-suite.yaml +++ b/.github/workflows/zxf-prepare-extended-test-suite.yaml @@ -16,19 +16,18 @@ name: "ZXF: Prepare Extended Test Suite" on: - workflow_run: - workflows: - - "ZXF: Deploy Production Release" - types: - - completed - branches: - - develop + workflow_dispatch: + inputs: + ref: + description: Git Commit Reference for the XTS prep tag + required: true defaults: run: shell: bash permissions: + actions: write contents: write env: @@ -38,20 +37,38 @@ jobs: tag-for-xts: name: Tag for XTS promotion runs-on: network-node-linux-medium - if: ${{ github.event.workflow_run.conclusion == 'success' && !github.event.workflow_run.head_repository.fork && github.event.workflow_run.head_branch == 'develop'}} steps: - name: Harden Runner uses: step-security/harden-runner@f086349bfa2bd1361f7909c78558e816508cdc10 # v2.8.0 with: egress-policy: audit - - name: Checkout code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + - name: Checkout Default Branch + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: '0' - ref: ${{ github.event.workflow_run.head_sha }} + ref: 'develop' + token: ${{ secrets.GH_ACCESS_TOKEN }} + + - 
name: Validate Input Ref + id: validate-input + env: + COMMIT_ID: ${{ inputs.ref }} + run: | + if git merge-base --is-ancestor "${COMMIT_ID}" develop >/dev/null 2>&1; then + echo "commit_on_dev=true" >> $GITHUB_OUTPUT + else + echo "::error title=Branch Alignment::The provided commit (${COMMIT_ID}) is not present on the develop branch." + exit 1 + fi + + - name: Checkout Code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + if: ${{ steps.validate-input.outputs.commit_on_dev == 'true'}} + with: + fetch-depth: '0' + ref: ${{ inputs.ref }} token: ${{ secrets.GH_ACCESS_TOKEN }} - persist-credentials: 'true' - name: Import GPG Key id: gpg_importer @@ -66,11 +83,24 @@ jobs: # move the tag if successful - name: Tag Code and push run: | - git tag --force --sign ${XTS_CANDIDATE_TAG} --message "Tagging commit for XTS promotion" + # Check if the tag exists + set +e + git rev-list -n 1 "${XTS_CANDIDATE_TAG}" >/dev/null 2>&1 + XTS_COMMIT_FOUND="${?}" + set -e + + # Delete the tag if it does exist + if [[ "${XTS_COMMIT_FOUND}" -eq 0 ]]; then + git push --delete origin "${XTS_CANDIDATE_TAG}" + git tag -d "${XTS_CANDIDATE_TAG}" + fi + + # Create the new tag + git tag --annotate "${XTS_CANDIDATE_TAG}" --message "chore: tagging commit for XTS promotion" git push --set-upstream origin --tags - name: Report failure - if: ${{ failure() }} + if: ${{ !cancelled() && failure() && always() }} uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 env: SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/hapi/hedera-protobufs/block/stream/output/smart_contract_service.proto b/hapi/hedera-protobufs/block/stream/output/smart_contract_service.proto index 8f4d39ca027a..a5f0807702a7 100644 --- a/hapi/hedera-protobufs/block/stream/output/smart_contract_service.proto +++ b/hapi/hedera-protobufs/block/stream/output/smart_contract_service.proto @@ -131,7 +131,11 @@ message EthereumOutput { * This field is not settled and MAY be removed or 
modified. */ repeated proto.TransactionSidecarRecord sidecars = 1; - + /** + * An ethereum hash value. + *

+ * This SHALL be a keccak256 hash of the ethereumData. + */ bytes ethereum_hash = 2; oneof eth_result { diff --git a/hapi/hedera-protobufs/block/stream/output/token_service.proto b/hapi/hedera-protobufs/block/stream/output/token_service.proto index c4a8fa2524e3..70c2925569fb 100644 --- a/hapi/hedera-protobufs/block/stream/output/token_service.proto +++ b/hapi/hedera-protobufs/block/stream/output/token_service.proto @@ -35,7 +35,6 @@ option java_package = "com.hedera.hapi.block.stream.output.protoc"; option java_multiple_files = true; import "custom_fees.proto"; -import "transaction_record.proto"; /** * Block Stream data for a `createToken` transaction. diff --git a/hapi/hedera-protobufs/services/node_create.proto b/hapi/hedera-protobufs/services/node_create.proto index 24d06f8a5480..03d6fa282df6 100644 --- a/hapi/hedera-protobufs/services/node_create.proto +++ b/hapi/hedera-protobufs/services/node_create.proto @@ -137,4 +137,27 @@ message NodeCreateTransactionBody { * This field is REQUIRED and MUST NOT be set to an empty `KeyList`. */ proto.Key admin_key = 7; + + /** + * An ALT_BN128 elliptic curve public encryption key.
+ * This is controlled by the node operator and specific to this + * node's TSS operations. + *

+ * The elliptic curve type MAY change in the future. For example, + * if the Ethereum ecosystem creates precompiles for BLS12_381, + * we may switch to that curve.
+ * This value SHALL be specified according to EIP-196 and EIP-197 standards. + * See [EIP-196](https://eips.ethereum.org/EIPS/eip-196#encoding) and + * [EIP-197](https://eips.ethereum.org/EIPS/eip-197#encoding)
+ * This field is OPTIONAL (that is, it can initially be null), + * but once set, it MUST NOT be null.
+ * If this field is set: + *

+ */ + bytes tss_encryption_key = 8; } diff --git a/hapi/hedera-protobufs/services/node_update.proto b/hapi/hedera-protobufs/services/node_update.proto index 834c6b9bd93b..978eb16a8943 100644 --- a/hapi/hedera-protobufs/services/node_update.proto +++ b/hapi/hedera-protobufs/services/node_update.proto @@ -162,4 +162,27 @@ message NodeUpdateTransactionBody { * If set, this field MUST NOT be set to an empty `KeyList`. */ proto.Key admin_key = 8; + + /** + * An ALT_BN128 elliptic curve public encryption key.
+ * This is controlled by the node operator and specific to this + * node's TSS operations. + *

+ * The elliptic curve type MAY change in the future. For example, + * if the Ethereum ecosystem creates precompiles for BLS12_381, + * we may switch to that curve.
+ * This value SHALL be specified according to EIP-196 and EIP-197 standards. + * See [EIP-196](https://eips.ethereum.org/EIPS/eip-196#encoding) and + * [EIP-197](https://eips.ethereum.org/EIPS/eip-197#encoding)
+ * This field is OPTIONAL (that is, it can initially be null), + * but once set, it MUST NOT be null.
+ * If this field is set: + *

+ */ + bytes tss_encryption_key = 9; } diff --git a/hapi/hedera-protobufs/services/state/addressbook/node.proto b/hapi/hedera-protobufs/services/state/addressbook/node.proto index e00b5f386051..7e5931076520 100644 --- a/hapi/hedera-protobufs/services/state/addressbook/node.proto +++ b/hapi/hedera-protobufs/services/state/addressbook/node.proto @@ -146,4 +146,27 @@ message Node { * This field is REQUIRED and MUST NOT be set to an empty `KeyList`. */ proto.Key admin_key = 10; + + /** + * An ALT_BN128 elliptic curve public encryption key.
+ * This is controlled by the node operator and specific to this + * node's TSS operations. + *

+ * The elliptic curve type MAY change in the future. For example, + * if the Ethereum ecosystem creates precompiles for BLS12_381, + * we may switch to that curve.
+ * This value SHALL be specified according to EIP-196 and EIP-197 standards. + * See [EIP-196](https://eips.ethereum.org/EIPS/eip-196#encoding) and + * [EIP-197](https://eips.ethereum.org/EIPS/eip-197#encoding)
+ * This field is OPTIONAL (that is, it can initially be null), + * but once set, it MUST NOT be null.
+ * If this field is set: + *

+ */ + bytes tss_encryption_key = 11; } diff --git a/hapi/hedera-protobufs/services/state/blockstream/block_stream_info.proto b/hapi/hedera-protobufs/services/state/blockstream/block_stream_info.proto index f7b19530eb86..6aad7db3c5ab 100644 --- a/hapi/hedera-protobufs/services/state/blockstream/block_stream_info.proto +++ b/hapi/hedera-protobufs/services/state/blockstream/block_stream_info.proto @@ -50,7 +50,7 @@ option java_multiple_files = true; * _each_ block, but MUST be updated at the beginning of the _next_ block.
* This value SHALL contain the block hash up to, and including, the * immediately prior completed block.
- * The state change to update this singleton MUST be the last "output" + * The state change to update this singleton MUST be the last * block item in this block. */ message BlockStreamInfo { @@ -74,7 +74,7 @@ message BlockStreamInfo { * A concatenation of hash values.
* This combines several trailing output block item hashes and * is used as a seed value for a pseudo-random number generator.
- * This is also required to implement the EVM `PREVRANDAO` opcode. + * This is also required to implement the EVM `PREVRANDAO` opcode.
* This MUST contain at least 256 bits of entropy. */ bytes trailing_output_hashes = 3; diff --git a/hedera-node/docs/design/services/smart-contract-service/frictionless-airdrops-system-contracts.md b/hedera-node/docs/design/services/smart-contract-service/frictionless-airdrops-system-contracts.md new file mode 100644 index 000000000000..8a851a2fd0f6 --- /dev/null +++ b/hedera-node/docs/design/services/smart-contract-service/frictionless-airdrops-system-contracts.md @@ -0,0 +1,175 @@ +# Frictionless Airdrops via System Contracts + +## Purpose + +[HIP-904](https://hips.hedera.com/hip/hip-904) introduced the Frictionless Airdrops feature for fungible and non-fungible tokens. +This document will define the architecture and implementation of the `airdropToken`, `claimAirdrops`, `cancelAirdrops` and `rejectTokens` smart contract functions +and their respective redirect function calls (`cancelAirdropFT`, `cancelAirdropNFT`, `claimAirdropFT`, `claimAirdropNFT`, `rejectTokenFT`, `rejectTokenNFTs`, `setUnlimitedAutomaticAssociations`) that will extend the capabilities of the Hedera Smart Contract Service (HSCS) to support frictionless airdrops. + +## References + +[HIP-904](https://hips.hedera.com/hip/hip-904) - HIP that introduces the frictionless airdrops. + +## Goals + +- Expose `airdropToken`, `claimAirdrops`, `cancelAirdrops` and `rejectTokens` as new functions in the Hedera Token Service Smart Contract. +- Expose `cancelAirdropFT`, `cancelAirdropNFT`, `claimAirdropFT`, `claimAirdropNFT`, `rejectTokenFt`, `rejectTokenNFTs`, `setUnlimitedAutomaticAssociations` as new functions in a new proxy redirect token facade contract IHRC904. +- Implement the needed HTS system contract classes to support the new functions. + +## Non Goals + +- The implementation of the HAPI operation, as it is already an existing feature. 
+ +## Architecture + +The architecture for the frictionless airdrops follows the existing framework defined for handling all calls to the HederaTokenService system contract in the modularization services and is described in more detail in the Implementation section below. + +## Implementation + +### New Solidity Structures + +We will introduce the following new structures to support the new functionality: + +`PendingAirdrop` - A struct that represents a pending airdrop request. + +```solidity +struct PendingAirdrop { + address sender; + address receiver; + + address token; + int64 serial; +} +``` + +`NftID` - A struct that represents the Nft serial to be rejected. + +```solidity +struct NftID { + address nft; + int64 serial; +} +``` + +### New Solidity Functions + +New system contract functions must be added to the `IHederaTokenService` interface to support airdropping tokens. + +| Function Selector Hash | Function Signature | HAPI Transaction | Response | | +|------------------------|---------------------------------------------------------------------------------------------------------------------|------------------|----------------|---------------------------------| +| `0x2f348119` | `function airdropTokens(TokenTransferList[] memory tokenTransfers) external returns (int64 responseCode)` | TokenAirdrop | `ResponseCode` | The response code from the call | +| `0x012ebcaf` | `function cancelAirdrops(PendingAirdrop[] memory pendingAirdrops) external returns (int64 responseCode)` | TokenCancel | `ResponseCode` | The response code from the call | +| `0x05961641` | `function claimAirdrops(PendingAirdrop[] memory pendingAirdrops) external returns (int64 responseCode)` | TokenClaim | `ResponseCode` | The response code from the call | +| `0x179300d7` | `function rejectTokens(address[] memory ftAddresses, NftID[] memory nftIDs) external returns (int64 responseCode)` | TokenReject | `ResponseCode` | The response code from the call | + +New system contract functions must be 
added to a new `IHRC904` interface to support airdropping tokens. + +| Function Selector Hash | Function Signature | HAPI Transaction | Responsible service | Response | | +|------------------------|----------------------------------------------------------------------------------------------------------------------|------------------|---------------------|----------------|---------------------------------| +| `0xcef5b705` | `function cancelAirdropFT(address receiverAddress) external returns (uint256 responseCode)` | TokenCancel | HTS | `ResponseCode` | The response code from the call | +| `0xad4917cf` | `function cancelAirdropNFT(address receiverAddress, int64 serialNumber) external returns (uint256 responseCode)` | TokenCancel | HTS | `ResponseCode` | The response code from the call | +| `0xa83bc5b2` | `function claimAirdropFT(address senderAddress) external returns (uint256 responseCode)` | TokenClaim | HTS | `ResponseCode` | The response code from the call | +| `0x63ada5d7` | `function claimAirdropNFT(address senderAddress, int64 serialNumber) external returns (uint256 responseCode)` | TokenClaim | HTS | `ResponseCode` | The response code from the call | +| `0x76c6b391` | `function rejectTokenFT() external returns (uint256 responseCode)` | TokenReject | HTS | `ResponseCode` | The response code from the call | +| `0xa869c78a` | `function rejectTokenNFTs(int64[] memory serialNumbers) external returns (uint256 responseCode)` | TokenReject | HTS | `ResponseCode` | The response code from the call | +| `0x966884d4` | `function setUnlimitedAutomaticAssociations(boolean enableAutoAssociations) external returns (uint256 responseCode)` | CryptoUpdate | HAS | `ResponseCode` | The response code from the call | + +#### Input limitations + +- The `airdropTokens` function will accept an array of `TokenTransferList` with a maximum of 10 elements by default managed by `tokens.maxAllowedAirdropTransfersPerTx` configuration. 
+- The `cancelAirdrops` function will accept an array of `PendingAirdrop` with a maximum of 10 elements by default managed by `tokens.maxAllowedPendingAirdropsToCancel` configuration. +- The `claimAirdrops` function will accept an array of `PendingAirdrop` with a maximum of 10 elements by default managed by `tokens.maxAllowedPendingAirdropsToClaim` configuration. +- The `rejectTokens` function will accept array of `address` and `NftID` with a maximum of 10 elements combined by default managed by `ledger.tokenRejects.maxLen` configuration. Same limitation applies to `rejectTokenNFTs` function. +- The `setAutomaticAssociations` function will accept a boolean value to set the automatic associations to -1 if true and 0 for false. + +### System Contract Module + +- `AirdropTokensTranslator` - This class will be responsible for handling the `airdropTokens` selector and dispatching it to the corresponding HAPI calls. +- `AirdropTokensDecoder` - This class provides methods and constants for decoding the given `HtsCallAttempt` into a `TokenTransferList` for `TokenAirdrop` call. +- `CancelAirdropsTranslator` - This class will be responsible for handling the `cancelAirdrops`, `cancelAirdropFT` and `cancelAirdropNFT` selectors and dispatching them to the corresponding HAPI calls. +- `CancelAirdropsDecoder` - This class provides methods and constants for decoding the given `HtsCallAttempt` into a `PendingAirdropId` list for `TokenCancelAirdrop` call. +- `ClaimAirdropsTranslator` - This class will be responsible for handling the `claimAirdrops`, `claimAirdropFT` and `claimAirdropNFT` selectors and dispatching them to the corresponding HAPI calls. +- `ClaimAirdropsDecoder` - This class provides methods and constants for decoding the given `HtsCallAttempt` into a `PendingAirdropId` list for `TokenClaimAirdrop` call. 
+- `RejectTokensTranslator` - This class will be responsible for handling the `rejectTokens`, `rejectTokenFT` and `rejectTokenNFT` selectors and dispatching them to the corresponding HAPI calls. +- `RejectTokensDecoder` - This class provides methods and constants for decoding the given `HtsCallAttempt` into `TokenReference` list for `TokenReject` call. +- `SetAutomaticAssociationsTranslator` - This class will be responsible for handling the `setAutomaticAssociations` selector and dispatching it to the corresponding HAPI calls. +- `SetAutomaticAssociationsCall` - This class provides methods for preparing and executing the given `HasCallAttempt`. + +### Feature Flags + +In order to gate the newly introduced system contract calls, we will introduce the following feature flags: +- `contracts.systemContract.airdropTokens.enabled` - Enable/Disable the `airdropTokens` system contract call. +- `contracts.systemContract.cancelAirdrops.enabled` - Enable/Disable the `cancelAirdrops`, `cancelAirdropFT` and `cancelAirdropNFT` system contract calls. +- `contracts.systemContract.claimAirdrops.enabled` - Enable/Disable the `claimAirdrops`, `claimAirdropFT` and `claimAirdropNFT` system contract calls. +- `contracts.systemContract.rejectTokens.enabled` - Enable/Disable the `rejectTokens`, `rejectTokenFT` and `rejectTokenNFTs` system contract calls. +- `contracts.systemContract.setUnlimitedAutoAssociations.enabled` - Enable/Disable the `setUnlimitedAutomaticAssociations` system contract call. + +## Security Implications + +The newly added flows will adopt the HAPI authorization logic and the security V2 model. +We will apply the `TokenReject`, `TokenAirdrop`, `TokenClaimAirdrop`, `TokenCancelAirdrop` throttle mechanisms. + +## Acceptance Tests + +### BDD Tests + +#### Positive Tests + +- Verify that the `airdropTokens` function airdrops multiple tokens both ft and nft to multiple accounts. 
+- Verify that the `airdropTokens` function airdrops 10 tokens both ft and nft to multiple accounts. +- Verify that the `airdropTokens` function airdrops a fungible token to an account. +- Verify that the `airdropTokens` function airdrops a nft token to an account. +- Verify that the `cancelAirdrops` function cancels multiple pending airdrops. +- Verify that the `cancelAirdrops` function cancels 10 pending airdrops. +- Verify that the `cancelAirdrops` function cancels single pending airdrop. +- Verify that the `claimAirdrops` function claims multiple pending airdrops. +- Verify that the `claimAirdrops` function claims 10 pending airdrops. +- Verify that the `claimAirdrops` function claims a single pending airdrop. +- Verify that the `rejectTokens` function rejects tokens for multiple accounts. +- Verify that the `cancelAirdropFT` function cancels a pending airdrop of the redirected token. +- Verify that the `cancelAirdropNFT` function cancels a pending airdrop of the redirected nft serial. +- Verify that the `claimAirdropFT` function claims a pending airdrop of the redirected token. +- Verify that the `claimAirdropNFT` function claims a pending airdrop of the redirected nft serial number. +- Verify that the `rejectTokenFT` function rejects tokens for a given account. +- Verify that the `rejectTokenNFTs` function rejects tokens for a given account and serial number. +- Verify that the `rejectTokenNFTs` function rejects 10 tokens for a given account and serial number. +- Verify that the `setUnlimitedAutomaticAssociations` function enables the automatic associations to unlimited (-1) for a given account. +- Verify that the `setUnlimitedAutomaticAssociations` function disables the automatic associations to zero for a given account. + +#### Negative Tests + +- Verify that the `airdropTokens` function fails when the sender does not have enough balance. +- Verify that the `airdropTokens` function fails when the receiver does not have a valid account. 
+- Verify that the `airdropTokens` function fails when the token does not exist. +- Verify that the `airdropTokens` function fails when the airdrop amounts are out of bounds. +- Verify that the `airdropTokens` function fails when 11 or more airdrops are provided. +- Verify that the `cancelAirdrops` function fails when the sender does not have any pending airdrops. +- Verify that the `cancelAirdrops` function fails when the sender does not have a valid account. +- Verify that the `cancelAirdrops` function fails when the receiver does not have a valid account. +- Verify that the `cancelAirdrops` function fails when the token does not exist. +- Verify that the `cancelAirdrops` function fails when the nft does not exist. +- Verify that the `cancelAirdrops` function fails when 11 or more pending airdrops are provided. +- Verify that the `cancelAirdrops` function fails when the nft serial number does not exist. +- Verify that the `claimAirdrops` function fails when the sender does not have any pending airdrops. +- Verify that the `claimAirdrops` function fails when the sender does not have a valid account. +- Verify that the `claimAirdrops` function fails when 11 or more pending airdrops are provided. +- Verify that the `claimAirdrops` function fails when the receiver does not have a valid account. +- Verify that the `claimAirdrops` function fails when the token does not exist. +- Verify that the `claimAirdrops` function fails when the nft does not exist. +- Verify that the `claimAirdrops` function fails when the nft serial number does not exist. +- Verify that the `rejectTokens` function fails when the sender does not have any associated tokens. +- Verify that the `rejectTokens` function fails when the sender does not have a pending airdrop. +- Verify that the `rejectTokens` function fails when the provided fungible token is invalid. +- Verify that the `rejectTokens` function fails when the provided nft is invalid. 
+- Verify that the `cancelAirdropFT` function fails when the sender does not have any pending airdrops. +- Verify that the `cancelAirdropFT` function fails when the sender does not have a valid account. +- Verify that the `cancelAirdropNFT` function fails when the sender does not have any pending airdrops. +- Verify that the `cancelAirdropNFT` function fails when the sender does not have a valid account. +- Verify that the `claimAirdropFT` function fails when the sender does not have any pending airdrops. +- Verify that the `claimAirdropFT` function fails when the sender does not have a valid account. +- Verify that the `claimAirdropNFT` function fails when the sender does not have any pending airdrops. +- Verify that the `claimAirdropNFT` function fails when the sender does not have a valid account. +- Verify that the `rejectTokenFT` function fails when the sender does not have any tokens. +- Verify that the `rejectTokenFT` function fails when the sender does not have a valid account. +- Verify that the `rejectTokensNFT` function fails when the sender does not have any tokens. +- Verify that the `rejectTokensNFT` function fails when 11 or more serials are provided. 
diff --git a/hedera-node/hedera-addressbook-service-impl/src/main/java/com/hedera/node/app/service/addressbook/impl/ReadableNodeStoreImpl.java b/hedera-node/hedera-addressbook-service-impl/src/main/java/com/hedera/node/app/service/addressbook/impl/ReadableNodeStoreImpl.java index 19d1d5e29a3f..8004e700e16e 100644 --- a/hedera-node/hedera-addressbook-service-impl/src/main/java/com/hedera/node/app/service/addressbook/impl/ReadableNodeStoreImpl.java +++ b/hedera-node/hedera-addressbook-service-impl/src/main/java/com/hedera/node/app/service/addressbook/impl/ReadableNodeStoreImpl.java @@ -21,12 +21,16 @@ import com.hedera.hapi.node.state.addressbook.Node; import com.hedera.hapi.node.state.common.EntityNumber; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; import com.hedera.node.app.service.addressbook.ReadableNodeStore; import com.swirlds.state.spi.ReadableKVState; import com.swirlds.state.spi.ReadableStates; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.ArrayList; import java.util.Iterator; +import java.util.Objects; /** * Provides read-only methods for interacting with the underlying data storage mechanisms for @@ -49,6 +53,11 @@ public ReadableNodeStoreImpl(@NonNull final ReadableStates states) { this.nodesState = states.get(NODES_KEY); } + @Override + public Roster newRosterFromNodes() { + return constructFromNodesState(nodesState()); + } + /** * Returns the node needed. 
* @@ -77,4 +86,25 @@ protected > T nodesState() { public Iterator keys() { return nodesState().keys(); } + + private Roster constructFromNodesState(@NonNull final ReadableKVState nodesState) { + final var rosterEntries = new ArrayList(); + for (final Iterator it = nodesState.keys(); it.hasNext(); ) { + final var nodeNumber = it.next(); + final var nodeDetail = nodesState.get(nodeNumber); + if (!nodeDetail.deleted()) { + final var entry = RosterEntry.newBuilder() + .nodeId(nodeDetail.nodeId()) + .weight(nodeDetail.weight()) + .gossipCaCertificate(nodeDetail.gossipCaCertificate()) + .gossipEndpoint(nodeDetail.gossipEndpoint()) + .tssEncryptionKey(nodeDetail.tssEncryptionKey()) + .build(); + rosterEntries.add(entry); + } + } + + rosterEntries.sort((re1, re2) -> Objects.compare(re1.nodeId(), re2.nodeId(), Long::compareTo)); + return Roster.newBuilder().rosterEntries(rosterEntries).build(); + } } diff --git a/hedera-node/hedera-addressbook-service-impl/src/test/java/com/hedera/node/app/service/addressbook/impl/test/ReadableNodeStoreImplTest.java b/hedera-node/hedera-addressbook-service-impl/src/test/java/com/hedera/node/app/service/addressbook/impl/test/ReadableNodeStoreImplTest.java index 4605323af37f..f57f4db1d902 100644 --- a/hedera-node/hedera-addressbook-service-impl/src/test/java/com/hedera/node/app/service/addressbook/impl/test/ReadableNodeStoreImplTest.java +++ b/hedera-node/hedera-addressbook-service-impl/src/test/java/com/hedera/node/app/service/addressbook/impl/test/ReadableNodeStoreImplTest.java @@ -25,18 +25,22 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.BDDMockito.given; import static org.mockito.Mockito.mock; import com.hedera.hapi.node.state.addressbook.Node; import com.hedera.hapi.node.state.common.EntityNumber; +import 
com.hedera.hapi.node.state.roster.RosterEntry; import com.hedera.node.app.service.addressbook.ReadableNodeStore; import com.hedera.node.app.service.addressbook.impl.ReadableNodeStoreImpl; import com.hedera.node.app.service.addressbook.impl.test.handlers.AddressBookTestBase; +import com.swirlds.state.spi.ReadableKVState; import com.swirlds.state.test.fixtures.MapReadableKVState; import java.util.Set; import org.assertj.core.util.Streams; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; class ReadableNodeStoreImplTest extends AddressBookTestBase { @@ -107,4 +111,39 @@ void keysWorks() { assertEquals( keySet, Set.of(new EntityNumber(1), new EntityNumber(2), new EntityNumber(4), new EntityNumber(5))); } + + @Test + @DisplayName("Constructing a new roster includes all of the latest nodes defined in state") + void newRosterFromNodesIncludesAllUndeletedDefinitions() { + final ReadableKVState nodesState = emptyReadableNodeStateBuilder() + .value(EntityNumber.newBuilder().number(1).build(), NODE_1) + .value(EntityNumber.newBuilder().number(2).build(), NODE_2) + .value(EntityNumber.newBuilder().number(3).build(), NODE_3) + .value( + EntityNumber.newBuilder().number(4).build(), + Node.newBuilder().nodeId(4).weight(40).deleted(true).build()) + .build(); + given(readableStates.get(anyString())).willReturn(nodesState); + + subject = new ReadableNodeStoreImpl(readableStates); + final var result = subject.newRosterFromNodes(); + org.assertj.core.api.Assertions.assertThat(result.rosterEntries()) + .containsExactlyInAnyOrder(ROSTER_NODE_1, ROSTER_NODE_2, ROSTER_NODE_3); + } + + private static final Node NODE_1 = Node.newBuilder().nodeId(1).weight(10).build(); + private static final RosterEntry ROSTER_NODE_1 = RosterEntry.newBuilder() + .nodeId(NODE_1.nodeId()) + .weight(NODE_1.weight()) + .build(); + private static final Node NODE_2 = Node.newBuilder().nodeId(2).weight(20).build(); + private static final 
RosterEntry ROSTER_NODE_2 = RosterEntry.newBuilder() + .nodeId(NODE_2.nodeId()) + .weight(NODE_2.weight()) + .build(); + private static final Node NODE_3 = Node.newBuilder().nodeId(3).weight(30).build(); + private static final RosterEntry ROSTER_NODE_3 = RosterEntry.newBuilder() + .nodeId(NODE_3.nodeId()) + .weight(NODE_3.weight()) + .build(); } diff --git a/hedera-node/hedera-addressbook-service-impl/src/test/java/com/hedera/node/app/service/addressbook/impl/test/handlers/AddressBookTestBase.java b/hedera-node/hedera-addressbook-service-impl/src/test/java/com/hedera/node/app/service/addressbook/impl/test/handlers/AddressBookTestBase.java index 50683eafdbec..e019b4efc5b9 100644 --- a/hedera-node/hedera-addressbook-service-impl/src/test/java/com/hedera/node/app/service/addressbook/impl/test/handlers/AddressBookTestBase.java +++ b/hedera-node/hedera-addressbook-service-impl/src/test/java/com/hedera/node/app/service/addressbook/impl/test/handlers/AddressBookTestBase.java @@ -142,6 +142,8 @@ public class AddressBookTestBase { private final byte[] invalidIPBytes = {49, 46, 48, 46, 48, 46, 48}; protected final ServiceEndpoint endpoint10 = new ServiceEndpoint(Bytes.wrap(invalidIPBytes), 1234, null); + private static final Bytes TSS_KEY = Bytes.wrap(new byte[] {1, 2, 3}); + protected Node node; @Mock @@ -246,7 +248,8 @@ protected void givenValidNode(boolean deleted) { Bytes.wrap(grpcCertificateHash), 0, deleted, - key); + key, + TSS_KEY); } protected void givenValidNodeWithAdminKey(Key adminKey) { @@ -260,7 +263,8 @@ protected void givenValidNodeWithAdminKey(Key adminKey) { Bytes.wrap(grpcCertificateHash), 0, false, - adminKey); + adminKey, + TSS_KEY); } protected Node createNode() { @@ -274,6 +278,7 @@ protected Node createNode() { .grpcCertificateHash(Bytes.wrap(grpcCertificateHash)) .weight(0) .adminKey(key) + .tssEncryptionKey(TSS_KEY) .build(); } diff --git 
a/hedera-node/hedera-addressbook-service-impl/src/test/java/com/hedera/node/app/service/addressbook/impl/test/handlers/NodeCreateHandlerTest.java b/hedera-node/hedera-addressbook-service-impl/src/test/java/com/hedera/node/app/service/addressbook/impl/test/handlers/NodeCreateHandlerTest.java index f603a30e6f0a..a2dacb4d3714 100644 --- a/hedera-node/hedera-addressbook-service-impl/src/test/java/com/hedera/node/app/service/addressbook/impl/test/handlers/NodeCreateHandlerTest.java +++ b/hedera-node/hedera-addressbook-service-impl/src/test/java/com/hedera/node/app/service/addressbook/impl/test/handlers/NodeCreateHandlerTest.java @@ -526,7 +526,7 @@ void handleFailsWhenInvalidAdminKey() { } @Test - void hanldeWorkAsExpected() throws CertificateEncodingException { + void handleWorksAsExpected() throws CertificateEncodingException { txn = new NodeCreateBuilder() .withAccountId(accountId) .withDescription("Description") diff --git a/hedera-node/hedera-addressbook-service/src/main/java/com/hedera/node/app/service/addressbook/ReadableNodeStore.java b/hedera-node/hedera-addressbook-service/src/main/java/com/hedera/node/app/service/addressbook/ReadableNodeStore.java index c626d133109a..d41c40ad2be6 100644 --- a/hedera-node/hedera-addressbook-service/src/main/java/com/hedera/node/app/service/addressbook/ReadableNodeStore.java +++ b/hedera-node/hedera-addressbook-service/src/main/java/com/hedera/node/app/service/addressbook/ReadableNodeStore.java @@ -18,6 +18,7 @@ import com.hedera.hapi.node.state.addressbook.Node; import com.hedera.hapi.node.state.common.EntityNumber; +import com.hedera.hapi.node.state.roster.Roster; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.util.Iterator; @@ -30,6 +31,14 @@ */ public interface ReadableNodeStore { + /** + * Constructs a new {@link Roster} object using the current info for each node defined in state. + * Accordingly, be warned that this method iterates over all nodes. 
+ * + * @return a new roster, representing the most current node configurations available + */ + Roster newRosterFromNodes(); + /** * Returns the node needed. If the node doesn't exist returns failureReason. If the * node exists , the failure reason will be null. diff --git a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/AppContext.java b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/AppContext.java index 56e0abbdc990..61ec7b02a9ba 100644 --- a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/AppContext.java +++ b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/AppContext.java @@ -16,11 +16,10 @@ package com.hedera.node.app.spi; -import static com.hedera.hapi.node.base.ResponseCodeEnum.FAIL_INVALID; - import com.hedera.hapi.node.base.ResponseCodeEnum; import com.hedera.hapi.node.transaction.TransactionBody; import com.hedera.node.app.spi.signatures.SignatureVerifier; +import com.swirlds.common.crypto.Signature; import edu.umd.cs.findbugs.annotations.NonNull; import java.time.InstantSource; @@ -37,34 +36,54 @@ interface Gossip { * A {@link Gossip} that throws an exception indicating it should never have been used; for example, * if the client code was running in a standalone mode. */ - Gossip UNAVAILABLE_GOSSIP = body -> { - throw new IllegalArgumentException("" + FAIL_INVALID); + Gossip UNAVAILABLE_GOSSIP = new Gossip() { + @Override + public void submit(@NonNull final TransactionBody body) { + throw new IllegalStateException("Gossip is not available!"); + } + + @Override + public Signature sign(final byte[] ledgerId) { + throw new IllegalStateException("Gossip is not available!"); + } }; /** * Attempts to submit the given transaction to the network. 
+ * * @param body the transaction to submit - * @throws IllegalStateException if the network is not active; the client should retry later + * @throws IllegalStateException if the network is not active; the client should retry later * @throws IllegalArgumentException if body is invalid; so the client can retry immediately with a - * different transaction id if the exception's message is {@link ResponseCodeEnum#DUPLICATE_TRANSACTION} + * different transaction id if the exception's message is {@link ResponseCodeEnum#DUPLICATE_TRANSACTION} */ void submit(@NonNull TransactionBody body); + + /** + * Signs the given bytes with the node's RSA key and returns the signature. + * + * @param bytes the bytes to sign + * @return the signature + */ + Signature sign(byte[] bytes); } /** * The source of the current instant. + * * @return the instant source */ InstantSource instantSource(); /** * The signature verifier the application workflows will use. + * * @return the signature verifier */ SignatureVerifier signatureVerifier(); /** * The {@link Gossip} can be used to submit transactions to the network when it is active. 
+ * * @return the gossip interface */ Gossip gossip(); diff --git a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/fees/FeeContext.java b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/fees/FeeContext.java index d06f399e32e5..409c30dd4450 100644 --- a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/fees/FeeContext.java +++ b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/fees/FeeContext.java @@ -67,7 +67,7 @@ public interface FeeContext { * * @return the {@code Configuration} */ - @Nullable + @NonNull Configuration configuration(); /** diff --git a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/key/KeyComparator.java b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/key/KeyComparator.java index 737e87439e21..2783feeeef04 100644 --- a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/key/KeyComparator.java +++ b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/key/KeyComparator.java @@ -30,10 +30,10 @@ *
These include maps, sets, lists, arrays, etc... *
The methods in this class are used in hot spot code, so allocation must be kept to a bare * minimum, and anything likely to have performance questions should be avoided. - *
Note that comparing keys is unavoidably costly. We try to exit as early as possible throughout - * this class, but worst case we're comparing every simple key byte-by-byte for the entire tree, which - * may be up to 15 levels deep with any number of keys per level. We haven't seen a key with - * several million "simple" keys included, but that does not mean nobody will create one. + *
Note that comparing keys can be fairly costly, as in principle a key structure can have a + * serialized size up to about {@code TransactionConfig#transactionMaxBytes()}. We try to exit as + * early as possible throughout this class, but worst case we're comparing every simple key + * byte-by-byte for the entire tree. */ public class KeyComparator implements Comparator { @Override diff --git a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/key/KeyVerifier.java b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/key/KeyVerifier.java index 3c74db270cc0..3269c95a0416 100644 --- a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/key/KeyVerifier.java +++ b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/key/KeyVerifier.java @@ -16,15 +16,20 @@ package com.hedera.node.app.spi.key; +import static java.util.Collections.unmodifiableSortedSet; + import com.hedera.hapi.node.base.Key; import com.hedera.node.app.spi.signatures.SignatureVerification; import com.hedera.node.app.spi.signatures.VerificationAssistant; import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.SortedSet; +import java.util.TreeSet; /** * Helper class that contains all functionality for verifying signatures during handle. */ public interface KeyVerifier { + SortedSet NO_CRYPTO_KEYS = unmodifiableSortedSet(new TreeSet<>(new KeyComparator())); /** * Gets the {@link SignatureVerification} for the given key. If this key was not provided during pre-handle, then @@ -60,4 +65,20 @@ public interface KeyVerifier { */ @NonNull SignatureVerification verificationFor(@NonNull Key key, @NonNull VerificationAssistant callback); + + /** + * If this verifier is based on cryptographic verification of signatures on a transaction submitted from + * outside the blockchain, returns the set of cryptographic keys that had valid signatures, ordered by the + * {@link KeyComparator}. + *

+ * Default is an empty set, for verifiers that use a more abstract concept of signing, such as, + *

    + *
  1. Whether a key references the contract whose EVM address is the recipient address of the active frame.
  2. + *
  3. Whether a key is present in the signatories list of a scheduled transaction.
  4. + *
+ * @return the set of cryptographic keys that had valid signatures for this transaction. + */ + default SortedSet signingCryptoKeys() { + return NO_CRYPTO_KEYS; + } } diff --git a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/workflows/HandleContext.java b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/workflows/HandleContext.java index 78a4207bde59..c761bab4dd7b 100644 --- a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/workflows/HandleContext.java +++ b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/workflows/HandleContext.java @@ -38,6 +38,7 @@ import com.hedera.node.app.spi.workflows.record.StreamBuilder; import com.swirlds.config.api.Configuration; import com.swirlds.state.spi.info.NetworkInfo; +import com.swirlds.state.spi.info.NodeInfo; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.time.Instant; @@ -551,6 +552,12 @@ static void throwIfMissingPayerId(@NonNull final TransactionBody body) { @NonNull Map dispatchPaidRewards(); + /** + * Returns the {@link NodeInfo} for the node this transaction is created from. + * @return the node info + */ + NodeInfo creatorInfo(); + /** * Whether a dispatch should be throttled at consensus. True for everything except certain dispatches * internal to the EVM which are only constrained by gas. diff --git a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/workflows/PreHandleContext.java b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/workflows/PreHandleContext.java index dcd70e7d8060..5ed85aa6ba7e 100644 --- a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/workflows/PreHandleContext.java +++ b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/workflows/PreHandleContext.java @@ -276,13 +276,13 @@ PreHandleContext requireKeyIfReceiverSigRequired( /** * Returns all (required and optional) keys of a nested transaction. 
* - * @param nestedTxn the {@link TransactionBody} which keys are needed - * @param payerForNested the payer for the nested transaction + * @param body the {@link TransactionBody} which keys are needed + * @param payerId the payer for the nested transaction * @return the set of keys * @throws PreCheckException If there is a problem with the nested transaction */ @NonNull - TransactionKeys allKeysForTransaction(@NonNull TransactionBody nestedTxn, @NonNull AccountID payerForNested) + TransactionKeys allKeysForTransaction(@NonNull TransactionBody body, @NonNull AccountID payerId) throws PreCheckException; /** diff --git a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/workflows/TransactionKeys.java b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/workflows/TransactionKeys.java index 80d5ebc78d11..810283412a53 100644 --- a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/workflows/TransactionKeys.java +++ b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/workflows/TransactionKeys.java @@ -25,7 +25,6 @@ * Contains all keys and hollow accounts (required and optional) of a transaction. 
*/ public interface TransactionKeys { - /** * Getter for the payer key * diff --git a/hedera-node/hedera-app-spi/src/main/java/module-info.java b/hedera-node/hedera-app-spi/src/main/java/module-info.java index 7e71e2fa1ddd..ca84080f4df9 100644 --- a/hedera-node/hedera-app-spi/src/main/java/module-info.java +++ b/hedera-node/hedera-app-spi/src/main/java/module-info.java @@ -1,6 +1,7 @@ module com.hedera.node.app.spi { requires transitive com.hedera.node.app.hapi.utils; requires transitive com.hedera.node.hapi; + requires transitive com.swirlds.common; requires transitive com.swirlds.config.api; requires transitive com.swirlds.state.api; requires transitive com.hedera.pbj.runtime; diff --git a/hedera-node/hedera-app-spi/src/testFixtures/java/com/hedera/node/app/spi/fixtures/workflows/FakePreHandleContext.java b/hedera-node/hedera-app-spi/src/testFixtures/java/com/hedera/node/app/spi/fixtures/workflows/FakePreHandleContext.java index 912a28791d56..472bc7d5edd3 100644 --- a/hedera-node/hedera-app-spi/src/testFixtures/java/com/hedera/node/app/spi/fixtures/workflows/FakePreHandleContext.java +++ b/hedera-node/hedera-app-spi/src/testFixtures/java/com/hedera/node/app/spi/fixtures/workflows/FakePreHandleContext.java @@ -423,8 +423,7 @@ public PreHandleContext requireSignatureForHollowAccountCreation(@NonNull final @NonNull @Override - public TransactionKeys allKeysForTransaction( - @NonNull TransactionBody nestedTxn, @NonNull AccountID payerForNested) { + public TransactionKeys allKeysForTransaction(@NonNull TransactionBody body, @NonNull AccountID payerId) { throw new UnsupportedOperationException("Not yet implemented"); } diff --git a/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/BlockStreamManagerBenchmark.java b/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/BlockStreamManagerBenchmark.java index d309f4057bc8..66d772d61658 100644 --- a/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/BlockStreamManagerBenchmark.java +++ 
b/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/BlockStreamManagerBenchmark.java @@ -43,6 +43,7 @@ import com.hedera.node.app.fixtures.state.FakeState; import com.hedera.node.app.services.AppContextImpl; import com.hedera.node.app.spi.signatures.SignatureVerifier; +import com.hedera.node.app.tss.PlaceholderTssLibrary; import com.hedera.node.app.tss.TssBaseServiceImpl; import com.hedera.node.config.ConfigProvider; import com.hedera.pbj.runtime.OneOf; @@ -117,6 +118,8 @@ public static void main(String... args) throws Exception { private final TssBaseServiceImpl tssBaseService = new TssBaseServiceImpl( new AppContextImpl(Instant::now, fakeSignatureVerifier(), UNAVAILABLE_GOSSIP), ForkJoinPool.commonPool(), + ForkJoinPool.commonPool(), + new PlaceholderTssLibrary(), ForkJoinPool.commonPool()); private final BlockStreamManagerImpl subject = new BlockStreamManagerImpl( NoopBlockItemWriter::new, diff --git a/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/StandaloneRoundManagement.java b/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/StandaloneRoundManagement.java index f5606da96977..31daa4cd0c35 100644 --- a/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/StandaloneRoundManagement.java +++ b/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/StandaloneRoundManagement.java @@ -43,6 +43,7 @@ import com.hedera.node.app.fixtures.state.FakeState; import com.hedera.node.app.services.AppContextImpl; import com.hedera.node.app.spi.signatures.SignatureVerifier; +import com.hedera.node.app.tss.PlaceholderTssLibrary; import com.hedera.node.app.tss.TssBaseServiceImpl; import com.hedera.node.config.ConfigProvider; import com.hedera.node.config.data.BlockStreamConfig; @@ -93,6 +94,8 @@ public class StandaloneRoundManagement { private final TssBaseServiceImpl tssBaseService = new TssBaseServiceImpl( new AppContextImpl(Instant::now, fakeSignatureVerifier(), UNAVAILABLE_GOSSIP), ForkJoinPool.commonPool(), + 
ForkJoinPool.commonPool(), + new PlaceholderTssLibrary(), ForkJoinPool.commonPool()); private final BlockStreamManagerImpl subject = new BlockStreamManagerImpl( NoopBlockItemWriter::new, diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/Hedera.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/Hedera.java index 1f4c6f6cbfe7..99e4902c554d 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/Hedera.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/Hedera.java @@ -77,7 +77,7 @@ import com.hedera.node.app.info.GenesisNetworkInfo; import com.hedera.node.app.info.StateNetworkInfo; import com.hedera.node.app.records.BlockRecordService; -import com.hedera.node.app.roster.RosterServiceImpl; +import com.hedera.node.app.roster.RosterService; import com.hedera.node.app.service.addressbook.impl.AddressBookServiceImpl; import com.hedera.node.app.service.consensus.impl.ConsensusServiceImpl; import com.hedera.node.app.service.contract.impl.ContractServiceImpl; @@ -115,12 +115,14 @@ import com.hedera.node.config.data.VersionConfig; import com.hedera.node.config.types.StreamMode; import com.hedera.pbj.runtime.io.buffer.Bytes; +import com.swirlds.common.RosterStateId; import com.swirlds.common.constructable.ClassConstructorPair; import com.swirlds.common.constructable.ConstructableRegistry; import com.swirlds.common.constructable.ConstructableRegistryException; import com.swirlds.common.constructable.RuntimeConstructable; import com.swirlds.common.crypto.CryptographyHolder; import com.swirlds.common.crypto.Hash; +import com.swirlds.common.crypto.Signature; import com.swirlds.common.notification.NotificationEngine; import com.swirlds.common.platform.NodeId; import com.swirlds.config.api.Configuration; @@ -400,7 +402,7 @@ public Hedera( new CongestionThrottleService(), new NetworkServiceImpl(), new AddressBookServiceImpl(), - new RosterServiceImpl(), + new RosterService(), PLATFORM_STATE_SERVICE) 
.forEach(servicesRegistry::register); try { @@ -487,7 +489,7 @@ public List initPlatformState(@NonNull final State state) final var deserializedVersion = serviceMigrator.creationVersionOf(state); return serviceMigrator.doMigrations( state, - servicesRegistry.subRegistryFor(EntityIdService.NAME, PlatformStateService.NAME), + servicesRegistry.subRegistryFor(EntityIdService.NAME, PlatformStateService.NAME, RosterStateId.NAME), deserializedVersion == null ? null : new ServicesSoftwareVersion(deserializedVersion), version, bootstrapConfigProvider.getConfiguration(), @@ -596,7 +598,6 @@ private List onMigrate( final var readableStore = new ReadablePlatformStateStore(state.getReadableStates(PlatformStateService.NAME)); final var genesisRoster = createRoster(requireNonNull(readableStore.getAddressBook())); - genesisNetworkInfo = new GenesisNetworkInfo(genesisRoster, ledgerConfig.id()); } final List migrationStateChanges = new ArrayList<>(); @@ -693,6 +694,11 @@ public void submit(@NonNull final TransactionBody body) { } } + @Override + public Signature sign(final byte[] ledgerId) { + return platform.sign(ledgerId); + } + /** * Called to perform orderly close record streams. 
*/ diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/ServicesMain.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/ServicesMain.java index 51c3a84bbc2a..7c76ac3a9007 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/ServicesMain.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/ServicesMain.java @@ -38,6 +38,7 @@ import com.hedera.node.app.services.OrderedServiceMigrator; import com.hedera.node.app.services.ServicesRegistryImpl; +import com.hedera.node.app.tss.PlaceholderTssLibrary; import com.hedera.node.app.tss.TssBaseServiceImpl; import com.swirlds.base.time.Time; import com.swirlds.common.constructable.ConstructableRegistry; @@ -370,6 +371,11 @@ private static Hedera newHedera() { ServicesRegistryImpl::new, new OrderedServiceMigrator(), InstantSource.system(), - appContext -> new TssBaseServiceImpl(appContext, ForkJoinPool.commonPool(), ForkJoinPool.commonPool())); + appContext -> new TssBaseServiceImpl( + appContext, + ForkJoinPool.commonPool(), + ForkJoinPool.commonPool(), + new PlaceholderTssLibrary(), + ForkJoinPool.commonPool())); } } diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/fees/ChildFeeContextImpl.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/fees/ChildFeeContextImpl.java index dd4783b6148f..560132398f49 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/fees/ChildFeeContextImpl.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/fees/ChildFeeContextImpl.java @@ -108,7 +108,7 @@ public FeeCalculatorFactory feeCalculatorFactory() { } @Override - public @Nullable Configuration configuration() { + public @NonNull Configuration configuration() { return context.configuration(); } diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/roster/RosterServiceImpl.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/roster/RosterService.java similarity index 85% rename from 
hedera-node/hedera-app/src/main/java/com/hedera/node/app/roster/RosterServiceImpl.java rename to hedera-node/hedera-app/src/main/java/com/hedera/node/app/roster/RosterService.java index ba4368294484..b933934815b7 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/roster/RosterServiceImpl.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/roster/RosterService.java @@ -18,7 +18,8 @@ import static java.util.Objects.requireNonNull; -import com.hedera.node.app.roster.schemas.V0540RosterSchema; +import com.swirlds.common.RosterStateId; +import com.swirlds.platform.state.service.schemas.V0540RosterSchema; import com.swirlds.state.spi.SchemaRegistry; import com.swirlds.state.spi.Service; import edu.umd.cs.findbugs.annotations.NonNull; @@ -28,14 +29,12 @@ * Registers the roster schemas with the {@link SchemaRegistry}. * Not exposed outside `hedera-app`. */ -public class RosterServiceImpl implements Service { - /** The name of this service */ - public static final String NAME = "RosterService"; +public class RosterService implements Service { @NonNull @Override public String getServiceName() { - return NAME; + return RosterStateId.NAME; } @Override diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/signature/DefaultKeyVerifier.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/signature/DefaultKeyVerifier.java index 5b5f6e8ebd53..fb7639518920 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/signature/DefaultKeyVerifier.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/signature/DefaultKeyVerifier.java @@ -23,19 +23,25 @@ import com.hedera.hapi.node.base.Key; import com.hedera.hapi.node.base.KeyList; +import com.hedera.node.app.spi.key.KeyComparator; import com.hedera.node.app.spi.signatures.SignatureVerification; import com.hedera.node.app.spi.signatures.VerificationAssistant; import com.hedera.node.config.data.HederaConfig; import 
com.hedera.pbj.runtime.io.buffer.Bytes; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.AbstractMap; +import java.util.Comparator; import java.util.List; import java.util.Map; +import java.util.SortedSet; +import java.util.TreeSet; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.function.Supplier; +import java.util.stream.Collectors; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -43,9 +49,10 @@ * Base implementation of {@link AppKeyVerifier} */ public class DefaultKeyVerifier implements AppKeyVerifier { - private static final Logger logger = LogManager.getLogger(DefaultKeyVerifier.class); + private static final Comparator KEY_COMPARATOR = new KeyComparator(); + private final int legacyFeeCalcNetworkVpt; private final long timeout; private final Map keyVerifications; @@ -137,6 +144,16 @@ public int numSignaturesVerified() { return legacyFeeCalcNetworkVpt; } + @Override + public SortedSet signingCryptoKeys() { + return keyVerifications.entrySet().stream() + .map(entry -> new AbstractMap.SimpleImmutableEntry<>( + entry.getKey(), resolveFuture(entry.getValue(), () -> failedVerification(entry.getKey())))) + .filter(e -> e.getValue().passed()) + .map(Map.Entry::getKey) + .collect(Collectors.toCollection(() -> new TreeSet<>(KEY_COMPARATOR))); + } + /** * Get a {@link Future} for the given key. 
* diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/signature/DelegateKeyVerifier.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/signature/DelegateKeyVerifier.java deleted file mode 100644 index 2e9597ed87ed..000000000000 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/signature/DelegateKeyVerifier.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright (C) 2023-2024 Hedera Hashgraph, LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.hedera.node.app.signature; - -import static com.hedera.node.app.signature.impl.SignatureVerificationImpl.failedVerification; -import static com.hedera.node.app.signature.impl.SignatureVerificationImpl.passedVerification; -import static java.util.Objects.requireNonNull; - -import com.hedera.hapi.node.base.Key; -import com.hedera.hapi.node.base.KeyList; -import com.hedera.node.app.signature.impl.SignatureVerificationImpl; -import com.hedera.node.app.spi.signatures.SignatureVerification; -import com.hedera.node.app.spi.signatures.VerificationAssistant; -import com.hedera.pbj.runtime.io.buffer.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.function.Predicate; - -/** - * A {@link AppKeyVerifier} that delegates resolves complex keys and passes checks for primitive keys - * to a provided {@link Predicate}-verifier. 
- */ -public class DelegateKeyVerifier implements AppKeyVerifier { - - private final Predicate baseVerifier; - - /** - * Constructs a {@link DelegateKeyVerifier} - * - * @param baseVerifier the base verifier - */ - public DelegateKeyVerifier(@NonNull final Predicate baseVerifier) { - this.baseVerifier = requireNonNull(baseVerifier, "baseVerifier must not be null"); - } - - @NonNull - @Override - public SignatureVerification verificationFor(@NonNull final Key key) { - requireNonNull(key, "key must not be null"); - return doVerification(key, baseVerifier); - } - - @NonNull - @Override - public SignatureVerification verificationFor( - @NonNull final Key key, @NonNull final VerificationAssistant callback) { - requireNonNull(key, "key must not be null"); - requireNonNull(callback, "callback must not be null"); - final Predicate composedVerifier = key1 -> { - final var intermediateVerification = baseVerifier.test(key1) - ? SignatureVerificationImpl.passedVerification(key1) - : SignatureVerificationImpl.failedVerification(key1); - return callback.test(key1, intermediateVerification); - }; - return doVerification(key, composedVerifier); - } - - @NonNull - @Override - public SignatureVerification verificationFor(@NonNull Bytes evmAlias) { - requireNonNull(evmAlias, "evmAlias must not be null"); - return failedVerification(evmAlias); - } - - @Override - public int numSignaturesVerified() { - return 0; - } - - /** - * Does a complex verification of a {@link Key} using the provided {@link Predicate} to check primitive keys. 
- * - * @param key the {@link Key} to verify - * @param primitiveVerifier the {@link Predicate} to use to verify primitive keys - * @return the {@link SignatureVerification} result - */ - @NonNull - private static SignatureVerification doVerification( - @NonNull final Key key, @NonNull final Predicate primitiveVerifier) { - final var result = - switch (key.key().kind()) { - case KEY_LIST -> { - final var keys = key.keyListOrThrow().keys(); - boolean failed = keys.isEmpty(); // an empty keyList fails by definition - for (final var childKey : keys) { - failed |= - doVerification(childKey, primitiveVerifier).failed(); - } - yield !failed; - } - case THRESHOLD_KEY -> { - final var thresholdKey = key.thresholdKeyOrThrow(); - final var keyList = thresholdKey.keysOrElse(KeyList.DEFAULT); - final var keys = keyList.keys(); - final var threshold = thresholdKey.threshold(); - final var clampedThreshold = Math.max(1, Math.min(threshold, keys.size())); - var passed = 0; - for (final var childKey : keys) { - if (doVerification(childKey, primitiveVerifier).passed()) { - passed++; - } - } - yield passed >= clampedThreshold; - } - default -> primitiveVerifier.test(key); - }; - return result ? 
passedVerification(key) : failedVerification(key); - } -} diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/state/merkle/MerkleSchemaRegistry.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/state/merkle/MerkleSchemaRegistry.java index 79677b7a9b0d..ea04a326128f 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/state/merkle/MerkleSchemaRegistry.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/state/merkle/MerkleSchemaRegistry.java @@ -21,6 +21,7 @@ import static com.hedera.node.app.state.merkle.SchemaApplicationType.STATE_DEFINITIONS; import static com.hedera.node.app.state.merkle.VersionUtils.alreadyIncludesStateDefs; import static com.hedera.node.app.state.merkle.VersionUtils.isSoOrdered; +import static com.hedera.node.app.workflows.handle.metric.UnavailableMetrics.UNAVAILABLE_METRICS; import static java.util.Objects.requireNonNull; import com.hedera.hapi.node.base.SemanticVersion; @@ -346,7 +347,15 @@ private RedefinedWritableStates applyStateDefinitions( new VirtualMap<>(label, keySerializer, valueSerializer, dsBuilder); return virtualMap; }, - virtualMap -> virtualMap.registerMetrics(metrics)); + // Register the metrics for the virtual map if they are available. + // Early rounds of migration done by services such as PlatformStateService, + // EntityIdService and RosterService will not have metrics available yet, but their + // later rounds of migration will. + // Therefore, for the first round of migration, we will not register the metrics for + // virtual maps. + UNAVAILABLE_METRICS.equals(metrics) + ? 
virtualMap -> {} + : virtualMap -> virtualMap.registerMetrics(metrics)); } }); diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/ReadableStoreFactory.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/ReadableStoreFactory.java index 5e6c98518fcd..fe01e873e2b8 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/ReadableStoreFactory.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/ReadableStoreFactory.java @@ -57,9 +57,12 @@ import com.hedera.node.app.service.token.impl.ReadableStakingInfoStoreImpl; import com.hedera.node.app.service.token.impl.ReadableTokenRelationStoreImpl; import com.hedera.node.app.service.token.impl.ReadableTokenStoreImpl; +import com.swirlds.common.RosterStateId; import com.swirlds.platform.state.MerkleStateRoot; import com.swirlds.platform.state.service.PlatformStateService; import com.swirlds.platform.state.service.ReadablePlatformStateStore; +import com.swirlds.platform.state.service.ReadableRosterStore; +import com.swirlds.platform.state.service.ReadableRosterStoreImpl; import com.swirlds.platform.system.SoftwareVersion; import com.swirlds.state.State; import com.swirlds.state.spi.ReadableStates; @@ -111,9 +114,11 @@ private static Map, StoreEntry> createFactoryMap() { newMap.put( ReadableBlockRecordStore.class, new StoreEntry(BlockRecordService.NAME, ReadableBlockRecordStore::new)); newMap.put(ReadableNodeStore.class, new StoreEntry(AddressBookService.NAME, ReadableNodeStoreImpl::new)); + // Platform newMap.put( ReadablePlatformStateStore.class, new StoreEntry(PlatformStateService.NAME, ReadablePlatformStateStore::new)); + newMap.put(ReadableRosterStore.class, new StoreEntry(RosterStateId.NAME, ReadableRosterStoreImpl::new)); return Collections.unmodifiableMap(newMap); } diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/WritableStoreFactory.java 
b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/WritableStoreFactory.java index 8b225a4042bf..a3fb51c1d3d4 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/WritableStoreFactory.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/WritableStoreFactory.java @@ -43,7 +43,9 @@ import com.hedera.node.app.service.token.impl.WritableTokenRelationStore; import com.hedera.node.app.service.token.impl.WritableTokenStore; import com.hedera.node.app.spi.metrics.StoreMetricsService; +import com.swirlds.common.RosterStateId; import com.swirlds.config.api.Configuration; +import com.swirlds.platform.state.service.WritableRosterStore; import com.swirlds.state.State; import com.swirlds.state.spi.WritableStates; import edu.umd.cs.findbugs.annotations.NonNull; @@ -103,7 +105,10 @@ private static Map, StoreEntry> createFactoryMap() { new StoreEntry(EntityIdService.NAME, (states, config, metrics) -> new WritableEntityIdStore(states))); // Schedule Service newMap.put(WritableScheduleStore.class, new StoreEntry(ScheduleService.NAME, WritableScheduleStoreImpl::new)); - newMap.put(WritableNodeStore.class, new StoreEntry(AddressBookService.NAME, WritableNodeStore::new)); + // Roster Service + newMap.put( + WritableRosterStore.class, + new StoreEntry(RosterStateId.NAME, (states, config, metrics) -> new WritableRosterStore(states))); return Collections.unmodifiableMap(newMap); } diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/SynchronizedThrottleAccumulator.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/SynchronizedThrottleAccumulator.java index 8b82e80164df..30f5067ea474 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/SynchronizedThrottleAccumulator.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/SynchronizedThrottleAccumulator.java @@ -72,17 +72,19 @@ public synchronized boolean shouldThrottle(@NonNull 
TransactionInfo txnInfo, Sta * * @param queryFunction the functionality of the query * @param query the query to update the throttle requirements for + * @param state the current state of the node * @param queryPayerId the payer id of the query * @return whether the query should be throttled */ public synchronized boolean shouldThrottle( @NonNull final HederaFunctionality queryFunction, @NonNull final Query query, + @NonNull final State state, @Nullable AccountID queryPayerId) { requireNonNull(query); requireNonNull(queryFunction); setDecisionTime(instantSource.instant()); - return frontendThrottle.checkAndEnforceThrottle(queryFunction, lastDecisionTime, query, queryPayerId); + return frontendThrottle.checkAndEnforceThrottle(queryFunction, lastDecisionTime, query, state, queryPayerId); } private void setDecisionTime(@NonNull final Instant time) { diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleAccumulator.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleAccumulator.java index 290151d42437..6be8929b1343 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleAccumulator.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleAccumulator.java @@ -20,6 +20,7 @@ import static com.hedera.hapi.node.base.HederaFunctionality.CONTRACT_CALL_LOCAL; import static com.hedera.hapi.node.base.HederaFunctionality.CONTRACT_CREATE; import static com.hedera.hapi.node.base.HederaFunctionality.CRYPTO_CREATE; +import static com.hedera.hapi.node.base.HederaFunctionality.CRYPTO_GET_ACCOUNT_BALANCE; import static com.hedera.hapi.node.base.HederaFunctionality.CRYPTO_TRANSFER; import static com.hedera.hapi.node.base.HederaFunctionality.ETHEREUM_TRANSACTION; import static com.hedera.hapi.node.base.HederaFunctionality.TOKEN_ASSOCIATE_TO_ACCOUNT; @@ -171,6 +172,7 @@ public boolean checkAndEnforceThrottle( * @param queryFunction the functionality of the query * @param 
now the time at which the query is being processed * @param query the query to update the throttle requirements for + * @param state the current state of the node * @param queryPayerId the payer id of the query * @return whether the query should be throttled */ @@ -178,6 +180,7 @@ public boolean checkAndEnforceThrottle( @NonNull final HederaFunctionality queryFunction, @NonNull final Instant now, @NonNull final Query query, + @NonNull final State state, @Nullable final AccountID queryPayerId) { final var configuration = configProvider.getConfiguration(); if (throttleExempt(queryPayerId, configuration)) { @@ -197,13 +200,37 @@ public boolean checkAndEnforceThrottle( if (manager == null) { return true; } - if (!manager.allReqsMetAt(now)) { + + final boolean allReqMet; + if (queryFunction == CRYPTO_GET_ACCOUNT_BALANCE + && configuration.getConfigData(TokensConfig.class).countingGetBalanceThrottleEnabled()) { + final var accountStore = new ReadableStoreFactory(state).getStore(ReadableAccountStore.class); + final var tokenConfig = configuration.getConfigData(TokensConfig.class); + final int associationCount = + Math.clamp(getAssociationCount(query, accountStore), 1, tokenConfig.maxRelsPerInfoQuery()); + allReqMet = manager.allReqsMetAt(now, associationCount, ONE_TO_ONE); + } else { + allReqMet = manager.allReqsMetAt(now); + } + + if (!allReqMet) { reclaimLastAllowedUse(); return true; } return false; } + private int getAssociationCount(@NonNull final Query query, @NonNull final ReadableAccountStore accountStore) { + final var accountID = query.cryptogetAccountBalanceOrThrow().accountID(); + if (accountID != null) { + final var account = accountStore.getAccountById(accountID); + if (account != null) { + return account.numberAssociations(); + } + } + return 0; + } + /** * Updates the throttle requirements for given number of transactions of same functionality and returns whether they should be throttled. 
* diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/fakes/tss/FakeTssLibrary.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/PlaceholderTssLibrary.java similarity index 94% rename from hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/fakes/tss/FakeTssLibrary.java rename to hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/PlaceholderTssLibrary.java index 99e66d1e03ce..ed7c5216d646 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/fakes/tss/FakeTssLibrary.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/PlaceholderTssLibrary.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package com.hedera.services.bdd.junit.hedera.embedded.fakes.tss; +package com.hedera.node.app.tss; import static com.hedera.node.app.hapi.utils.CommonUtils.noThrowSha384HashOf; @@ -35,7 +35,8 @@ import java.util.List; import org.jetbrains.annotations.NotNull; -public class FakeTssLibrary implements TssLibrary { +public class PlaceholderTssLibrary implements TssLibrary { + public static final int DEFAULT_THRESHOLD = 10; public static final SignatureSchema SIGNATURE_SCHEMA = SignatureSchema.create(new byte[] {1}); private static final PairingPrivateKey AGGREGATED_PRIVATE_KEY = new PairingPrivateKey(new FakeFieldElement(BigInteger.valueOf(42L)), SIGNATURE_SCHEMA); @@ -45,7 +46,7 @@ public class FakeTssLibrary implements TssLibrary { private final int threshold; private byte[] message = new byte[0]; - public FakeTssLibrary(int threshold) { + public PlaceholderTssLibrary(int threshold) { if (threshold <= 0) { throw new IllegalArgumentException("Invalid threshold: " + threshold); } @@ -53,6 +54,10 @@ public FakeTssLibrary(int threshold) { this.threshold = threshold; } + public PlaceholderTssLibrary() { + this(DEFAULT_THRESHOLD); + } + @NotNull @Override public TssMessage generateTssMessage(@NotNull 
TssParticipantDirectory tssParticipantDirectory) { @@ -149,7 +154,7 @@ public boolean verifySignature( } // This method is not part of the TssLibrary interface, used for testing purposes - void setTestMessage(byte[] message) { + public void setTestMessage(byte[] message) { this.message = message; } } diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseService.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseService.java index d0ac740d1611..fbc131cef275 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseService.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseService.java @@ -19,7 +19,7 @@ import com.hedera.hapi.node.state.roster.Roster; import com.hedera.node.app.spi.workflows.HandleContext; import com.hedera.node.app.tss.handlers.TssHandlers; -import com.hedera.node.app.tss.stores.ReadableTssBaseStore; +import com.hedera.node.app.tss.stores.ReadableTssStoreImpl; import com.hedera.pbj.runtime.io.buffer.Bytes; import com.swirlds.state.spi.Service; import edu.umd.cs.findbugs.annotations.NonNull; @@ -63,7 +63,7 @@ default String getServiceName() { * @param tssBaseStore the store to read the TSS base state from * @return the status of the TSS service */ - Status getStatus(@NonNull Roster roster, @NonNull Bytes ledgerId, @NonNull ReadableTssBaseStore tssBaseStore); + Status getStatus(@NonNull Roster roster, @NonNull Bytes ledgerId, @NonNull ReadableTssStoreImpl tssBaseStore); /** * Adopts the given roster for TSS operations. 
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseServiceComponent.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseServiceComponent.java index 7b42888bb5cd..fb9333369c6f 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseServiceComponent.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseServiceComponent.java @@ -26,12 +26,14 @@ import javax.inject.Singleton; @Singleton -@Component() +@Component(modules = {TssModule.class}) public interface TssBaseServiceComponent { @Component.Factory interface Factory { TssBaseServiceComponent create( - @BindsInstance AppContext.Gossip gossip, @BindsInstance Executor submissionExecutor); + @BindsInstance AppContext.Gossip gossip, + @BindsInstance Executor submissionExecutor, + @BindsInstance @TssLibraryExecutor Executor libraryExecutor); } TssMessageHandler tssMessageHandler(); diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseServiceImpl.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseServiceImpl.java index 3d9363b7090f..1861c8c240fa 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseServiceImpl.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseServiceImpl.java @@ -18,19 +18,26 @@ import static com.hedera.node.app.hapi.utils.CommonUtils.noThrowSha384HashOf; import static com.hedera.node.app.tss.TssBaseService.Status.PENDING_LEDGER_ID; +import static com.hedera.node.app.tss.handlers.TssUtils.computeTssParticipantDirectory; +import static com.hedera.node.app.tss.handlers.TssUtils.getTssMessages; +import static com.hedera.node.app.tss.handlers.TssUtils.validateTssMessages; import static java.util.Objects.requireNonNull; import com.hedera.hapi.node.state.roster.Roster; import com.hedera.hapi.services.auxiliary.tss.TssMessageTransactionBody; import com.hedera.node.app.spi.AppContext; import 
com.hedera.node.app.spi.workflows.HandleContext; +import com.hedera.node.app.tss.api.TssLibrary; +import com.hedera.node.app.tss.api.TssPrivateShare; import com.hedera.node.app.tss.handlers.TssHandlers; import com.hedera.node.app.tss.handlers.TssSubmissions; import com.hedera.node.app.tss.schemas.V0560TssBaseSchema; -import com.hedera.node.app.tss.stores.ReadableTssBaseStore; +import com.hedera.node.app.tss.stores.ReadableTssStoreImpl; +import com.hedera.node.config.data.TssConfig; import com.hedera.pbj.runtime.io.buffer.Bytes; import com.swirlds.common.utility.CommonUtils; import com.swirlds.platform.roster.RosterUtils; +import com.swirlds.platform.state.service.ReadableRosterStore; import com.swirlds.state.spi.SchemaRegistry; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; @@ -39,6 +46,7 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; import java.util.function.Consumer; import org.apache.logging.log4j.LogManager; @@ -59,6 +67,8 @@ public class TssBaseServiceImpl implements TssBaseService { private final TssHandlers tssHandlers; private final TssSubmissions tssSubmissions; private final ExecutorService signingExecutor; + private final TssLibrary tssLibrary; + private final Executor tssLibraryExecutor; /** * The hash of the active roster being used to sign with the ledger private key. 
@@ -69,12 +79,17 @@ public class TssBaseServiceImpl implements TssBaseService { public TssBaseServiceImpl( @NonNull final AppContext appContext, @NonNull final ExecutorService signingExecutor, - @NonNull final Executor submissionExecutor) { + @NonNull final Executor submissionExecutor, + @NonNull final TssLibrary tssLibrary, + @NonNull final Executor tssLibraryExecutor) { requireNonNull(appContext); this.signingExecutor = requireNonNull(signingExecutor); - final var component = DaggerTssBaseServiceComponent.factory().create(appContext.gossip(), submissionExecutor); + final var component = DaggerTssBaseServiceComponent.factory() + .create(appContext.gossip(), submissionExecutor, tssLibraryExecutor); tssHandlers = new TssHandlers(component.tssMessageHandler(), component.tssVoteHandler()); tssSubmissions = component.tssSubmissions(); + this.tssLibrary = requireNonNull(tssLibrary); + this.tssLibraryExecutor = requireNonNull(tssLibraryExecutor); } @Override @@ -87,7 +102,7 @@ public void registerSchemas(@NonNull final SchemaRegistry registry) { public Status getStatus( @NonNull final Roster roster, @NonNull final Bytes ledgerId, - @NonNull final ReadableTssBaseStore tssBaseStore) { + @NonNull final ReadableTssStoreImpl tssBaseStore) { requireNonNull(roster); requireNonNull(ledgerId); requireNonNull(tssBaseStore); @@ -116,8 +131,61 @@ public void bootstrapLedgerId( @Override public void setCandidateRoster(@NonNull final Roster roster, @NonNull final HandleContext context) { requireNonNull(roster); - // (TSS-FUTURE) https://github.com/hashgraph/hedera-services/issues/14748 - tssSubmissions.submitTssMessage(TssMessageTransactionBody.DEFAULT, context); + + // (TSS-FUTURE) Implement `keyActiveRoster` + // https://github.com/hashgraph/hedera-services/issues/16166 + + // generate TSS messages based on the active roster and the candidate roster + final var tssStore = context.storeFactory().readableStore(ReadableTssStoreImpl.class); + final var maxSharesPerNode = + 
context.configuration().getConfigData(TssConfig.class).maxSharesPerNode(); + final var sourceRoster = + context.storeFactory().readableStore(ReadableRosterStore.class).getActiveRoster(); + final var activeRosterHash = RosterUtils.hash(sourceRoster).getBytes(); + final var candidateRosterHash = RosterUtils.hash(roster).getBytes(); + final var tssPrivateShares = + getTssPrivateShares(sourceRoster, maxSharesPerNode, tssStore, candidateRosterHash, context); + final var candidateRosterParticipantDirectory = computeTssParticipantDirectory(roster, maxSharesPerNode, (int) + context.networkInfo().selfNodeInfo().nodeId()); + + final AtomicInteger shareIndex = new AtomicInteger(0); + for (final var tssPrivateShare : tssPrivateShares) { + final var tssMsg = CompletableFuture.supplyAsync( + () -> tssLibrary.generateTssMessage(candidateRosterParticipantDirectory, tssPrivateShare), + tssLibraryExecutor) + .exceptionally(e -> { + log.error("Error generating tssMessage", e); + return null; + }); + tssMsg.thenAccept(msg -> { + if (msg == null) { + return; + } + final var tssMessage = TssMessageTransactionBody.newBuilder() + .sourceRosterHash(activeRosterHash) + .targetRosterHash(candidateRosterHash) + .shareIndex(shareIndex.getAndAdd(1)) + .tssMessage(Bytes.wrap(msg.bytes())) + .build(); + tssSubmissions.submitTssMessage(tssMessage, context); + }); + } + } + + @NonNull + private List getTssPrivateShares( + @NonNull final Roster sourceRoster, + final long maxSharesPerNode, + @NonNull final ReadableTssStoreImpl tssStore, + @NonNull final Bytes candidateRosterHash, + final HandleContext context) { + final var selfId = (int) context.networkInfo().selfNodeInfo().nodeId(); + final var activeRosterParticipantDirectory = + computeTssParticipantDirectory(sourceRoster, maxSharesPerNode, selfId); + final var validTssOps = validateTssMessages( + tssStore.getTssMessages(candidateRosterHash), activeRosterParticipantDirectory, tssLibrary); + final var validTssMessages = 
getTssMessages(validTssOps); + return tssLibrary.decryptPrivateShares(activeRosterParticipantDirectory, validTssMessages); } @Override diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssCryptographyManager.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssCryptographyManager.java new file mode 100644 index 000000000000..598b63bdbf35 --- /dev/null +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssCryptographyManager.java @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.tss; + +import static com.hedera.node.app.tss.handlers.TssUtils.getThresholdForTssMessages; +import static com.hedera.node.app.tss.handlers.TssUtils.getTssMessages; +import static com.hedera.node.app.tss.handlers.TssUtils.validateTssMessages; + +import com.hedera.hapi.node.state.tss.TssVoteMapKey; +import com.hedera.hapi.services.auxiliary.tss.TssMessageTransactionBody; +import com.hedera.hapi.services.auxiliary.tss.TssVoteTransactionBody; +import com.hedera.node.app.spi.AppContext; +import com.hedera.node.app.spi.workflows.HandleContext; +import com.hedera.node.app.tss.api.TssLibrary; +import com.hedera.node.app.tss.api.TssParticipantDirectory; +import com.hedera.node.app.tss.pairings.PairingPublicKey; +import com.hedera.node.app.tss.stores.WritableTssStore; +import com.swirlds.common.crypto.Signature; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.BitSet; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executor; +import javax.inject.Inject; +import javax.inject.Singleton; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +/** + * This is yet to be implemented + */ +@Singleton +public class TssCryptographyManager { + private static final Logger log = LogManager.getLogger(TssCryptographyManager.class); + private final TssLibrary tssLibrary; + private AppContext.Gossip gossip; + private Executor libraryExecutor; + + @Inject + public TssCryptographyManager( + @NonNull final TssLibrary tssLibrary, + @NonNull final AppContext.Gossip gossip, + @NonNull @TssLibraryExecutor final Executor libraryExecutor) { + this.tssLibrary = tssLibrary; + this.gossip = gossip; + this.libraryExecutor = libraryExecutor; + } + + /** + * Handles a TssMessageTransaction. + * This method validates the TssMessages and computes the ledger id if the threshold + * is met. 
+ * Then signs the ledgerId with the node's RSA key and returns the signature with the computed ledgerID. + * If the threshold is not met, the method returns null. + * The most expensive operations involving {@link TssLibrary} are + * executed asynchronously. + * + * @param op the TssMessageTransaction + * @param tssParticipantDirectory the TSS participant directory + * @param context the handle context + * @return a CompletableFuture containing the ledger id and signature if the threshold is met, null otherwise + */ + public CompletableFuture handleTssMessageTransaction( + @NonNull final TssMessageTransactionBody op, + @NonNull final TssParticipantDirectory tssParticipantDirectory, + @NonNull final HandleContext context) { + final var tssStore = context.storeFactory().writableStore(WritableTssStore.class); + final var targetRosterHash = op.targetRosterHash(); + final var tssMessageBodies = tssStore.getTssMessages(targetRosterHash); + + final var isVoteSubmitted = tssStore.getVote(TssVoteMapKey.newBuilder() + .nodeId(context.networkInfo().selfNodeInfo().nodeId()) + .rosterHash(targetRosterHash) + .build()) + != null; + // If the node didn't submit a TssVoteTransaction, validate all TssMessages and compute the vote bit set + // to see if a threshold is met + if (!isVoteSubmitted) { + return computeAndSignLedgerIdIfApplicable(tssMessageBodies, tssParticipantDirectory) + .exceptionally(e -> { + log.error("Error computing public keys and signing", e); + return null; + }); + } + return CompletableFuture.completedFuture(null); + } + + /** + * Compute and sign the ledger id if the threshold is met. If the threshold is not met, return null. + * The most expensive operations involving {@link TssLibrary} are executed asynchronously. 
+ * + * @param tssMessageBodies the list of TSS messages + * @param tssParticipantDirectory the TSS participant directory + * @return a CompletableFuture containing the ledger id and signature if the threshold is met, null otherwise + */ + private CompletableFuture computeAndSignLedgerIdIfApplicable( + @NonNull final List tssMessageBodies, + final TssParticipantDirectory tssParticipantDirectory) { + return CompletableFuture.supplyAsync( + () -> { + // Validate TSS transactions and set the vote bit set. + final var validTssOps = validateTssMessages(tssMessageBodies, tssParticipantDirectory, tssLibrary); + boolean tssMessageThresholdMet = isThresholdMet(validTssOps, tssParticipantDirectory); + + // If the threshold is not met, return + if (!tssMessageThresholdMet) { + return null; + } + final var validTssMessages = getTssMessages(validTssOps); + final var computedPublicShares = + tssLibrary.computePublicShares(tssParticipantDirectory, validTssMessages); + + // compute the ledger id and sign it + final var ledgerId = tssLibrary.aggregatePublicShares(computedPublicShares); + final var signature = gossip.sign(ledgerId.publicKey().toBytes()); + + final BitSet tssVoteBitSet = computeTssVoteBitSet(validTssOps); + return new LedgerIdWithSignature(ledgerId, signature, tssVoteBitSet); + }, + libraryExecutor); + } + + /** + * Compute the TSS vote bit set. No need to validate the TSS messages here as they have already been validated. + * + * @param validIssBodies the valid TSS messages + * @return the TSS vote bit set + */ + private BitSet computeTssVoteBitSet(@NonNull final List validIssBodies) { + final var tssVoteBitSet = new BitSet(); + for (TssMessageTransactionBody op : validIssBodies) { + tssVoteBitSet.set((int) op.shareIndex()); + } + return tssVoteBitSet; + } + + /** + * Check if the threshold consensus weight is met to submit a {@link TssVoteTransactionBody}. + * The threshold is met if more than half the consensus weight has been received. 
+ * + * @param validTssMessages the valid TSS messages + * @param tssParticipantDirectory the TSS participant directory + * @return true if the threshold is met, false otherwise + */ + private boolean isThresholdMet( + @NonNull final List validTssMessages, + @NonNull final TssParticipantDirectory tssParticipantDirectory) { + final var numShares = tssParticipantDirectory.getShareIds().size(); + // If more than 1/2 the consensus weight has been received, then the threshold is met + return validTssMessages.size() >= getThresholdForTssMessages(numShares); + } + + /** + * A record containing the ledger id, signature, and TSS vote bit set to be used in generating {@link TssVoteTransactionBody}. + */ + public record LedgerIdWithSignature( + @NonNull PairingPublicKey ledgerId, @NonNull Signature signature, @NonNull BitSet tssVoteBitSet) {} +} diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssLibraryExecutor.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssLibraryExecutor.java new file mode 100644 index 000000000000..8c443b33a751 --- /dev/null +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssLibraryExecutor.java @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2023-2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.tss; + +import static java.lang.annotation.ElementType.METHOD; +import static java.lang.annotation.ElementType.PARAMETER; +import static java.lang.annotation.ElementType.TYPE; +import static java.lang.annotation.RetentionPolicy.RUNTIME; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; +import javax.inject.Qualifier; + +/** + * Identifies the executor for TSS library operations. + */ +@Target({METHOD, PARAMETER, TYPE}) +@Retention(RUNTIME) +@Documented +@Qualifier +public @interface TssLibraryExecutor {} diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssModule.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssModule.java new file mode 100644 index 000000000000..f02ec60a4902 --- /dev/null +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssModule.java @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2020-2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.tss; + +import com.hedera.node.app.spi.AppContext; +import com.hedera.node.app.tss.api.TssLibrary; +import dagger.Binds; +import dagger.Module; +import dagger.Provides; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.concurrent.Executor; +import javax.inject.Singleton; + +@Module +public interface TssModule { + @Provides + @Singleton + static TssCryptographyManager tssCryptographyManager( + @NonNull final AppContext.Gossip gossip, @NonNull @TssLibraryExecutor final Executor libraryExecutor) { + return new TssCryptographyManager(new PlaceholderTssLibrary(), gossip, libraryExecutor); + } + + @Binds + @Singleton + TssLibrary bindTssLibrary(PlaceholderTssLibrary fakeTssLibrary); +} diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/api/TssParticipantDirectory.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/api/TssParticipantDirectory.java index 907dd75edc19..8145845c4f2a 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/api/TssParticipantDirectory.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/api/TssParticipantDirectory.java @@ -135,6 +135,18 @@ public int getThreshold() { return threshold; } + /** + * Returns the tss shares for all the nodes. + * @return the tss shares for all the nodes + */ + public Map> getSharesById() { + Map> sharesById = new HashMap<>(); + for (Entry entry : shareAllocationMap.entrySet()) { + sharesById.computeIfAbsent(entry.getValue(), k -> new ArrayList<>()).add(entry.getKey()); + } + return sharesById; + } + /** * Returns the shares owned by the participant represented as self. 
* diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/handlers/TssMessageHandler.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/handlers/TssMessageHandler.java index c5bdc358fb03..039fa5bbd2c3 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/handlers/TssMessageHandler.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/handlers/TssMessageHandler.java @@ -16,16 +16,24 @@ package com.hedera.node.app.tss.handlers; +import static com.hedera.node.app.tss.handlers.TssUtils.computeTssParticipantDirectory; import static java.util.Objects.requireNonNull; +import com.hedera.hapi.node.state.tss.TssMessageMapKey; import com.hedera.hapi.node.transaction.TransactionBody; import com.hedera.hapi.services.auxiliary.tss.TssMessageTransactionBody; import com.hedera.hapi.services.auxiliary.tss.TssVoteTransactionBody; +import com.hedera.node.app.spi.AppContext; import com.hedera.node.app.spi.workflows.HandleContext; import com.hedera.node.app.spi.workflows.HandleException; import com.hedera.node.app.spi.workflows.PreCheckException; import com.hedera.node.app.spi.workflows.PreHandleContext; import com.hedera.node.app.spi.workflows.TransactionHandler; +import com.hedera.node.app.tss.TssCryptographyManager; +import com.hedera.node.app.tss.stores.WritableTssStore; +import com.hedera.node.config.data.TssConfig; +import com.hedera.pbj.runtime.io.buffer.Bytes; +import com.swirlds.platform.state.service.ReadableRosterStore; import edu.umd.cs.findbugs.annotations.NonNull; import javax.inject.Inject; import javax.inject.Singleton; @@ -37,10 +45,17 @@ @Singleton public class TssMessageHandler implements TransactionHandler { private final TssSubmissions submissionManager; + private final AppContext.Gossip gossip; + private final TssCryptographyManager tssCryptographyManager; @Inject - public TssMessageHandler(@NonNull final TssSubmissions submissionManager) { + public TssMessageHandler( + @NonNull final 
TssSubmissions submissionManager, + @NonNull final AppContext.Gossip gossip, + @NonNull final TssCryptographyManager tssCryptographyManager) { this.submissionManager = requireNonNull(submissionManager); + this.gossip = requireNonNull(gossip); + this.tssCryptographyManager = requireNonNull(tssCryptographyManager); } @Override @@ -56,6 +71,44 @@ public void pureChecks(@NonNull final TransactionBody txn) throws PreCheckExcept @Override public void handle(@NonNull final HandleContext context) throws HandleException { requireNonNull(context); - submissionManager.submitTssVote(TssVoteTransactionBody.DEFAULT, context); + final var op = context.body().tssMessageOrThrow(); + + final var tssStore = context.storeFactory().writableStore(WritableTssStore.class); + final var rosterStore = context.storeFactory().readableStore(ReadableRosterStore.class); + final var maxSharesPerNode = + context.configuration().getConfigData(TssConfig.class).maxSharesPerNode(); + final var numberOfAlreadyExistingMessages = + tssStore.getTssMessages(op.targetRosterHash()).size(); + + // The sequence number starts from 0 and increments by 1 for each new message. + final var key = TssMessageMapKey.newBuilder() + .rosterHash(op.targetRosterHash()) + .sequenceNumber(numberOfAlreadyExistingMessages) + .build(); + // Each tss message is stored in the tss message state and is sent to CryptographyManager for further + // processing. 
+ tssStore.put(key, op); + + final var tssParticipantDirectory = + computeTssParticipantDirectory(rosterStore.getActiveRoster(), maxSharesPerNode, (int) + context.networkInfo().selfNodeInfo().nodeId()); + final var result = tssCryptographyManager.handleTssMessageTransaction(op, tssParticipantDirectory, context); + + result.thenAccept(ledgerIdAndSignature -> { + if (ledgerIdAndSignature != null) { + final var signature = + gossip.sign(ledgerIdAndSignature.ledgerId().publicKey().toBytes()); + // FUTURE: Validate the ledgerId computed is same as the current ledgerId + final var tssVote = TssVoteTransactionBody.newBuilder() + .tssVote(Bytes.wrap(ledgerIdAndSignature.tssVoteBitSet().toByteArray())) + .targetRosterHash(op.targetRosterHash()) + .sourceRosterHash(op.sourceRosterHash()) + .nodeSignature(signature.getBytes()) + .ledgerId(Bytes.wrap( + ledgerIdAndSignature.ledgerId().publicKey().toBytes())) + .build(); + submissionManager.submitTssVote(tssVote, context); + } + }); } } diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/handlers/TssUtils.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/handlers/TssUtils.java new file mode 100644 index 000000000000..af76ed18d414 --- /dev/null +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/handlers/TssUtils.java @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.tss.handlers; + +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; +import com.hedera.hapi.services.auxiliary.tss.TssMessageTransactionBody; +import com.hedera.node.app.tss.api.TssLibrary; +import com.hedera.node.app.tss.api.TssMessage; +import com.hedera.node.app.tss.api.TssParticipantDirectory; +import com.hedera.node.app.tss.pairings.FakeFieldElement; +import com.hedera.node.app.tss.pairings.FakeGroupElement; +import com.hedera.node.app.tss.pairings.PairingPrivateKey; +import com.hedera.node.app.tss.pairings.PairingPublicKey; +import com.hedera.node.app.tss.pairings.SignatureSchema; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.math.BigInteger; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +public class TssUtils { + /** + * Compute the TSS participant directory from the roster. + * + * @param roster the roster + * @param maxSharesPerNode the maximum number of shares per node + * @param selfNodeId the node ID of the current node + * @return the TSS participant directory + */ + public static TssParticipantDirectory computeTssParticipantDirectory( + @NonNull final Roster roster, final long maxSharesPerNode, final int selfNodeId) { + final var computedShares = computeNodeShares(roster.rosterEntries(), maxSharesPerNode); + final var totalShares = + computedShares.values().stream().mapToLong(Long::longValue).sum(); + final var threshold = getThresholdForTssMessages(totalShares); + + final var builder = TssParticipantDirectory.createBuilder().withThreshold(threshold); + // FUTURE: This private key must be loaded from disk + builder.withSelf( + selfNodeId, + new PairingPrivateKey( + new FakeFieldElement(BigInteger.valueOf(10L)), SignatureSchema.create(new byte[] {1}))); + for (var rosterEntry : roster.rosterEntries()) { + final int numSharesPerThisNode = + 
computedShares.get(rosterEntry.nodeId()).intValue(); + // FUTURE: Use the actual public key from the node + final var pairingPublicKey = new PairingPublicKey( + new FakeGroupElement(BigInteger.valueOf(10L)), SignatureSchema.create(new byte[] {1})); + builder.withParticipant((int) rosterEntry.nodeId(), numSharesPerThisNode, pairingPublicKey); + } + // FUTURE: Use the actual signature schema + return builder.build(SignatureSchema.create(new byte[] {1})); + } + + /** + * Compute the threshold of consensus weight needed for submitting a {@link com.hedera.hapi.services.auxiliary.tss.TssVoteTransactionBody} + * If more than 1/2 the consensus weight has been received, then the threshold is met + * + * @param totalShares the total number of shares + * @return the threshold for TSS messages + */ + public static int getThresholdForTssMessages(final long totalShares) { + return (int) (totalShares + 2) / 2; + } + + /** + * Validate TSS messages using the TSS library. If the message is valid, add it to the list of valid TSS messages. + * + * @param tssMessages list of TSS messages to validate + * @param tssParticipantDirectory the participant directory + * @return list of valid TSS messages + */ + public static List validateTssMessages( + @NonNull final List tssMessages, + @NonNull final TssParticipantDirectory tssParticipantDirectory, + @NonNull final TssLibrary tssLibrary) { + final var validTssMessages = new LinkedList(); + for (final var op : tssMessages) { + final var isValid = tssLibrary.verifyTssMessage( + tssParticipantDirectory, new TssMessage(op.tssMessage().toByteArray())); + if (isValid) { + validTssMessages.add(op); + } + } + return validTssMessages; + } + + /** + * Get the TSS messages from the list of valid TSS Message bodies. 
+ * + * @param validTssOps list of valid TSS message bodies + * @return list of TSS messages + */ + public static List getTssMessages(List validTssOps) { + return validTssOps.stream() + .map(TssMessageTransactionBody::tssMessage) + .map(k -> new TssMessage(k.toByteArray())) + .toList(); + } + + /** + * Compute the number of shares each node should have based on the weight of the node. + * + * @param rosterEntries the list of roster entries + * @param maxSharesPerNode the maximum number of shares per node + * @return a map of node ID to the number of shares + */ + public static Map computeNodeShares( + @NonNull final List rosterEntries, final long maxSharesPerNode) { + final var maxWeight = + rosterEntries.stream().mapToLong(RosterEntry::weight).max().orElse(0); + final var shares = new LinkedHashMap(); + for (final var entry : rosterEntries) { + final var numShares = ((maxSharesPerNode * entry.weight() + maxWeight - 1) / maxWeight); + shares.put(entry.nodeId(), numShares); + } + return shares; + } +} diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/handlers/TssVoteHandler.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/handlers/TssVoteHandler.java index d0d20c44b5ed..873d98d46461 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/handlers/TssVoteHandler.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/handlers/TssVoteHandler.java @@ -18,6 +18,9 @@ import static java.util.Objects.requireNonNull; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; +import com.hedera.hapi.node.state.tss.TssVoteMapKey; import com.hedera.hapi.node.transaction.TransactionBody; import com.hedera.hapi.services.auxiliary.tss.TssVoteTransactionBody; import com.hedera.node.app.spi.workflows.HandleContext; @@ -25,6 +28,9 @@ import com.hedera.node.app.spi.workflows.PreCheckException; import com.hedera.node.app.spi.workflows.PreHandleContext; import 
com.hedera.node.app.spi.workflows.TransactionHandler; +import com.hedera.node.app.tss.stores.WritableTssStore; +import com.hedera.pbj.runtime.io.buffer.Bytes; +import com.swirlds.platform.state.service.ReadableRosterStore; import edu.umd.cs.findbugs.annotations.NonNull; import javax.inject.Inject; import javax.inject.Singleton; @@ -35,6 +41,7 @@ */ @Singleton public class TssVoteHandler implements TransactionHandler { + @Inject public TssVoteHandler() { // Dagger2 @@ -53,5 +60,61 @@ public void pureChecks(@NonNull final TransactionBody txn) throws PreCheckExcept @Override public void handle(@NonNull final HandleContext context) throws HandleException { requireNonNull(context); + final var txBody = context.body().tssVoteOrThrow(); + final var tssBaseStore = context.storeFactory().writableStore(WritableTssStore.class); + final TssVoteMapKey tssVoteMapKey = new TssVoteMapKey( + txBody.targetRosterHash(), context.creatorInfo().nodeId()); + if (tssBaseStore.exists(tssVoteMapKey)) { + // Duplicate vote + return; + } + + if (!TssVoteHandler.hasReachedThreshold(txBody, context)) { + tssBaseStore.put(tssVoteMapKey, txBody); + } + } + + /** + * Check if the threshold number of votes (totaling at least 1/3 of weight) have already been received for the + * candidate roster, all with the same vote byte array. 
+ * + * @param tssVoteTransaction the TssVoteTransaction to check + * @param context the HandleContext + * @return true if the threshold has been reached, false otherwise + */ + public static boolean hasReachedThreshold( + @NonNull final TssVoteTransactionBody tssVoteTransaction, @NonNull final HandleContext context) { + final var rosterStore = context.storeFactory().readableStore(ReadableRosterStore.class); + + final Roster activeRoster = rosterStore.getActiveRoster(); + if (activeRoster == null) { + throw new IllegalArgumentException("No active roster found"); + } + // Get the target roster from the TssVoteTransactionBody + final Bytes targetRosterHash = tssVoteTransaction.targetRosterHash(); + + // Also get the total active roster weight + long activeRosterTotalWeight = 0; + // Initialize a counter for the total weight of votes with the same vote byte array + long voteWeight = 0L; + final var tssBaseStore = context.storeFactory().writableStore(WritableTssStore.class); + // For every node in the active roster, check if there is a vote for the target roster hash + for (final RosterEntry rosterEntry : activeRoster.rosterEntries()) { + activeRosterTotalWeight += rosterEntry.weight(); + final var tssVoteMapKey = new TssVoteMapKey(targetRosterHash, rosterEntry.nodeId()); + if (tssBaseStore.exists(tssVoteMapKey)) { + final var vote = tssBaseStore.getVote(tssVoteMapKey); + // If the vote byte array matches the one in the TssVoteTransaction, add the weight of the vote to the + // counter + if (vote.tssVote().equals(tssVoteTransaction.tssVote())) { + voteWeight += rosterEntry.weight(); + } + } + } + + // Check if the total weight of votes with the same vote byte array is at least 1/3 of the + // total weight of the network + // Adding a +1 to the threshold to account for rounding errors. + return voteWeight >= (activeRosterTotalWeight / 3) + ((activeRosterTotalWeight % 3) == 0 ? 
0 : 1); } } diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/ReadableTssStore.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/ReadableTssStore.java index 7ad75dd826ff..b1b095451008 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/ReadableTssStore.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/ReadableTssStore.java @@ -20,7 +20,9 @@ import com.hedera.hapi.node.state.tss.TssVoteMapKey; import com.hedera.hapi.services.auxiliary.tss.TssMessageTransactionBody; import com.hedera.hapi.services.auxiliary.tss.TssVoteTransactionBody; +import com.hedera.pbj.runtime.io.buffer.Bytes; import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.List; public interface ReadableTssStore { /** @@ -61,4 +63,11 @@ public interface ReadableTssStore { * @return The number of entries in the tss message state. */ long messageStateSize(); + + /** + * Get the list of Tss messages for the given roster hash. + * @param rosterHash The roster hash to look up. + * @return The list of Tss messages, or an empty list if not found. 
+ */ + List getTssMessages(Bytes rosterHash); } diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/ReadableTssBaseStore.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/ReadableTssStoreImpl.java similarity index 79% rename from hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/ReadableTssBaseStore.java rename to hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/ReadableTssStoreImpl.java index ef30e0d68d3d..e09d018c903b 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/ReadableTssBaseStore.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/ReadableTssStoreImpl.java @@ -24,14 +24,17 @@ import com.hedera.hapi.node.state.tss.TssVoteMapKey; import com.hedera.hapi.services.auxiliary.tss.TssMessageTransactionBody; import com.hedera.hapi.services.auxiliary.tss.TssVoteTransactionBody; +import com.hedera.pbj.runtime.io.buffer.Bytes; import com.swirlds.state.spi.ReadableKVState; import com.swirlds.state.spi.ReadableStates; import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.ArrayList; +import java.util.List; /** * Provides read-only access to the TSS base store. */ -public class ReadableTssBaseStore implements ReadableTssStore { +public class ReadableTssStoreImpl implements ReadableTssStore { /** * The underlying data storage class that holds the airdrop data. */ @@ -40,11 +43,11 @@ public class ReadableTssBaseStore implements ReadableTssStore { private final ReadableKVState readableTssVoteState; /** - * Create a new {@link ReadableTssBaseStore} instance. + * Create a new {@link ReadableTssStoreImpl} instance. * * @param states The state to use. 
*/ - public ReadableTssBaseStore(@NonNull final ReadableStates states) { + public ReadableTssStoreImpl(@NonNull final ReadableStates states) { requireNonNull(states); this.readableTssMessageState = states.get(TSS_MESSAGE_MAP_KEY); this.readableTssVoteState = states.get(TSS_VOTE_MAP_KEY); @@ -86,4 +89,15 @@ public boolean exists(@NonNull final TssVoteMapKey tssVoteKey) { public long messageStateSize() { return readableTssMessageState.size(); } + + @Override + public List getTssMessages(final Bytes rosterHash) { + final List tssMessages = new ArrayList<>(); + readableTssMessageState.keys().forEachRemaining(key -> { + if (key.rosterHash().equals(rosterHash)) { + tssMessages.add(readableTssMessageState.get(key)); + } + }); + return tssMessages; + } } diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/WritableTssBaseStore.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/WritableTssStore.java similarity index 93% rename from hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/WritableTssBaseStore.java rename to hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/WritableTssStore.java index e320460e8715..032f60367757 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/WritableTssBaseStore.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/stores/WritableTssStore.java @@ -29,9 +29,9 @@ import edu.umd.cs.findbugs.annotations.NonNull; /** - * Extends the {@link ReadableTssBaseStore} with write access to the TSS base store. + * Extends the {@link ReadableTssStoreImpl} with write access to the TSS base store. */ -public class WritableTssBaseStore extends ReadableTssBaseStore { +public class WritableTssStore extends ReadableTssStoreImpl { /** * The underlying data storage class that holds the Pending Airdrops data. 
*/ @@ -39,7 +39,7 @@ public class WritableTssBaseStore extends ReadableTssBaseStore { private final WritableKVState tssVoteState; - public WritableTssBaseStore(@NonNull final WritableStates states) { + public WritableTssStore(@NonNull final WritableStates states) { super(states); this.tssMessageState = states.get(TSS_MESSAGE_MAP_KEY); this.tssVoteState = states.get(TSS_VOTE_MAP_KEY); diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/OpWorkflowMetrics.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/OpWorkflowMetrics.java index 62ee0377066a..35765addfd26 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/OpWorkflowMetrics.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/OpWorkflowMetrics.java @@ -24,6 +24,7 @@ import com.swirlds.common.metrics.IntegerPairAccumulator; import com.swirlds.common.metrics.RunningAverageMetric; import com.swirlds.common.metrics.RunningAverageMetric.Config; +import com.swirlds.metrics.api.Counter; import com.swirlds.metrics.api.IntegerAccumulator; import com.swirlds.metrics.api.Metrics; import edu.umd.cs.findbugs.annotations.NonNull; @@ -45,7 +46,10 @@ public class OpWorkflowMetrics { .withDescription("average EVM gas used per second of consensus time") .withFormat("%,13.6f"); - private final Map transactionMetrics = + private final Map transactionDurationMetrics = + new EnumMap<>(HederaFunctionality.class); + + private final Map transactionThrottleMetrics = new EnumMap<>(HederaFunctionality.class); private final RunningAverageMetric gasPerConsSec; @@ -68,6 +72,8 @@ public OpWorkflowMetrics(@NonNull final Metrics metrics, @NonNull final ConfigPr } final var protoName = functionality.protoName(); final var name = protoName.substring(0, 1).toLowerCase() + protoName.substring(1); + + // initialize the transaction duration metrics final var maxConfig = new IntegerAccumulator.Config("app", name + "DurationMax") .withDescription("The 
maximum duration of a " + name + " transaction in nanoseconds") .withUnit("ns"); @@ -77,7 +83,13 @@ public OpWorkflowMetrics(@NonNull final Metrics metrics, @NonNull final ConfigPr .withDescription("The average duration of a " + name + " transaction in nanoseconds") .withUnit("ns"); final var avgMetric = metrics.getOrCreate(avgConfig); - transactionMetrics.put(functionality, new TransactionMetric(maxMetric, avgMetric)); + transactionDurationMetrics.put(functionality, new TransactionMetric(maxMetric, avgMetric)); + + // initialize the transaction throttle metrics + final var throttledConfig = new Counter.Config("app", name + "ThrottledTxns") + .withDescription( + "The number of " + name + " transactions that were rejected due to throttle limits"); + transactionThrottleMetrics.put(functionality, metrics.getOrCreate(throttledConfig)); } final StatsConfig statsConfig = configProvider.getConfiguration().getConfigData(StatsConfig.class); @@ -85,7 +97,7 @@ public OpWorkflowMetrics(@NonNull final Metrics metrics, @NonNull final ConfigPr } /** - * Update the metrics for the given functionality + * Update the transaction duration metrics for the given functionality * * @param functionality the {@link HederaFunctionality} for which the metrics will be updated * @param duration the duration of the operation in {@code ns} @@ -95,7 +107,7 @@ public void updateDuration(@NonNull final HederaFunctionality functionality, fin if (functionality == HederaFunctionality.NONE) { return; } - final var metric = transactionMetrics.get(functionality); + final var metric = transactionDurationMetrics.get(functionality); if (metric != null) { // We do not synchronize the update of the metrics. This may lead to a situation where the max value is // is stored in one reporting interval and the average in another. 
This is acceptable as synchronizing @@ -105,6 +117,23 @@ public void updateDuration(@NonNull final HederaFunctionality functionality, fin } } + /** + * Increment the throttled metrics for the given functionality, to track the number of transactions per second that + * failed due to throttling + * + * @param functionality the {@link HederaFunctionality} for which the throttled metrics will be updated + */ + public void incrementThrottled(@NonNull final HederaFunctionality functionality) { + requireNonNull(functionality, "functionality must not be null"); + if (functionality == HederaFunctionality.NONE) { + return; + } + final var metric = transactionThrottleMetrics.get(functionality); + if (metric != null) { + metric.increment(); + } + } + public void switchConsensusSecond() { gasPerConsSec.update(gasUsedThisConsensusSecond); gasUsedThisConsensusSecond = 0L; diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/DispatchHandleContext.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/DispatchHandleContext.java index 940c1f2e9328..92e9d42e40f1 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/DispatchHandleContext.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/DispatchHandleContext.java @@ -463,6 +463,11 @@ public Map dispatchPaidRewards() { return dispatchPaidRewards == null ? 
emptyMap() : dispatchPaidRewards; } + @Override + public NodeInfo creatorInfo() { + return creatorInfo; + } + private T dispatchForRecord( @NonNull final TransactionBody childTxBody, @NonNull final Class recordBuilderClass, diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/DispatchProcessor.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/DispatchProcessor.java index 1c3425037bfb..82d561875567 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/DispatchProcessor.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/DispatchProcessor.java @@ -40,6 +40,7 @@ import com.hedera.node.app.spi.authorization.Authorizer; import com.hedera.node.app.spi.workflows.HandleException; import com.hedera.node.app.spi.workflows.record.StreamBuilder; +import com.hedera.node.app.workflows.OpWorkflowMetrics; import com.hedera.node.app.workflows.dispatcher.TransactionDispatcher; import com.hedera.node.app.workflows.handle.dispatch.DispatchValidator; import com.hedera.node.app.workflows.handle.dispatch.RecordFinalizer; @@ -78,6 +79,7 @@ public class DispatchProcessor { private final TransactionDispatcher dispatcher; private final EthereumTransactionHandler ethereumTransactionHandler; private final NetworkInfo networkInfo; + private final OpWorkflowMetrics workflowMetrics; @Inject public DispatchProcessor( @@ -90,7 +92,8 @@ public DispatchProcessor( @NonNull final ExchangeRateManager exchangeRateManager, @NonNull final TransactionDispatcher dispatcher, @NonNull final EthereumTransactionHandler ethereumTransactionHandler, - final NetworkInfo networkInfo) { + @NonNull final NetworkInfo networkInfo, + @NonNull final OpWorkflowMetrics workflowMetrics) { this.authorizer = requireNonNull(authorizer); this.validator = requireNonNull(validator); this.recordFinalizer = requireNonNull(recordFinalizer); @@ -101,6 +104,7 @@ public DispatchProcessor( this.dispatcher = 
requireNonNull(dispatcher); this.ethereumTransactionHandler = requireNonNull(ethereumTransactionHandler); this.networkInfo = requireNonNull(networkInfo); + this.workflowMetrics = requireNonNull(workflowMetrics); } /** @@ -140,7 +144,6 @@ public void processDispatch(@NonNull final Dispatch dispatch) { * * @param dispatch the dispatch to be processed * @param validationResult the due diligence report for the dispatch - * @return the work done by the dispatch */ private void tryHandle(@NonNull final Dispatch dispatch, @NonNull final ValidationResult validationResult) { try { @@ -162,8 +165,10 @@ private void tryHandle(@NonNull final Dispatch dispatch, @NonNull final Validati // Since there is no easy way to say how much work was done in the failed dispatch, // and current throttling is very rough-grained, we just return USER_TRANSACTION here } catch (final ThrottleException e) { + final var functionality = dispatch.txnInfo().functionality(); + workflowMetrics.incrementThrottled(functionality); rollbackAndRechargeFee(dispatch, validationResult, e.getStatus()); - if (dispatch.txnInfo().functionality() == ETHEREUM_TRANSACTION) { + if (functionality == ETHEREUM_TRANSACTION) { ethereumTransactionHandler.handleThrottled(dispatch.handleContext()); } } catch (final Exception e) { diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/dispatch/ChildDispatchFactory.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/dispatch/ChildDispatchFactory.java index 42fd91619eed..8692dc12b39a 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/dispatch/ChildDispatchFactory.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/dispatch/ChildDispatchFactory.java @@ -22,6 +22,7 @@ import static com.hedera.node.app.workflows.handle.throttle.DispatchUsageManager.CONTRACT_OPERATIONS; import static 
com.hedera.node.app.workflows.prehandle.PreHandleResult.Status.PRE_HANDLE_FAILURE; import static com.hedera.node.app.workflows.prehandle.PreHandleResult.Status.SO_FAR_SO_GOOD; +import static java.util.Collections.emptyMap; import static java.util.Objects.requireNonNull; import com.hedera.hapi.node.base.AccountID; @@ -44,7 +45,7 @@ import com.hedera.node.app.service.token.api.TokenServiceApi; import com.hedera.node.app.services.ServiceScopeLookup; import com.hedera.node.app.signature.AppKeyVerifier; -import com.hedera.node.app.signature.DelegateKeyVerifier; +import com.hedera.node.app.signature.DefaultKeyVerifier; import com.hedera.node.app.signature.impl.SignatureVerificationImpl; import com.hedera.node.app.spi.authorization.Authorizer; import com.hedera.node.app.spi.fees.FeeContext; @@ -75,6 +76,7 @@ import com.hedera.node.app.workflows.prehandle.PreHandleContextImpl; import com.hedera.node.app.workflows.prehandle.PreHandleResult; import com.hedera.node.config.data.BlockStreamConfig; +import com.hedera.node.config.data.HederaConfig; import com.hedera.pbj.runtime.io.buffer.Bytes; import com.swirlds.config.api.Configuration; import com.swirlds.state.spi.info.NetworkInfo; @@ -162,7 +164,7 @@ public Dispatch createChildDispatch( @NonNull final BlockRecordInfo blockRecordInfo, @NonNull final HandleContext.ConsensusThrottling throttleStrategy) { final var preHandleResult = preHandleChild(txBody, syntheticPayerId, config, readableStoreFactory); - final var childVerifier = getKeyVerifier(callback); + final var childVerifier = getKeyVerifier(callback, config); final var childTxnInfo = getTxnInfoFrom(syntheticPayerId, txBody); final var streamMode = config.getConfigData(BlockStreamConfig.class).streamMode(); final var childStack = @@ -388,36 +390,47 @@ public int numSignaturesVerified() { /** * Returns a {@link AppKeyVerifier} based on the callback. If the callback is null, then it returns a - * {@link NoOpKeyVerifier}. 
Otherwise, it returns a {@link DelegateKeyVerifier} with the callback. - * The callback is null if the signature verification is not required. This is the case for hollow account - * completion and auto account creation. + * {@link NoOpKeyVerifier}. Otherwise, it returns a verifier that forwards calls to + * {@link AppKeyVerifier#verificationFor(Key)} to a + * {@link DefaultKeyVerifier#verificationFor(Key, VerificationAssistant)} with a verification assistant + * returns true exactly when the callback returns true for its key. + *

+ * A null callback is useful for internal dispatches that do not need further signature verifications; + * for example, hollow account completion and auto account creation. * * @param callback the callback + * @param config the configuration * @return the key verifier */ - public static AppKeyVerifier getKeyVerifier(@Nullable Predicate callback) { + public static AppKeyVerifier getKeyVerifier( + @Nullable final Predicate callback, @NonNull final Configuration config) { return callback == null ? NO_OP_KEY_VERIFIER : new AppKeyVerifier() { - private final AppKeyVerifier verifier = new DelegateKeyVerifier(callback); + private final AppKeyVerifier verifier = + new DefaultKeyVerifier(0, config.getConfigData(HederaConfig.class), emptyMap()); @NonNull @Override public SignatureVerification verificationFor(@NonNull final Key key) { - return callback.test(key) ? NoOpKeyVerifier.PASSED_VERIFICATION : verifier.verificationFor(key); + // Within the child HandleContext, a key structure has a valid signature ONLY if + // the given callback returns true for enough primitive keys in the structure + return verifier.verificationFor(key, (k, v) -> callback.test(k)); } @NonNull @Override public SignatureVerification verificationFor( @NonNull final Key key, @NonNull final VerificationAssistant callback) { - throw new UnsupportedOperationException("Should never be called!"); + // We do not yet support signing scheduled transactions from within the EVM + throw new UnsupportedOperationException(); } @NonNull @Override public SignatureVerification verificationFor(@NonNull final Bytes evmAlias) { - throw new UnsupportedOperationException("Should never be called!"); + // We do not yet support completing hollow accounts from an internal dispatch + throw new UnsupportedOperationException(); } @Override diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/NodeStakeUpdates.java 
b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/NodeStakeUpdates.java index 086b3ed6978c..af1fc0e5a357 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/NodeStakeUpdates.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/NodeStakeUpdates.java @@ -22,20 +22,27 @@ import static java.util.Objects.requireNonNull; import com.google.common.annotations.VisibleForTesting; -import com.hedera.hapi.node.state.roster.Roster; import com.hedera.node.app.fees.ExchangeRateManager; import com.hedera.node.app.records.ReadableBlockRecordStore; +import com.hedera.node.app.service.addressbook.ReadableNodeStore; import com.hedera.node.app.service.token.impl.handlers.staking.EndOfStakingPeriodUpdater; import com.hedera.node.app.service.token.records.TokenContext; +import com.hedera.node.app.spi.metrics.StoreMetricsService; +import com.hedera.node.app.spi.workflows.HandleContext; +import com.hedera.node.app.store.WritableStoreFactory; import com.hedera.node.app.tss.TssBaseService; import com.hedera.node.app.workflows.handle.Dispatch; import com.hedera.node.app.workflows.handle.stack.SavepointStackImpl; import com.hedera.node.config.data.StakingConfig; import com.hedera.node.config.data.TssConfig; import com.hedera.node.config.types.StreamMode; +import com.swirlds.common.RosterStateId; +import com.swirlds.config.api.Configuration; +import com.swirlds.platform.state.service.WritableRosterStore; import edu.umd.cs.findbugs.annotations.NonNull; import java.time.Instant; import java.time.LocalDate; +import java.util.Objects; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.logging.log4j.LogManager; @@ -54,15 +61,18 @@ public class NodeStakeUpdates { private final EndOfStakingPeriodUpdater stakingCalculator; private final ExchangeRateManager exchangeRateManager; private final TssBaseService tssBaseService; + private final StoreMetricsService 
storeMetricsService; @Inject public NodeStakeUpdates( @NonNull final EndOfStakingPeriodUpdater stakingPeriodCalculator, @NonNull final ExchangeRateManager exchangeRateManager, - @NonNull final TssBaseService tssBaseService) { + @NonNull final TssBaseService tssBaseService, + @NonNull final StoreMetricsService storeMetricsService) { this.stakingCalculator = requireNonNull(stakingPeriodCalculator); this.exchangeRateManager = requireNonNull(exchangeRateManager); this.tssBaseService = requireNonNull(tssBaseService); + this.storeMetricsService = requireNonNull(storeMetricsService); } /** @@ -70,6 +80,10 @@ public NodeStakeUpdates( * rewards. This should only be done during handling of the first transaction of each new staking * period, which staking period usually starts at midnight UTC. * + *

Given successful processing of staking updates, and keying candidate TSS rosters is enabled, + * this time hook also signals to the {@link TssBaseService} that a new candidate roster – constructed + * from the new weights calculated herein for staking – is available. + * * @param dispatch the dispatch * @param stack the savepoint stack * @param tokenContext the token context @@ -129,12 +143,16 @@ public void process( logger.error("CATASTROPHIC failure updating end-of-day stakes", e); stack.rollbackFullStack(); } + + // If enabled, initialize a new candidate roster final var config = tokenContext.configuration(); final var tssConfig = config.getConfigData(TssConfig.class); if (tssConfig.keyCandidateRoster()) { - final var context = dispatch.handleContext(); - // C.f. https://github.com/hashgraph/hedera-services/issues/14748 - tssBaseService.setCandidateRoster(Roster.DEFAULT, context); + // We can't use the handle context to retrieve a WritableRosterStore object because + // the handle context is only scoped to the token service, so we use the + // `newWritableRosterStore` method here instead + final var rosterStore = newWritableRosterStore(stack, config); + keyNewRoster(dispatch.handleContext(), rosterStore); } } } @@ -154,6 +172,24 @@ public static boolean isNextStakingPeriod( } } + private void keyNewRoster( + @NonNull final HandleContext handleContext, @NonNull final WritableRosterStore rosterStore) { + final var nodeStore = handleContext.storeFactory().readableStore(ReadableNodeStore.class); + final var newCandidateRoster = nodeStore.newRosterFromNodes(); + + if (!Objects.equals(newCandidateRoster, rosterStore.getCandidateRoster()) + && !Objects.equals(newCandidateRoster, rosterStore.getActiveRoster())) { + rosterStore.putCandidateRoster(newCandidateRoster); + tssBaseService.setCandidateRoster(newCandidateRoster, handleContext); + } + } + + private WritableRosterStore newWritableRosterStore( + @NonNull final SavepointStackImpl stack, @NonNull final Configuration 
config) { + final var writableFactory = new WritableStoreFactory(stack, RosterStateId.NAME, config, storeMetricsService); + return writableFactory.getStore(WritableRosterStore.class); + } + private static boolean isLaterUtcDay(@NonNull final Instant now, @NonNull final Instant then) { final var nowDay = LocalDate.ofInstant(now, UTC); final var thenDay = LocalDate.ofInstant(then, UTC); diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/ingest/IngestChecker.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/ingest/IngestChecker.java index 9c447d9a52cf..57adf5efcb9c 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/ingest/IngestChecker.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/ingest/IngestChecker.java @@ -55,6 +55,7 @@ import com.hedera.node.app.state.DeduplicationCache; import com.hedera.node.app.store.ReadableStoreFactory; import com.hedera.node.app.throttle.SynchronizedThrottleAccumulator; +import com.hedera.node.app.workflows.OpWorkflowMetrics; import com.hedera.node.app.workflows.SolvencyPreCheck; import com.hedera.node.app.workflows.TransactionChecker; import com.hedera.node.app.workflows.TransactionChecker.RequireMinValidLifetimeBuffer; @@ -97,6 +98,7 @@ public final class IngestChecker { private final Authorizer authorizer; private final SynchronizedThrottleAccumulator synchronizedThrottleAccumulator; private final InstantSource instantSource; + private final OpWorkflowMetrics workflowMetrics; /** * Constructor of the {@code IngestChecker} @@ -111,6 +113,7 @@ public final class IngestChecker { * @param feeManager the {@link FeeManager} that manages {@link com.hedera.node.app.spi.fees.FeeCalculator}s * @param synchronizedThrottleAccumulator the {@link SynchronizedThrottleAccumulator} that checks transaction should be throttled * @param instantSource the {@link InstantSource} that provides the current time + * @param workflowMetrics the {@link 
OpWorkflowMetrics} that manages the metrics for all operations * @throws NullPointerException if one of the arguments is {@code null} */ @Inject @@ -126,7 +129,8 @@ public IngestChecker( @NonNull final FeeManager feeManager, @NonNull final Authorizer authorizer, @NonNull final SynchronizedThrottleAccumulator synchronizedThrottleAccumulator, - @NonNull final InstantSource instantSource) { + @NonNull final InstantSource instantSource, + @NonNull final OpWorkflowMetrics workflowMetrics) { this.nodeAccount = requireNonNull(nodeAccount, "nodeAccount must not be null"); this.currentPlatformStatus = requireNonNull(currentPlatformStatus, "currentPlatformStatus must not be null"); this.transactionChecker = requireNonNull(transactionChecker, "transactionChecker must not be null"); @@ -139,6 +143,7 @@ public IngestChecker( this.authorizer = requireNonNull(authorizer, "authorizer must not be null"); this.synchronizedThrottleAccumulator = requireNonNull(synchronizedThrottleAccumulator); this.instantSource = requireNonNull(instantSource); + this.workflowMetrics = requireNonNull(workflowMetrics); } /** @@ -193,10 +198,9 @@ public TransactionInfo runAllChecks( // 4. Check throttles assertThrottlingPreconditions(txInfo, configuration); final var hederaConfig = configuration.getConfigData(HederaConfig.class); - if (hederaConfig.ingestThrottleEnabled()) { - if (synchronizedThrottleAccumulator.shouldThrottle(txInfo, state)) { - throw new PreCheckException(BUSY); - } + if (hederaConfig.ingestThrottleEnabled() && synchronizedThrottleAccumulator.shouldThrottle(txInfo, state)) { + workflowMetrics.incrementThrottled(functionality); + throw new PreCheckException(BUSY); } // 4a. 
Run pure checks diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/prehandle/PreHandleContextImpl.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/prehandle/PreHandleContextImpl.java index b0c53dfbdf5b..0496a9c59f92 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/prehandle/PreHandleContextImpl.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/prehandle/PreHandleContextImpl.java @@ -16,6 +16,8 @@ package com.hedera.node.app.workflows.prehandle; +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_PAYER_ACCOUNT_ID; +import static com.hedera.hapi.node.base.ResponseCodeEnum.UNRESOLVABLE_REQUIRED_SIGNERS; import static com.hedera.hapi.util.HapiUtils.EMPTY_KEY_LIST; import static com.hedera.hapi.util.HapiUtils.isHollow; import static com.hedera.node.app.service.token.impl.util.TokenHandlerHelper.verifyNotEmptyKey; @@ -62,7 +64,7 @@ public class PreHandleContextImpl implements PreHandleContext { /** * The payer account ID. Specified in the transaction body, extracted and stored separately for convenience. */ - private final AccountID payer; + private final AccountID payerId; /** * The payer's key, as found in state */ @@ -128,32 +130,28 @@ public PreHandleContextImpl( } /** - * Create a new instance + * Create a new instance of {@link PreHandleContextImpl}. 
+ * @throws PreCheckException if the payer account does not exist */ private PreHandleContextImpl( @NonNull final ReadableStoreFactory storeFactory, @NonNull final TransactionBody txn, - @NonNull final AccountID payer, + @NonNull final AccountID payerId, @NonNull final Configuration configuration, @NonNull final TransactionDispatcher dispatcher, final boolean isUserTx) throws PreCheckException { this.storeFactory = requireNonNull(storeFactory, "storeFactory must not be null."); this.txn = requireNonNull(txn, "txn must not be null!"); - this.payer = requireNonNull(payer, "payer msut not be null!"); + this.payerId = requireNonNull(payerId, "payer must not be null!"); this.configuration = requireNonNull(configuration, "configuration must not be null!"); this.dispatcher = requireNonNull(dispatcher, "dispatcher must not be null!"); this.isUserTx = isUserTx; - this.accountStore = storeFactory.getStore(ReadableAccountStore.class); - - // Find the account, which must exist or throw a PreCheckException with the given response code. - final var account = accountStore.getAccountById(payer); - mustExist(account, ResponseCodeEnum.INVALID_PAYER_ACCOUNT_ID); - // NOTE: While it is true that the key can be null on some special accounts like - // account 800, those accounts cannot be the payer. 
- payerKey = account.key(); - mustExist(payerKey, ResponseCodeEnum.INVALID_PAYER_ACCOUNT_ID); + // Find the account, which must exist or throw on construction + final var payer = mustExist(accountStore.getAccountById(payerId), INVALID_PAYER_ACCOUNT_ID); + // It would be a catastrophic invariant failure if an account in state didn't have a key + payerKey = payer.keyOrThrow(); } @Override @@ -171,7 +169,7 @@ public TransactionBody body() { @Override @NonNull public AccountID payer() { - return payer; + return payerId; } @Override @@ -310,7 +308,7 @@ public PreHandleContext requireAliasedKeyOrThrow( // If we repeated the payer requirement, we would be requiring "double authorization" from // the contract doing the dispatch; but the contract has already authorized the action by // the very execution of its bytecode. - if (accountID.equals(payer)) { + if (accountID.equals(payerId)) { return this; } final Account account; @@ -330,7 +328,7 @@ public PreHandleContext requireAliasedKeyOrThrow( } // Verify this key isn't for an immutable account verifyNotStakingAccounts(account.accountIdOrThrow(), responseCode); - final var key = account.key(); + final var key = account.keyOrThrow(); if (!isValid(key)) { // Or if it is a Contract Key? Or if it is an empty key? // Or a KeyList with no // keys? Or KeyList with Contract keys only? 
@@ -478,18 +476,20 @@ public PreHandleContext requireSignatureForHollowAccountCreation(@NonNull final @NonNull @Override - public TransactionKeys allKeysForTransaction( - @NonNull TransactionBody nestedTxn, @NonNull final AccountID payerForNested) throws PreCheckException { - dispatcher.dispatchPureChecks(nestedTxn); - final var nestedContext = - new PreHandleContextImpl(storeFactory, nestedTxn, payerForNested, configuration, dispatcher); + public TransactionKeys allKeysForTransaction(@NonNull TransactionBody body, @NonNull final AccountID payerId) + throws PreCheckException { + // Throws PreCheckException if the transaction body is structurally invalid + dispatcher.dispatchPureChecks(body); + // Throws PreCheckException if the payer account does not exist + final var context = new PreHandleContextImpl(storeFactory, body, payerId, configuration, dispatcher); try { - dispatcher.dispatchPreHandle(nestedContext); + // Accumulate all required keys in the context + dispatcher.dispatchPreHandle(context); } catch (final PreCheckException ignored) { - // We must ignore/translate the exception here, as this is key gathering, not transaction validation. 
- throw new PreCheckException(ResponseCodeEnum.UNRESOLVABLE_REQUIRED_SIGNERS); + // Translate all prehandle failures to unresolvable required signers + throw new PreCheckException(UNRESOLVABLE_REQUIRED_SIGNERS); } - return nestedContext; + return context; } @Override @@ -512,8 +512,8 @@ public PreHandleContext innerContext() { public String toString() { return "PreHandleContextImpl{" + "accountStore=" + accountStore + ", txn=" - + txn + ", payer=" - + payer + ", payerKey=" + + txn + ", payerId=" + + payerId + ", payerKey=" + payerKey + ", requiredNonPayerKeys=" + requiredNonPayerKeys + ", innerContext=" + innerContext + ", storeFactory=" diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/query/QueryWorkflowImpl.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/query/QueryWorkflowImpl.java index 07b6b55e7f10..cc4edf0ecdc4 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/query/QueryWorkflowImpl.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/query/QueryWorkflowImpl.java @@ -124,6 +124,7 @@ public final class QueryWorkflowImpl implements QueryWorkflow { * @param feeManager the {@link FeeManager} to calculate the fees * @param synchronizedThrottleAccumulator the {@link SynchronizedThrottleAccumulator} that checks transaction should be throttled * @param instantSource the {@link InstantSource} to get the current time + * @param workflowMetrics the {@link OpWorkflowMetrics} to update the metrics * @param shouldCharge If the workflow should charge for handling queries. * @throws NullPointerException if one of the arguments is {@code null} */ @@ -269,7 +270,8 @@ public void handleQuery(@NonNull final Bytes requestBuffer, @NonNull final Buffe handler.validate(context); // 5. 
Check query throttles - if (shouldCharge && synchronizedThrottleAccumulator.shouldThrottle(function, query, payerID)) { + if (shouldCharge && synchronizedThrottleAccumulator.shouldThrottle(function, query, state, payerID)) { + workflowMetrics.incrementThrottled(function); throw new PreCheckException(BUSY); } diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/TransactionExecutors.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/TransactionExecutors.java index e6bee9c7277a..66aa82e02a2e 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/TransactionExecutors.java +++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/TransactionExecutors.java @@ -28,6 +28,7 @@ import com.hedera.node.app.signature.impl.SignatureExpanderImpl; import com.hedera.node.app.signature.impl.SignatureVerifierImpl; import com.hedera.node.app.state.recordcache.LegacyListRecordSource; +import com.hedera.node.app.tss.PlaceholderTssLibrary; import com.hedera.node.app.tss.TssBaseServiceImpl; import com.hedera.node.config.data.HederaConfig; import com.swirlds.common.crypto.CryptographyHolder; @@ -94,8 +95,12 @@ private ExecutorComponent newExecutorComponent( new SignatureExpanderImpl(), new SignatureVerifierImpl(CryptographyHolder.get())), UNAVAILABLE_GOSSIP); - final var tssBaseService = - new TssBaseServiceImpl(appContext, ForkJoinPool.commonPool(), ForkJoinPool.commonPool()); + final var tssBaseService = new TssBaseServiceImpl( + appContext, + ForkJoinPool.commonPool(), + ForkJoinPool.commonPool(), + new PlaceholderTssLibrary(), + ForkJoinPool.commonPool()); final var contractService = new ContractServiceImpl(appContext, NOOP_VERIFICATION_STRATEGIES, tracerBinding); final var fileService = new FileServiceImpl(); final var configProvider = new ConfigProviderImpl(false, null, properties); diff --git 
a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/roster/RosterServiceImplTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/roster/RosterServiceTest.java similarity index 85% rename from hedera-node/hedera-app/src/test/java/com/hedera/node/app/roster/RosterServiceImplTest.java rename to hedera-node/hedera-app/src/test/java/com/hedera/node/app/roster/RosterServiceTest.java index eef73d417b83..749ec29e1e8a 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/roster/RosterServiceImplTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/roster/RosterServiceTest.java @@ -21,7 +21,8 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import com.hedera.node.app.roster.schemas.V0540RosterSchema; +import com.swirlds.common.RosterStateId; +import com.swirlds.platform.state.service.schemas.V0540RosterSchema; import com.swirlds.state.spi.Schema; import com.swirlds.state.spi.SchemaRegistry; import org.assertj.core.api.Assertions; @@ -30,19 +31,19 @@ import org.mockito.ArgumentCaptor; /** - * Unit tests for {@link RosterServiceImpl}. + * Unit tests for {@link RosterService}. 
*/ -class RosterServiceImplTest { - private RosterServiceImpl rosterService; +class RosterServiceTest { + private RosterService rosterService; @BeforeEach void setUp() { - rosterService = new RosterServiceImpl(); + rosterService = new RosterService(); } @Test void defaultConstructor() { - assertThat(new RosterServiceImpl()).isNotNull(); + assertThat(new RosterService()).isNotNull(); } @Test @@ -65,6 +66,6 @@ void registerSchemasRegistersTokenSchema() { @Test void testServiceNameReturnsCorrectName() { - assertThat(rosterService.getServiceName()).isEqualTo(RosterServiceImpl.NAME); + assertThat(rosterService.getServiceName()).isEqualTo(RosterStateId.NAME); } } diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/roster/schemas/V0540RosterSchemaTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/roster/schemas/V0540RosterSchemaTest.java index 97e6a9e0e8d1..d2e230cb9b63 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/roster/schemas/V0540RosterSchemaTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/roster/schemas/V0540RosterSchemaTest.java @@ -16,8 +16,8 @@ package com.hedera.node.app.roster.schemas; -import static com.hedera.node.app.roster.schemas.V0540RosterSchema.ROSTER_KEY; -import static com.hedera.node.app.roster.schemas.V0540RosterSchema.ROSTER_STATES_KEY; +import static com.swirlds.common.RosterStateId.ROSTER_KEY; +import static com.swirlds.common.RosterStateId.ROSTER_STATES_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; @@ -28,6 +28,7 @@ import com.hedera.hapi.node.base.SemanticVersion; import com.hedera.hapi.node.state.roster.RosterState; import com.hedera.node.app.spi.fixtures.util.LoggingSubject; +import com.swirlds.platform.state.service.schemas.V0540RosterSchema; import com.swirlds.state.spi.MigrationContext; import com.swirlds.state.spi.StateDefinition; import 
com.swirlds.state.spi.WritableSingletonState; @@ -68,8 +69,7 @@ void registersExpectedRosterSchema() { @DisplayName("For this version, migrate from existing state version returns default.") void testMigrateFromNullRosterStateReturnsDefault() { when(migrationContext.newStates()).thenReturn(mock(WritableStates.class)); - when(migrationContext.newStates().getSingleton(V0540RosterSchema.ROSTER_STATES_KEY)) - .thenReturn(rosterState); + when(migrationContext.newStates().getSingleton(ROSTER_STATES_KEY)).thenReturn(rosterState); subject.migrate(migrationContext); verify(rosterState, times(1)).put(RosterState.DEFAULT); @@ -79,8 +79,7 @@ void testMigrateFromNullRosterStateReturnsDefault() { @DisplayName("Migrate from older state version returns default.") void testMigrateFromPreviousStateVersion() { when(migrationContext.newStates()).thenReturn(mock(WritableStates.class)); - when(migrationContext.newStates().getSingleton(V0540RosterSchema.ROSTER_STATES_KEY)) - .thenReturn(rosterState); + when(migrationContext.newStates().getSingleton(ROSTER_STATES_KEY)).thenReturn(rosterState); when(migrationContext.previousVersion()) .thenReturn( SemanticVersion.newBuilder().major(0).minor(53).patch(0).build()); diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/signature/DefaultKeyVerifierTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/signature/DefaultKeyVerifierTest.java index 03be40a5c2e6..68a9f96013e6 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/signature/DefaultKeyVerifierTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/signature/DefaultKeyVerifierTest.java @@ -42,6 +42,7 @@ import com.hedera.hapi.node.base.KeyList; import com.hedera.hapi.node.base.ThresholdKey; import com.hedera.node.app.signature.impl.SignatureVerificationImpl; +import com.hedera.node.app.spi.key.KeyComparator; import com.hedera.node.app.spi.signatures.SignatureVerification; import 
com.hedera.node.app.spi.signatures.VerificationAssistant; import com.hedera.node.app.workflows.prehandle.FakeSignatureVerificationFuture; @@ -50,11 +51,13 @@ import com.hedera.pbj.runtime.io.buffer.Bytes; import edu.umd.cs.findbugs.annotations.NonNull; import java.util.ArrayList; +import java.util.Comparator; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.TreeSet; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -73,6 +76,11 @@ @ExtendWith(MockitoExtension.class) class DefaultKeyVerifierTest { private static final int LEGACY_FEE_CALC_NETWORK_VPT = 13; + private static final Key ECDSA_X1 = FAKE_ECDSA_KEY_INFOS[1].publicKey(); + private static final Key ECDSA_X2 = FAKE_ECDSA_KEY_INFOS[2].publicKey(); + private static final Key ED25519_X1 = FAKE_ED25519_KEY_INFOS[1].publicKey(); + private static final Key ED25519_X2 = FAKE_ED25519_KEY_INFOS[2].publicKey(); + private static final Comparator KEY_COMPARATOR = new KeyComparator(); private static final HederaConfig HEDERA_CONFIG = HederaTestConfigBuilder.createConfig().getConfigData(HederaConfig.class); @@ -199,6 +207,48 @@ static Stream provideCompoundKeys() { } } + @Nested + @DisplayName("Only keys with valid signatures are returned by signingCryptoKeys()") + class SigningCryptoKeysTests { + @ParameterizedTest + @MethodSource("variousValidityScenarios") + void exactlyKeysWithValidKeysAreReturned(@NonNull final Map keysAndPassFail) { + final var subject = new DefaultKeyVerifier( + LEGACY_FEE_CALC_NETWORK_VPT, HEDERA_CONFIG, verificationResults(keysAndPassFail)); + final var expectedKeys = keysAndPassFail.entrySet().stream() + .filter(Entry::getValue) + .map(Entry::getKey) + .collect(Collectors.toCollection(() -> new TreeSet<>(KEY_COMPARATOR))); + final var actualKeys = subject.signingCryptoKeys(); + assertThat(actualKeys).isEqualTo(expectedKeys); + } + + static 
Stream variousValidityScenarios() { + return Stream.of( + Arguments.of(named( + "ECDSA_X1=pass, ECDSA_X2=pass, ED25519_X1=fail, ED25519_X2=fail", + Map.of( + ECDSA_X1, true, + ECDSA_X2, true, + ED25519_X1, false, + ED25519_X2, false))), + Arguments.of(named( + "ECDSA_X1=fail, ECDSA_X2=pass, ED25519_X1=pass, ED25519_X2=fail", + Map.of( + ECDSA_X1, false, + ECDSA_X2, true, + ED25519_X1, true, + ED25519_X2, false))), + Arguments.of(named( + "ECDSA_X1=fail, ECDSA_X2=fail, ED25519_X1=fail, ED25519_X2=fail", + Map.of( + ECDSA_X1, false, + ECDSA_X2, false, + ED25519_X1, false, + ED25519_X2, false)))); + } + } + /** * Tests to verify that finding a {@link SignatureVerification} for compound keys (threshold keys, key lists) that * also have duplicated keys. The point of these tests is really to verify that duplicate keys are counted multiple @@ -219,14 +269,10 @@ static Stream provideCompoundKeys() { @DisplayName("Finding SignatureVerification With Complex Keys with Duplicates") @ExtendWith(MockitoExtension.class) final class FindingSignatureVerificationWithDuplicateKeysTests { - // Used once in the key list - private static final Key ECDSA_X1 = FAKE_ECDSA_KEY_INFOS[1].publicKey(); - // Used twice in the key list - private static final Key ECDSA_X2 = FAKE_ECDSA_KEY_INFOS[2].publicKey(); - // Used once in the key list - private static final Key ED25519_X1 = FAKE_ED25519_KEY_INFOS[1].publicKey(); - // Used twice in the key list - private static final Key ED25519_X2 = FAKE_ED25519_KEY_INFOS[2].publicKey(); + // ECDSA_X1 is used once in the key list + // ECDSA_X2 is used twice in the key list + // ED25519_X1 is used once in the key list + // ED25519_X2 is used twice in the key list @BeforeEach void setup() { @@ -236,17 +282,6 @@ void setup() { }); } - private Map verificationResults(Map keysAndPassFail) { - final var results = new HashMap(); - for (final var entry : keysAndPassFail.entrySet()) { - results.put( - entry.getKey(), - new FakeSignatureVerificationFuture( - new 
SignatureVerificationImpl(entry.getKey(), null, entry.getValue()))); - } - return results; - } - @Test @DisplayName("All signatures are valid for the KeyList") void allValidInKeyList() { @@ -1282,4 +1317,15 @@ private static Key thresholdKey(int threshold, Key... keys) { .threshold(threshold)) .build(); } + + private static Map verificationResults(Map keysAndPassFail) { + final var results = new HashMap(); + for (final var entry : keysAndPassFail.entrySet()) { + results.put( + entry.getKey(), + new FakeSignatureVerificationFuture( + new SignatureVerificationImpl(entry.getKey(), null, entry.getValue()))); + } + return results; + } } diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/signature/DelegateKeyVerifierTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/signature/DelegateKeyVerifierTest.java deleted file mode 100644 index 7441ed8299e1..000000000000 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/signature/DelegateKeyVerifierTest.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (C) 2023-2024 Hedera Hashgraph, LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.hedera.node.app.signature; - -import static com.hedera.node.app.signature.impl.SignatureVerificationImpl.failedVerification; -import static com.hedera.node.app.signature.impl.SignatureVerificationImpl.passedVerification; -import static com.hedera.node.app.spi.fixtures.Scenarios.ALICE; -import static com.hedera.node.app.spi.fixtures.Scenarios.ERIN; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.when; - -import com.hedera.hapi.node.base.Key; -import com.hedera.node.app.spi.signatures.VerificationAssistant; -import com.hedera.pbj.runtime.io.buffer.Bytes; -import java.util.function.Predicate; -import java.util.stream.Stream; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; -import org.junit.jupiter.params.provider.ValueSource; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; - -@ExtendWith(MockitoExtension.class) -class DelegateKeyVerifierTest { - - private static final Key KEY = ALICE.keyInfo().publicKey(); - private static final Bytes ALIAS = ERIN.account().alias(); - - @Mock - private Predicate parentCallback; - - @Mock - private VerificationAssistant childCallback; - - private DelegateKeyVerifier subject; - - private static Stream getVerificationResults() { - return Stream.of( - Arguments.of(true, true), - Arguments.of(true, false), - Arguments.of(false, true), - Arguments.of(false, false)); - } - - @BeforeEach - void setup() { - subject = new DelegateKeyVerifier(parentCallback); - } - - @SuppressWarnings("ConstantConditions") - @Test - void testMethodsWithInvalidArguments() { - assertThatThrownBy(() -> new 
DelegateKeyVerifier(null)).isInstanceOf(NullPointerException.class); - - assertThatThrownBy(() -> subject.verificationFor((Key) null)).isInstanceOf(NullPointerException.class); - assertThatThrownBy(() -> subject.verificationFor(null, childCallback)).isInstanceOf(NullPointerException.class); - assertThatThrownBy(() -> subject.verificationFor(KEY, null)).isInstanceOf(NullPointerException.class); - assertThatThrownBy(() -> subject.verificationFor((Bytes) null)).isInstanceOf(NullPointerException.class); - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - void testSimpleVerificationFor(boolean callbackResult) { - // given - when(parentCallback.test(KEY)).thenReturn(callbackResult); - - // when - final var actual = subject.verificationFor(KEY); - - // then - final var expected = callbackResult ? passedVerification(KEY) : failedVerification(KEY); - assertThat(actual).isEqualTo(expected); - } - - @ParameterizedTest - @MethodSource("getVerificationResults") - void testVerificationForFailingCallbacks(boolean parentResult, boolean childResult) { - // given - when(parentCallback.test(KEY)).thenReturn(parentResult); - - final var intermediateVerification = parentResult ? passedVerification(KEY) : failedVerification(KEY); - when(childCallback.test(KEY, intermediateVerification)).thenReturn(childResult); - - // when - final var actual = subject.verificationFor(KEY, childCallback); - - // then - final var expected = childResult ? 
passedVerification(KEY) : failedVerification(KEY); - assertThat(actual).isEqualTo(expected); - } - - @Test - void testSimpleVerificationForAccount() { - // when - final var result = subject.verificationFor(ALIAS); - - // then - assertThat(result).isEqualTo(failedVerification(ALIAS)); - } -} diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/SynchronizedThrottleAccumulatorTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/SynchronizedThrottleAccumulatorTest.java index cc1898b8fd5d..971ecadb1707 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/SynchronizedThrottleAccumulatorTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/SynchronizedThrottleAccumulatorTest.java @@ -68,13 +68,15 @@ void verifyCheckAndEnforceThrottleIsCalled() { void verifyCheckAndEnforceThrottleQueryIsCalled() { // given final var query = mock(Query.class); + final var state = mock(State.class); final var accountID = mock(AccountID.class); // when - subject.shouldThrottle(HederaFunctionality.CONTRACT_CREATE, query, accountID); + subject.shouldThrottle(HederaFunctionality.CONTRACT_CREATE, query, state, accountID); // then verify(throttleAccumulator, times(1)) - .checkAndEnforceThrottle(eq(HederaFunctionality.CONTRACT_CREATE), any(), eq(query), eq(accountID)); + .checkAndEnforceThrottle( + eq(HederaFunctionality.CONTRACT_CREATE), any(), eq(query), eq(state), eq(accountID)); } } diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/ThrottleAccumulatorTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/ThrottleAccumulatorTest.java index 7429f3626b11..a0c4e833e9ab 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/ThrottleAccumulatorTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/ThrottleAccumulatorTest.java @@ -28,16 +28,17 @@ import static 
com.hedera.hapi.node.base.HederaFunctionality.SCHEDULE_SIGN; import static com.hedera.hapi.node.base.HederaFunctionality.TOKEN_BURN; import static com.hedera.hapi.node.base.HederaFunctionality.TOKEN_MINT; +import static com.hedera.hapi.node.base.HederaFunctionality.TRANSACTION_GET_RECEIPT; import static com.hedera.node.app.service.schedule.impl.schemas.V0490ScheduleSchema.SCHEDULES_BY_ID_KEY; import static com.hedera.node.app.throttle.ThrottleAccumulator.ThrottleType.FRONTEND_THROTTLE; import static com.hedera.pbj.runtime.ProtoTestTools.getThreadLocalDataBuffer; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.collection.IsIterableContainingInOrder.contains; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.lenient; import static org.mockito.Mockito.verify; import com.fasterxml.jackson.databind.ObjectMapper; @@ -60,6 +61,8 @@ import com.hedera.hapi.node.scheduled.ScheduleCreateTransactionBody; import com.hedera.hapi.node.scheduled.ScheduleSignTransactionBody; import com.hedera.hapi.node.state.schedule.Schedule; +import com.hedera.hapi.node.state.token.Account; +import com.hedera.hapi.node.token.CryptoGetAccountBalanceQuery; import com.hedera.hapi.node.token.CryptoTransferTransactionBody; import com.hedera.hapi.node.token.TokenMintTransactionBody; import com.hedera.hapi.node.transaction.Query; @@ -69,12 +72,14 @@ import com.hedera.node.app.hapi.utils.throttles.BucketThrottle; import com.hedera.node.app.hapi.utils.throttles.DeterministicThrottle; import com.hedera.node.app.hapi.utils.throttles.GasLimitDeterministicThrottle; +import com.hedera.node.app.service.token.TokenService; import 
com.hedera.node.app.spi.fixtures.util.LogCaptor; import com.hedera.node.app.spi.fixtures.util.LogCaptureExtension; import com.hedera.node.app.spi.fixtures.util.LoggingSubject; import com.hedera.node.app.spi.fixtures.util.LoggingTarget; import com.hedera.node.app.workflows.TransactionInfo; import com.hedera.node.config.ConfigProvider; +import com.hedera.node.config.VersionedConfigImpl; import com.hedera.node.config.VersionedConfiguration; import com.hedera.node.config.data.AccountsConfig; import com.hedera.node.config.data.AutoCreationConfig; @@ -83,11 +88,14 @@ import com.hedera.node.config.data.LazyCreationConfig; import com.hedera.node.config.data.SchedulingConfig; import com.hedera.node.config.data.TokensConfig; +import com.hedera.node.config.testfixtures.HederaTestConfigBuilder; import com.hedera.pbj.runtime.ParseException; import com.hedera.pbj.runtime.io.buffer.Bytes; import com.swirlds.state.State; import com.swirlds.state.spi.ReadableKVState; import com.swirlds.state.spi.ReadableStates; +import com.swirlds.state.test.fixtures.MapReadableKVState; +import com.swirlds.state.test.fixtures.MapReadableStates; import edu.umd.cs.findbugs.annotations.NonNull; import java.io.IOException; import java.io.InputStream; @@ -95,6 +103,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -200,11 +209,11 @@ void worksAsExpectedForKnownQueries() throws IOException, ParseException { // when final var queryPayerId = AccountID.newBuilder().accountNum(1_234L).build(); - var noAns = subject.checkAndEnforceThrottle(CRYPTO_GET_ACCOUNT_BALANCE, TIME_INSTANT, query, queryPayerId); - subject.checkAndEnforceThrottle(GET_VERSION_INFO, TIME_INSTANT.plusNanos(1), query, queryPayerId); - final var yesAns = - subject.checkAndEnforceThrottle(GET_VERSION_INFO, TIME_INSTANT.plusNanos(2), query, queryPayerId); - 
final var throttlesNow = subject.activeThrottlesFor(CRYPTO_GET_ACCOUNT_BALANCE); + var noAns = subject.checkAndEnforceThrottle(TRANSACTION_GET_RECEIPT, TIME_INSTANT, query, state, queryPayerId); + subject.checkAndEnforceThrottle(GET_VERSION_INFO, TIME_INSTANT.plusNanos(1), query, state, queryPayerId); + final var yesAns = subject.checkAndEnforceThrottle( + GET_VERSION_INFO, TIME_INSTANT.plusNanos(2), query, state, queryPayerId); + final var throttlesNow = subject.activeThrottlesFor(TRANSACTION_GET_RECEIPT); final var dNow = throttlesNow.get(0); // then @@ -213,6 +222,119 @@ void worksAsExpectedForKnownQueries() throws IOException, ParseException { assertEquals(10999999990000L, dNow.used()); } + @Test + void worksAsExpectedForSimpleGetBalanceThrottle() throws IOException, ParseException { + // given + final var config = HederaTestConfigBuilder.create() + .withValue("tokens.countingGetBalanceThrottleEnabled", false) + .getOrCreateConfig(); + given(configProvider.getConfiguration()).willReturn(new VersionedConfigImpl(config, 1)); + subject = new ThrottleAccumulator( + () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle); + final var defs = getThrottleDefs("bootstrap/throttles.json"); + subject.rebuildFor(defs); + final var query = Query.newBuilder() + .cryptogetAccountBalance(CryptoGetAccountBalanceQuery.newBuilder() + .accountID(RECEIVER_ID) + .build()) + .build(); + final var account = Account.newBuilder().numberAssociations(2).build(); + final var states = MapReadableStates.builder() + .state(new MapReadableKVState<>("ACCOUNTS", Map.of(RECEIVER_ID, account))) + .state(new MapReadableKVState<>("ALIASES", Map.of())) + .build(); + lenient().when(state.getReadableStates(TokenService.NAME)).thenReturn(states); + + // when + final var result = new boolean[] { + subject.checkAndEnforceThrottle( + CRYPTO_GET_ACCOUNT_BALANCE, TIME_INSTANT.plusNanos(0), query, state, PAYER_ID), + subject.checkAndEnforceThrottle( + CRYPTO_GET_ACCOUNT_BALANCE, 
TIME_INSTANT.plusNanos(1), query, state, PAYER_ID), + subject.checkAndEnforceThrottle( + CRYPTO_GET_ACCOUNT_BALANCE, TIME_INSTANT.plusNanos(2), query, state, PAYER_ID), + subject.checkAndEnforceThrottle( + CRYPTO_GET_ACCOUNT_BALANCE, TIME_INSTANT.plusNanos(3), query, state, PAYER_ID) + }; + + // then + assertThat(result).containsExactly(false, false, false, true); + } + + @Test + void worksAsExpectedForCountingGetBalanceThrottle() throws IOException, ParseException { + // given + final var config = HederaTestConfigBuilder.create() + .withValue("tokens.countingGetBalanceThrottleEnabled", true) + .getOrCreateConfig(); + given(configProvider.getConfiguration()).willReturn(new VersionedConfigImpl(config, 1)); + subject = new ThrottleAccumulator( + () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle); + final var defs = getThrottleDefs("bootstrap/throttles.json"); + subject.rebuildFor(defs); + final var query = Query.newBuilder() + .cryptogetAccountBalance(CryptoGetAccountBalanceQuery.newBuilder() + .accountID(RECEIVER_ID) + .build()) + .build(); + final var account = Account.newBuilder().numberAssociations(2).build(); + final var states = MapReadableStates.builder() + .state(new MapReadableKVState<>("ACCOUNTS", Map.of(RECEIVER_ID, account))) + .state(new MapReadableKVState<>("ALIASES", Map.of())) + .build(); + given(state.getReadableStates(TokenService.NAME)).willReturn(states); + + // when + final var result = new boolean[] { + subject.checkAndEnforceThrottle( + CRYPTO_GET_ACCOUNT_BALANCE, TIME_INSTANT.plusNanos(0), query, state, PAYER_ID), + subject.checkAndEnforceThrottle( + CRYPTO_GET_ACCOUNT_BALANCE, TIME_INSTANT.plusNanos(1), query, state, PAYER_ID) + }; + + // then + assertThat(result).containsExactly(false, true); + } + + @Test + void worksAsExpectedForCountingGetBalanceThrottleWithEmptyAccount() throws IOException, ParseException { + // given + final var config = HederaTestConfigBuilder.create() + 
.withValue("tokens.countingGetBalanceThrottleEnabled", true) + .getOrCreateConfig(); + given(configProvider.getConfiguration()).willReturn(new VersionedConfigImpl(config, 1)); + subject = new ThrottleAccumulator( + () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle); + final var defs = getThrottleDefs("bootstrap/throttles.json"); + subject.rebuildFor(defs); + final var query = Query.newBuilder() + .cryptogetAccountBalance(CryptoGetAccountBalanceQuery.newBuilder() + .accountID(RECEIVER_ID) + .build()) + .build(); + final var account = Account.newBuilder().numberAssociations(0).build(); + final var states = MapReadableStates.builder() + .state(new MapReadableKVState<>("ACCOUNTS", Map.of(RECEIVER_ID, account))) + .state(new MapReadableKVState<>("ALIASES", Map.of())) + .build(); + given(state.getReadableStates(TokenService.NAME)).willReturn(states); + + // when + final var result = new boolean[] { + subject.checkAndEnforceThrottle( + CRYPTO_GET_ACCOUNT_BALANCE, TIME_INSTANT.plusNanos(0), query, state, PAYER_ID), + subject.checkAndEnforceThrottle( + CRYPTO_GET_ACCOUNT_BALANCE, TIME_INSTANT.plusNanos(1), query, state, PAYER_ID), + subject.checkAndEnforceThrottle( + CRYPTO_GET_ACCOUNT_BALANCE, TIME_INSTANT.plusNanos(2), query, state, PAYER_ID), + subject.checkAndEnforceThrottle( + CRYPTO_GET_ACCOUNT_BALANCE, TIME_INSTANT.plusNanos(3), query, state, PAYER_ID) + }; + + // then + assertThat(result).containsExactly(false, false, false, true); + } + @Test void worksAsExpectedForUnknownQueries() throws IOException, ParseException { // given @@ -229,7 +351,8 @@ void worksAsExpectedForUnknownQueries() throws IOException, ParseException { // then final var queryPayerId = AccountID.newBuilder().accountNum(1_234L).build(); - assertTrue(subject.checkAndEnforceThrottle(NETWORK_GET_EXECUTION_TIME, TIME_INSTANT, query, queryPayerId)); + assertTrue( + subject.checkAndEnforceThrottle(NETWORK_GET_EXECUTION_TIME, TIME_INSTANT, query, state, queryPayerId)); 
} @ParameterizedTest @@ -247,7 +370,7 @@ void checkAndClaimThrottlesByGasAndTotalAllowedGasPerSecNotSetOrZero( // then System.out.println(logCaptor.warnLogs()); - assertThat(logCaptor.warnLogs(), contains(throttleType + " gas throttling enabled, but limited to 0 gas/sec")); + assertThat(logCaptor.warnLogs()).contains(throttleType + " gas throttling enabled, but limited to 0 gas/sec"); } @ParameterizedTest @@ -1101,7 +1224,8 @@ void constructsExpectedBucketsFromTestResource(ThrottleAccumulator.ThrottleType DeterministicThrottle.withMtpsAndBurstPeriod(15_000_000, 2), DeterministicThrottle.withMtpsAndBurstPeriod(5_000, 2), DeterministicThrottle.withMtpsAndBurstPeriod(50_000, 3), - DeterministicThrottle.withMtpsAndBurstPeriod(5000, 4)); + DeterministicThrottle.withMtpsAndBurstPeriod(5000, 4), + DeterministicThrottle.withMtpsAndBurstPeriod(3000, 1)); // when subject.rebuildFor(defs); diff --git a/hedera-node/test-clients/src/test/java/com/hedera/services/bdd/junit/hedera/embedded/fakes/tss/FakeTssLibraryTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/PlaceholderTssLibraryTest.java similarity index 94% rename from hedera-node/test-clients/src/test/java/com/hedera/services/bdd/junit/hedera/embedded/fakes/tss/FakeTssLibraryTest.java rename to hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/PlaceholderTssLibraryTest.java index 2f8cb16bc791..818487051ba0 100644 --- a/hedera-node/test-clients/src/test/java/com/hedera/services/bdd/junit/hedera/embedded/fakes/tss/FakeTssLibraryTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/PlaceholderTssLibraryTest.java @@ -14,9 +14,9 @@ * limitations under the License. 
*/ -package com.hedera.services.bdd.junit.hedera.embedded.fakes.tss; +package com.hedera.node.app.tss; -import static com.hedera.services.bdd.junit.hedera.embedded.fakes.tss.FakeTssLibrary.SIGNATURE_SCHEMA; +import static com.hedera.node.app.tss.PlaceholderTssLibrary.SIGNATURE_SCHEMA; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -38,11 +38,11 @@ import java.util.List; import org.junit.jupiter.api.Test; -class FakeTssLibraryTest { +class PlaceholderTssLibraryTest { @Test void sign() { - final var fakeTssLibrary = new FakeTssLibrary(1); + final var fakeTssLibrary = new PlaceholderTssLibrary(1); final var privateKeyElement = new FakeFieldElement(BigInteger.valueOf(2L)); final var pairingPrivateKey = new PairingPrivateKey(privateKeyElement, SIGNATURE_SCHEMA); final var privateShare = new TssPrivateShare(new TssShareId(1), pairingPrivateKey); @@ -56,7 +56,7 @@ void sign() { @Test void aggregatePrivateShares() { - final var fakeTssLibrary = new FakeTssLibrary(2); + final var fakeTssLibrary = new PlaceholderTssLibrary(2); final var privateShares = new ArrayList(); final var privateKeyShares = new long[] {1, 2, 3}; for (int i = 0; i < privateKeyShares.length; i++) { @@ -73,7 +73,7 @@ void aggregatePrivateShares() { @Test void aggregatePrivateSharesWithNotEnoughShares() { - final var fakeTssLibrary = new FakeTssLibrary(3); + final var fakeTssLibrary = new PlaceholderTssLibrary(3); final var privateShares = new ArrayList(); final var privateKeyShares = new long[] {1, 2}; for (int i = 0; i < privateKeyShares.length; i++) { @@ -89,7 +89,7 @@ void aggregatePrivateSharesWithNotEnoughShares() { @Test void aggregatePublicShares() { - final var fakeTssLibrary = new FakeTssLibrary(2); + final var fakeTssLibrary = new PlaceholderTssLibrary(2); final var publicShares = new ArrayList(); final var publicKeyShares = new long[] {1, 2, 3}; for (int i 
= 0; i < publicKeyShares.length; i++) { @@ -106,7 +106,7 @@ void aggregatePublicShares() { @Test void aggregateSignatures() { - final var fakeTssLibrary = new FakeTssLibrary(2); + final var fakeTssLibrary = new PlaceholderTssLibrary(2); final var partialSignatures = new ArrayList(); final var signatureShares = new long[] {1, 2, 3}; for (int i = 0; i < signatureShares.length; i++) { @@ -148,7 +148,7 @@ void verifySignature() { } final var threshold = 2; - final var fakeTssLibrary = new FakeTssLibrary(threshold); + final var fakeTssLibrary = new PlaceholderTssLibrary(threshold); final PairingPublicKey ledgerID = fakeTssLibrary.aggregatePublicShares(publicShares); final TssParticipantDirectory p0sDirectory = diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/TssBaseServiceImplTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/TssBaseServiceImplTest.java index ea7631f248f5..5a8630d92ef2 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/TssBaseServiceImplTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/TssBaseServiceImplTest.java @@ -64,7 +64,12 @@ class TssBaseServiceImplTest { @BeforeEach void setUp() { given(appContext.gossip()).willReturn(gossip); - subject = new TssBaseServiceImpl(appContext, ForkJoinPool.commonPool(), ForkJoinPool.commonPool()); + subject = new TssBaseServiceImpl( + appContext, + ForkJoinPool.commonPool(), + ForkJoinPool.commonPool(), + new PlaceholderTssLibrary(), + ForkJoinPool.commonPool()); } @Test diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/TssBaseServiceTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/TssBaseServiceTest.java index 45b095dfdc83..3b624b4ca2f7 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/TssBaseServiceTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/TssBaseServiceTest.java @@ -16,7 +16,7 @@ package com.hedera.node.app.tss; 
-import static org.junit.jupiter.api.Assertions.*; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.doCallRealMethod; import static org.mockito.Mockito.mock; diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/TssCryptographyManagerTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/TssCryptographyManagerTest.java new file mode 100644 index 000000000000..0959bfc4b3d5 --- /dev/null +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/TssCryptographyManagerTest.java @@ -0,0 +1,176 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.tss; + +import static com.hedera.node.app.tss.handlers.TssUtils.computeNodeShares; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.state.roster.RosterEntry; +import com.hedera.hapi.services.auxiliary.tss.TssMessageTransactionBody; +import com.hedera.hapi.services.auxiliary.tss.TssVoteTransactionBody; +import com.hedera.node.app.info.NodeInfoImpl; +import com.hedera.node.app.spi.AppContext; +import com.hedera.node.app.spi.store.StoreFactory; +import com.hedera.node.app.spi.workflows.HandleContext; +import com.hedera.node.app.tss.api.TssLibrary; +import com.hedera.node.app.tss.api.TssParticipantDirectory; +import com.hedera.node.app.tss.api.TssPublicShare; +import com.hedera.node.app.tss.api.TssShareId; +import com.hedera.node.app.tss.pairings.FakeGroupElement; +import com.hedera.node.app.tss.pairings.PairingPublicKey; +import com.hedera.node.app.tss.stores.WritableTssStore; +import com.hedera.pbj.runtime.io.buffer.Bytes; +import com.swirlds.common.crypto.Signature; +import com.swirlds.state.spi.info.NetworkInfo; +import java.math.BigInteger; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ForkJoinPool; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +public class TssCryptographyManagerTest { + private TssCryptographyManager subject; + + @Mock + private TssLibrary tssLibrary; + + @Mock + 
private TssParticipantDirectory tssParticipantDirectory; + + @Mock + private AppContext.Gossip gossip; + + @Mock(strictness = Mock.Strictness.LENIENT) + private HandleContext handleContext; + + @Mock + private StoreFactory storeFactory; + + @Mock + private WritableTssStore tssStore; + + @Mock(strictness = Mock.Strictness.LENIENT) + private NetworkInfo networkInfo; + + @BeforeEach + void setUp() { + subject = new TssCryptographyManager(tssLibrary, gossip, ForkJoinPool.commonPool()); + when(handleContext.networkInfo()).thenReturn(networkInfo); + when(networkInfo.selfNodeInfo()).thenReturn(new NodeInfoImpl(0, AccountID.DEFAULT, 0, null, null)); + } + + @Test + void testWhenVoteAlreadySubmitted() { + final var body = getTssBody(); + when(handleContext.storeFactory()).thenReturn(storeFactory); + when(storeFactory.writableStore(WritableTssStore.class)).thenReturn(tssStore); + when(tssStore.getVote(any())).thenReturn(mock(TssVoteTransactionBody.class)); // Simulate vote already submitted + + final var result = subject.handleTssMessageTransaction(body, tssParticipantDirectory, handleContext); + + assertNull(result.join()); + } + + @Test + void testWhenVoteNoVoteSubmittedAndThresholdNotMet() { + final var body = getTssBody(); + when(handleContext.storeFactory()).thenReturn(storeFactory); + when(storeFactory.writableStore(WritableTssStore.class)).thenReturn(tssStore); + when(tssStore.getVote(any())).thenReturn(null); + + final var result = subject.handleTssMessageTransaction(body, tssParticipantDirectory, handleContext); + + assertNull(result.join()); + } + + @Test + void testWhenVoteNoVoteSubmittedAndThresholdMet() { + final var ledgerId = mock(PairingPublicKey.class); + final var mockPublicShares = List.of(new TssPublicShare(new TssShareId(10), mock(PairingPublicKey.class))); + final var mockSignature = mock(Signature.class); + + final var body = getTssBody(); + when(handleContext.storeFactory()).thenReturn(storeFactory); + 
when(storeFactory.writableStore(WritableTssStore.class)).thenReturn(tssStore); + when(tssStore.getVote(any())).thenReturn(null); + when(tssStore.getTssMessages(any())).thenReturn(List.of(body)); + when(tssLibrary.verifyTssMessage(any(), any())).thenReturn(true); + + when(tssLibrary.computePublicShares(any(), any())).thenReturn(mockPublicShares); + when(tssLibrary.aggregatePublicShares(any())).thenReturn(ledgerId); + when(gossip.sign(any())).thenReturn(mockSignature); + when(ledgerId.publicKey()).thenReturn(new FakeGroupElement(BigInteger.valueOf(5L))); + + final var result = subject.handleTssMessageTransaction(body, tssParticipantDirectory, handleContext); + + assertNotNull(result.join()); + verify(gossip).sign(ledgerId.publicKey().toBytes()); + } + + @Test + void testWhenMetException() { + final var body = getTssBody(); + when(handleContext.storeFactory()).thenReturn(storeFactory); + when(storeFactory.writableStore(WritableTssStore.class)).thenReturn(tssStore); + when(tssStore.getVote(any())).thenReturn(null); + when(tssStore.getTssMessages(any())).thenReturn(List.of(body)); + when(tssLibrary.verifyTssMessage(any(), any())).thenReturn(true); + + when(tssLibrary.computePublicShares(any(), any())).thenThrow(new RuntimeException()); + + final var result = subject.handleTssMessageTransaction(body, tssParticipantDirectory, handleContext); + + assertNull(result.join()); + verify(gossip, never()).sign(any()); + } + + @Test + void testComputeNodeShares() { + RosterEntry entry1 = new RosterEntry(1L, 100L, null, null, null); + RosterEntry entry2 = new RosterEntry(2L, 50L, null, null, null); + + Map result = computeNodeShares(List.of(entry1, entry2), 10L); + + assertEquals(2, result.size()); + assertEquals(10L, result.get(1L)); + assertEquals(5L, result.get(2L)); + } + + private TssMessageTransactionBody getTssBody() { + final Bytes targetRosterHash = Bytes.wrap("targetRoster".getBytes()); + final Bytes sourceRosterHash = Bytes.wrap("sourceRoster".getBytes()); + return 
TssMessageTransactionBody.newBuilder() + .tssMessage(Bytes.wrap("tssMessage".getBytes())) + .shareIndex(1) + .sourceRosterHash(sourceRosterHash) + .targetRosterHash(targetRosterHash) + .build(); + } +} diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/handlers/TssMessageHandlerTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/handlers/TssMessageHandlerTest.java index be02eafc8a79..e64456a24164 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/handlers/TssMessageHandlerTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/handlers/TssMessageHandlerTest.java @@ -17,16 +17,41 @@ package com.hedera.node.app.tss.handlers; import static com.hedera.node.app.fixtures.AppTestBase.DEFAULT_CONFIG; +import static com.hedera.node.app.tss.PlaceholderTssLibrary.SIGNATURE_SCHEMA; import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.hapi.services.auxiliary.tss.TssMessageTransactionBody; +import com.hedera.node.app.spi.AppContext; +import com.hedera.node.app.spi.store.StoreFactory; import com.hedera.node.app.spi.workflows.HandleContext; import com.hedera.node.app.spi.workflows.PreHandleContext; +import com.hedera.node.app.tss.TssCryptographyManager; +import com.hedera.node.app.tss.TssCryptographyManager.LedgerIdWithSignature; +import com.hedera.node.app.tss.api.TssParticipantDirectory; +import com.hedera.node.app.tss.pairings.FakeGroupElement; +import com.hedera.node.app.tss.pairings.PairingPrivateKey; +import 
com.hedera.node.app.tss.pairings.PairingPublicKey; +import com.hedera.node.app.tss.stores.WritableTssStore; +import com.hedera.pbj.runtime.io.buffer.Bytes; +import com.swirlds.common.crypto.Signature; +import com.swirlds.platform.state.service.ReadableRosterStore; import com.swirlds.state.spi.info.NetworkInfo; import com.swirlds.state.spi.info.NodeInfo; +import java.math.BigInteger; import java.time.Instant; +import java.util.BitSet; +import java.util.List; +import java.util.concurrent.CompletableFuture; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -54,31 +79,105 @@ class TssMessageHandlerTest { @Mock(strictness = Mock.Strictness.LENIENT) private NetworkInfo networkInfo; + @Mock + private AppContext.Gossip gossip; + + @Mock(strictness = Mock.Strictness.LENIENT) + private TssCryptographyManager tssCryptographyManager; + + @Mock + private PairingPublicKey pairingPublicKey; + + @Mock + private Signature signature; + + @Mock + private StoreFactory storeFactory; + + @Mock + private WritableTssStore tssStore; + + @Mock + private ReadableRosterStore readableRosterStore; + + @Mock + private PairingPrivateKey pairingPrivateKey; + + private Roster roster; private TssMessageHandler subject; + private LedgerIdWithSignature ledgerIdWithSignature; + private TssParticipantDirectory tssParticipantDirectory; @BeforeEach void setUp() { - subject = new TssMessageHandler(submissionManager); + final var voteBitSet = new BitSet(8); + voteBitSet.set(2); + ledgerIdWithSignature = new LedgerIdWithSignature(pairingPublicKey, signature, voteBitSet); + roster = new Roster(List.of( + RosterEntry.newBuilder().nodeId(1).weight(100).build(), + RosterEntry.newBuilder().nodeId(2).weight(50).build())); + tssParticipantDirectory = TssParticipantDirectory.createBuilder() + .withSelf(1, pairingPrivateKey) + .withParticipant(1, 10, pairingPublicKey) + .build(SIGNATURE_SCHEMA); + + subject = new 
TssMessageHandler(submissionManager, gossip, tssCryptographyManager); } @Test void nothingImplementedYet() { assertDoesNotThrow(() -> subject.preHandle(preHandleContext)); - assertDoesNotThrow(() -> subject.pureChecks(tssMessage())); + assertDoesNotThrow(() -> subject.pureChecks(getTssBody())); } @Test - void submitsToyVoteOnHandlingMessage() { + void submitsVoteOnHandlingMessageWhenThresholdMet() { given(handleContext.networkInfo()).willReturn(networkInfo); given(handleContext.consensusNow()).willReturn(CONSENSUS_NOW); given(handleContext.configuration()).willReturn(DEFAULT_CONFIG); given(networkInfo.selfNodeInfo()).willReturn(nodeInfo); given(nodeInfo.accountId()).willReturn(NODE_ACCOUNT_ID); + given(nodeInfo.nodeId()).willReturn(1L); + given(handleContext.body()).willReturn(getTssBody()); + given(readableRosterStore.getActiveRoster()).willReturn(roster); + given(pairingPublicKey.publicKey()).willReturn(new FakeGroupElement(BigInteger.valueOf(10))); + given(gossip.sign(any())).willReturn(signature); + + when(handleContext.storeFactory()).thenReturn(storeFactory); + when(storeFactory.writableStore(WritableTssStore.class)).thenReturn(tssStore); + when(storeFactory.readableStore(ReadableRosterStore.class)).thenReturn(readableRosterStore); + + given(tssCryptographyManager.handleTssMessageTransaction( + eq(getTssBody().tssMessage()), any(TssParticipantDirectory.class), eq(handleContext))) + .willReturn(CompletableFuture.completedFuture(ledgerIdWithSignature)); + given(signature.getBytes()).willReturn(Bytes.wrap("test")); subject.handle(handleContext); + + verify(submissionManager).submitTssVote(any(), eq(handleContext)); + } + + @Test + public void testHandleException() { + when(handleContext.body()).thenReturn(getTssBody()); + when(tssCryptographyManager.handleTssMessageTransaction(any(), any(), any())) + .thenThrow(new RuntimeException("Simulated error")); + + // Execute the handler and ensure no vote is submitted + assertThrows(RuntimeException.class, () -> 
subject.handle(handleContext)); + verify(submissionManager, never()).submitTssVote(any(), any()); } - private TransactionBody tssMessage() { - return TransactionBody.DEFAULT; + public static TransactionBody getTssBody() { + final Bytes targetRosterHash = Bytes.wrap("targetRoster".getBytes()); + final Bytes sourceRosterHash = Bytes.wrap("sourceRoster".getBytes()); + return TransactionBody.newBuilder() + .tssMessage(TssMessageTransactionBody.newBuilder() + .tssMessage(Bytes.wrap("tssMessage".getBytes())) + .shareIndex(1) + .sourceRosterHash(sourceRosterHash) + .targetRosterHash(targetRosterHash) + .build()) + .build(); } } diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/handlers/TssUtilsTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/handlers/TssUtilsTest.java new file mode 100644 index 000000000000..cd31672f55e1 --- /dev/null +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/handlers/TssUtilsTest.java @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.tss.handlers; + +import static com.hedera.node.app.tss.handlers.TssMessageHandlerTest.getTssBody; +import static com.hedera.node.app.tss.handlers.TssUtils.validateTssMessages; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; + +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; +import com.hedera.node.app.tss.api.TssLibrary; +import com.hedera.node.app.tss.api.TssParticipantDirectory; +import java.util.List; +import org.junit.jupiter.api.Test; + +public class TssUtilsTest { + @Test + public void testComputeTssParticipantDirectory() { + RosterEntry rosterEntry1 = new RosterEntry(1L, 100L, null, null, null); + RosterEntry rosterEntry2 = new RosterEntry(2L, 50L, null, null, null); + long maxSharesPerNode = 10L; + int selfNodeId = 1; + + TssParticipantDirectory directory = TssUtils.computeTssParticipantDirectory( + new Roster(List.of(rosterEntry1, rosterEntry2)), maxSharesPerNode, selfNodeId); + + assertNotNull(directory); + assertEquals((15 + 2) / 2, directory.getThreshold()); + assertEquals(10, directory.getCurrentParticipantOwnedShares().size()); + assertEquals(15, directory.getShareIds().size()); + } + + @Test + public void testValidateTssMessages() { + final var body = getTssBody(); + final var tssLibrary = mock(TssLibrary.class); + final var tssParticipantDirectory = mock(TssParticipantDirectory.class); + given(tssLibrary.verifyTssMessage(any(), any())).willReturn(true); + + final var validMessages = + validateTssMessages(List.of(body.tssMessageOrThrow()), tssParticipantDirectory, tssLibrary); + + assertEquals(1, validMessages.size()); + } + + @Test + 
public void testValidateTssMessagesFails() { + final var body = getTssBody(); + final var tssLibrary = mock(TssLibrary.class); + final var tssParticipantDirectory = mock(TssParticipantDirectory.class); + given(tssLibrary.verifyTssMessage(any(), any())).willReturn(false); + + final var validMessages = + validateTssMessages(List.of(body.tssMessageOrThrow()), tssParticipantDirectory, tssLibrary); + + assertEquals(0, validMessages.size()); + } + + @Test + public void testGetTssMessages() { + final var body = getTssBody(); + final var validTssOps = List.of(body.tssMessageOrThrow()); + final var tssMessages = TssUtils.getTssMessages(validTssOps); + + assertEquals(1, tssMessages.size()); + assertThat(body.tssMessageOrThrow().tssMessage().toByteArray()) + .isEqualTo(tssMessages.get(0).bytes()); + } + + @Test + public void testComputeNodeShares() { + RosterEntry entry1 = new RosterEntry(1L, 100L, null, null, null); + RosterEntry entry2 = new RosterEntry(2L, 50L, null, null, null); + + List entries = List.of(entry1, entry2); + long maxTssMessagesPerNode = 10L; + + final var shares = TssUtils.computeNodeShares(entries, maxTssMessagesPerNode); + + assertEquals(2, shares.size()); + assertEquals(10L, shares.get(1L)); + assertEquals(5L, shares.get(2L)); + } + + @Test + public void testComputeNodeSharesEmptyRoster() { + List entries = List.of(); + long maxTssMessagesPerNode = 10L; + + final var shares = TssUtils.computeNodeShares(entries, maxTssMessagesPerNode); + + assertTrue(shares.isEmpty()); + } +} diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/handlers/TssVoteHandlerTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/handlers/TssVoteHandlerTest.java index cac2aea3bd97..24bfecfc37af 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/handlers/TssVoteHandlerTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/tss/handlers/TssVoteHandlerTest.java @@ -17,42 +17,201 @@ package 
com.hedera.node.app.tss.handlers; import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mock.Strictness.LENIENT; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; +import com.hedera.hapi.node.state.tss.TssVoteMapKey; import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.hapi.services.auxiliary.tss.TssVoteTransactionBody; +import com.hedera.node.app.spi.store.StoreFactory; import com.hedera.node.app.spi.workflows.HandleContext; +import com.hedera.node.app.spi.workflows.HandleException; import com.hedera.node.app.spi.workflows.PreHandleContext; +import com.hedera.node.app.tss.stores.WritableTssStore; +import com.hedera.pbj.runtime.io.buffer.Bytes; +import com.swirlds.platform.state.service.ReadableRosterStore; +import com.swirlds.state.spi.info.NodeInfo; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.MockitoAnnotations; import org.mockito.junit.jupiter.MockitoExtension; @ExtendWith(MockitoExtension.class) class TssVoteHandlerTest { - @Mock - private TssSubmissions submissionManager; @Mock private PreHandleContext preHandleContext; - @Mock + @Mock(strictness = LENIENT) private HandleContext handleContext; - private TssVoteHandler subject; + @Mock + private WritableTssStore tssBaseStore; + + @Mock + private ReadableRosterStore rosterStore; + + @Mock + private TssVoteTransactionBody tssVoteTransactionBody; + + @Mock + private TransactionBody transactionBody; + + @Mock + private 
StoreFactory storeFactory; + + @Mock(strictness = LENIENT) + private NodeInfo nodeInfo; + + private TssVoteHandler tssVoteHandler; @BeforeEach void setUp() { - subject = new TssVoteHandler(); + MockitoAnnotations.openMocks(this); + tssVoteHandler = new TssVoteHandler(); + when(handleContext.creatorInfo()).thenReturn(nodeInfo); + when(nodeInfo.nodeId()).thenReturn(1L); + } + + @Test + void handleDoesNotThrowWhenValidContext() throws HandleException { + when(handleContext.body()).thenReturn(transactionBody); + when(transactionBody.tssVoteOrThrow()).thenReturn(tssVoteTransactionBody); + when(handleContext.storeFactory()).thenReturn(storeFactory); + when(storeFactory.writableStore(WritableTssStore.class)).thenReturn(tssBaseStore); + ; + + when(tssVoteTransactionBody.targetRosterHash()).thenReturn(Bytes.EMPTY); + when(tssBaseStore.exists(any(TssVoteMapKey.class))).thenReturn(false); + + try (MockedStatic mockedStatic = mockStatic(TssVoteHandler.class)) { + mockedStatic + .when(() -> TssVoteHandler.hasReachedThreshold(any(), any())) + .thenReturn(false); + tssVoteHandler.handle(handleContext); + } + + verify(tssBaseStore).put(any(TssVoteMapKey.class), eq(tssVoteTransactionBody)); + } + + @Test + void handleReturnsWhenDuplicateVoteExists() throws HandleException { + when(handleContext.body()).thenReturn(transactionBody); + when(transactionBody.tssVoteOrThrow()).thenReturn(tssVoteTransactionBody); + when(handleContext.storeFactory()).thenReturn(storeFactory); + when(storeFactory.writableStore(WritableTssStore.class)).thenReturn(tssBaseStore); + when(tssVoteTransactionBody.targetRosterHash()).thenReturn(Bytes.EMPTY); + when(tssBaseStore.exists(any(TssVoteMapKey.class))).thenReturn(true); + + tssVoteHandler.handle(handleContext); + + verify(tssBaseStore, never()).put(any(TssVoteMapKey.class), eq(tssVoteTransactionBody)); + } + + @Test + void hasReachedThresholdReturnsFalseWhenThresholdIsNotMet() { + // Setup in-memory data + final RosterEntry rosterEntry1 = new 
RosterEntry(1L, 1L, null, null, List.of()); + final RosterEntry rosterEntry2 = new RosterEntry(2L, 4L, null, null, List.of()); + final RosterEntry rosterEntry3 = new RosterEntry(3L, 2L, null, null, List.of()); + final Roster roster = new Roster(List.of(rosterEntry1, rosterEntry2, rosterEntry3)); + final TssVoteTransactionBody voteTransactionBody = + new TssVoteTransactionBody(Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY); + final TssVoteTransactionBody voteTransactionBody2 = + new TssVoteTransactionBody(Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.fromHex("01")); + final TssVoteTransactionBody voteTransactionBody3 = + new TssVoteTransactionBody(Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.fromHex("02")); + + // Setup stores + final Map voteStore = new HashMap<>(); + voteStore.put(new TssVoteMapKey(Bytes.EMPTY, 1L), voteTransactionBody); + voteStore.put(new TssVoteMapKey(Bytes.EMPTY, 2L), voteTransactionBody2); + voteStore.put(new TssVoteMapKey(Bytes.EMPTY, 3L), voteTransactionBody3); + + // Mock behavior + when(handleContext.storeFactory()).thenReturn(storeFactory); + when(storeFactory.writableStore(WritableTssStore.class)).thenReturn(tssBaseStore); + when(storeFactory.readableStore(ReadableRosterStore.class)).thenReturn(rosterStore); + when(rosterStore.getActiveRoster()).thenReturn(roster); + when(tssBaseStore.exists(any(TssVoteMapKey.class))) + .thenAnswer(invocation -> voteStore.containsKey(invocation.getArgument(0))); + when(tssBaseStore.getVote(any(TssVoteMapKey.class))) + .thenAnswer(invocation -> voteStore.get(invocation.getArgument(0))); + + final boolean result = TssVoteHandler.hasReachedThreshold(voteTransactionBody, handleContext); + + assertFalse(result, "Threshold should not be met"); + } + + @Test + void hasReachedThresholdReturnsTrueWhenThresholdIsMet() { + // Setup in-memory data + final RosterEntry rosterEntry1 = new RosterEntry(1L, 1L, null, null, List.of()); + final RosterEntry rosterEntry2 = new 
RosterEntry(2L, 2L, null, null, List.of()); + final RosterEntry rosterEntry3 = new RosterEntry(3L, 3L, null, null, List.of()); + final Roster roster = new Roster(List.of(rosterEntry1, rosterEntry2, rosterEntry3)); + final TssVoteTransactionBody voteTransactionBody = + new TssVoteTransactionBody(Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY); + final TssVoteTransactionBody voteTransactionBody2 = + new TssVoteTransactionBody(Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.fromHex("01")); + final TssVoteTransactionBody voteTransactionBody3 = + new TssVoteTransactionBody(Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY); + + // Setup stores + final Map voteStore = new HashMap<>(); + voteStore.put(new TssVoteMapKey(Bytes.EMPTY, 1L), voteTransactionBody); + voteStore.put(new TssVoteMapKey(Bytes.EMPTY, 2L), voteTransactionBody2); + voteStore.put(new TssVoteMapKey(Bytes.EMPTY, 3L), voteTransactionBody3); + + // Mock behavior + when(handleContext.storeFactory()).thenReturn(storeFactory); + when(storeFactory.writableStore(WritableTssStore.class)).thenReturn(tssBaseStore); + when(storeFactory.readableStore(ReadableRosterStore.class)).thenReturn(rosterStore); + when(rosterStore.getActiveRoster()).thenReturn(roster); + when(tssBaseStore.exists(any(TssVoteMapKey.class))) + .thenAnswer(invocation -> voteStore.containsKey(invocation.getArgument(0))); + when(tssBaseStore.getVote(any(TssVoteMapKey.class))) + .thenAnswer(invocation -> voteStore.get(invocation.getArgument(0))); + + boolean result = TssVoteHandler.hasReachedThreshold(voteTransactionBody, handleContext); + + assertTrue(result, "Threshold should be met"); } @Test - void nothingImplementedYet() { - assertDoesNotThrow(() -> subject.preHandle(preHandleContext)); - assertDoesNotThrow(() -> subject.pureChecks(tssVote())); - assertDoesNotThrow(() -> subject.handle(handleContext)); + void preHandleDoesNotThrowWhenContextIsValid() { + assertDoesNotThrow(() -> 
tssVoteHandler.preHandle(preHandleContext)); } - private TransactionBody tssVote() { - return TransactionBody.DEFAULT; + @Test + void pureChecksDoesNotThrowWhenTransactionBodyIsValid() { + assertDoesNotThrow(() -> tssVoteHandler.pureChecks(transactionBody)); + } + + @Test + void hasReachedThresholdThrowsIllegalArgumentExceptionWhenActiveRosterIsNull() { + when(handleContext.storeFactory()).thenReturn(storeFactory); + when(storeFactory.readableStore(ReadableRosterStore.class)).thenReturn(rosterStore); + when(rosterStore.getActiveRoster()).thenReturn(null); + + TssVoteTransactionBody voteTransactionBody = + new TssVoteTransactionBody(Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY, Bytes.EMPTY); + + assertThrows( + IllegalArgumentException.class, + () -> TssVoteHandler.hasReachedThreshold(voteTransactionBody, handleContext)); } } diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/DispatchProcessorTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/DispatchProcessorTest.java index aa56f2daf61d..7b5426829119 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/DispatchProcessorTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/DispatchProcessorTest.java @@ -69,6 +69,7 @@ import com.hedera.node.app.spi.signatures.SignatureVerification; import com.hedera.node.app.spi.workflows.HandleContext; import com.hedera.node.app.spi.workflows.HandleException; +import com.hedera.node.app.workflows.OpWorkflowMetrics; import com.hedera.node.app.workflows.TransactionInfo; import com.hedera.node.app.workflows.dispatcher.TransactionDispatcher; import com.hedera.node.app.workflows.handle.dispatch.DispatchValidator; @@ -168,6 +169,9 @@ class DispatchProcessorTest { @Mock private NetworkInfo networkInfo; + @Mock + private OpWorkflowMetrics opWorkflowMetrics; + private DispatchProcessor subject; @BeforeEach @@ -182,7 +186,8 @@ void setUp() { 
exchangeRateManager, dispatcher, ethereumTransactionHandler, - networkInfo); + networkInfo, + opWorkflowMetrics); given(dispatch.stack()).willReturn(stack); given(dispatch.recordBuilder()).willReturn(recordBuilder); } @@ -200,6 +205,7 @@ void creatorErrorAsExpected() { verify(feeAccumulator).chargeNetworkFee(CREATOR_ACCOUNT_ID, FEES.networkFee()); verify(recordBuilder).status(INVALID_PAYER_SIGNATURE); assertFinished(IsRootStack.NO); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -221,6 +227,8 @@ void waivedFeesDoesNotCharge() { verifyNoInteractions(feeAccumulator); verify(dispatcher).dispatchHandle(context); verify(recordBuilder).status(SUCCESS); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); + assertFinished(); } @@ -240,6 +248,8 @@ void unauthorizedSystemDeleteIsNotSupported() { verifyTrackedFeePayments(); verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES); verify(recordBuilder).status(NOT_SUPPORTED); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); + assertFinished(); } @@ -259,6 +269,8 @@ void unauthorizedOtherIsUnauthorized() { verifyTrackedFeePayments(); verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES); verify(recordBuilder).status(UNAUTHORIZED); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); + assertFinished(); } @@ -280,6 +292,8 @@ void unprivilegedSystemUndeleteIsAuthorizationFailed() { verifyTrackedFeePayments(); verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES); verify(recordBuilder).status(AUTHORIZATION_FAILED); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); + assertFinished(); } @@ -301,6 +315,8 @@ void unprivilegedSystemDeleteIsImpermissible() { verifyTrackedFeePayments(); verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES); verify(recordBuilder).status(ENTITY_NOT_ALLOWED_TO_DELETE); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); + 
assertFinished(); } @@ -322,6 +338,8 @@ void invalidSignatureCryptoTransferFails() { verifyTrackedFeePayments(); verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES); verify(recordBuilder).status(INVALID_SIGNATURE); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); + assertFinished(); } @@ -345,6 +363,8 @@ void invalidHollowAccountCryptoTransferFails() { verifyTrackedFeePayments(); verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES); verify(recordBuilder).status(INVALID_SIGNATURE); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); + assertFinished(); } @@ -368,6 +388,8 @@ void thrownHandleExceptionRollsBackIfRequested() { verify(dispatcher).dispatchHandle(context); verify(recordBuilder).status(TOKEN_NOT_ASSOCIATED_TO_ACCOUNT); verify(feeAccumulator, times(2)).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); + assertFinished(); } @@ -391,6 +413,7 @@ void thrownHandleExceptionDoesNotRollBackIfNotRequested() { verify(dispatcher).dispatchHandle(context); verify(recordBuilder).status(CONTRACT_REVERT_EXECUTED); verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); assertFinished(); } @@ -414,9 +437,32 @@ void consGasExhaustedWaivesServiceFee() throws ThrottleException { verify(recordBuilder).status(CONSENSUS_GAS_EXHAUSTED); verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES); verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES.withoutServiceComponent()); + verify(opWorkflowMetrics).incrementThrottled(CONTRACT_CALL); assertFinished(); } + @Test + void throttledTxIncrementsMetric() throws ThrottleException { + given(dispatch.fees()).willReturn(FEES); + given(dispatch.feeAccumulator()).willReturn(feeAccumulator); + 
given(dispatchValidator.validationReportFor(dispatch)).willReturn(newSuccess(CREATOR_ACCOUNT_ID, PAYER)); + given(dispatch.payerId()).willReturn(PAYER_ACCOUNT_ID); + given(dispatch.txnInfo()).willReturn(CRYPTO_TRANSFER_TXN_INFO); + given(dispatch.txnCategory()).willReturn(HandleContext.TransactionCategory.CHILD); + givenAuthorization(CRYPTO_TRANSFER_TXN_INFO); + doThrow(ThrottleException.newGasThrottleException()) + .when(dispatchUsageManager) + .screenForCapacity(dispatch); + + subject.processDispatch(dispatch); + + verify(platformStateUpdates, never()).handleTxBody(stack, CRYPTO_TRANSFER_TXN_INFO.txBody()); + verify(recordBuilder).status(CONSENSUS_GAS_EXHAUSTED); + verify(feeAccumulator).chargeNetworkFee(PAYER_ACCOUNT_ID, FEES.totalFee()); + verify(opWorkflowMetrics).incrementThrottled(CRYPTO_TRANSFER); + assertFinished(IsRootStack.NO); + } + @Test void consGasExhaustedForEthTxnDoesExtraWork() throws ThrottleException { given(dispatch.fees()).willReturn(FEES); @@ -439,6 +485,7 @@ void consGasExhaustedForEthTxnDoesExtraWork() throws ThrottleException { verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES); verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES.withoutServiceComponent()); verify(ethereumTransactionHandler).handleThrottled(context); + verify(opWorkflowMetrics).incrementThrottled(ETHEREUM_TRANSACTION); assertFinished(); } @@ -460,6 +507,7 @@ void failInvalidWaivesServiceFee() { verify(recordBuilder).status(FAIL_INVALID); verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES); verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES.withoutServiceComponent()); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); assertFinished(); } @@ -486,6 +534,7 @@ void happyPathContractCallAsExpected() { verify(platformStateUpdates).handleTxBody(stack, CONTRACT_TXN_INFO.txBody()); verify(recordBuilder, times(2)).status(SUCCESS); 
verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); assertFinished(); } @@ -505,6 +554,7 @@ void happyPathChildCryptoTransferAsExpected() { verify(platformStateUpdates, never()).handleTxBody(stack, CRYPTO_TRANSFER_TXN_INFO.txBody()); verify(recordBuilder).status(SUCCESS); verify(feeAccumulator).chargeNetworkFee(PAYER_ACCOUNT_ID, FEES.totalFee()); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); assertFinished(IsRootStack.NO); } @@ -522,6 +572,7 @@ void happyPathFreeChildCryptoTransferAsExpected() { verify(platformStateUpdates, never()).handleTxBody(stack, CRYPTO_TRANSFER_TXN_INFO.txBody()); verify(recordBuilder).status(SUCCESS); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); assertFinished(IsRootStack.NO); } @@ -545,6 +596,7 @@ void unableToAffordServiceFeesChargesAccordingly() { verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES.withoutServiceComponent()); verify(recordBuilder).status(INSUFFICIENT_ACCOUNT_BALANCE); verifyNoInteractions(dispatcher); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); assertFinished(); } @@ -564,6 +616,7 @@ void duplicateChargesAccordingly() { verify(feeAccumulator).chargeFees(PAYER_ACCOUNT_ID, CREATOR_ACCOUNT_ID, FEES.withoutServiceComponent()); verify(recordBuilder).status(DUPLICATE_TRANSACTION); verifyNoInteractions(dispatcher); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); assertFinished(); } diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/dispatch/ChildDispatchFactoryTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/dispatch/ChildDispatchFactoryTest.java index 8b682da82c08..3c1ec51ebdb2 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/dispatch/ChildDispatchFactoryTest.java +++ 
b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/dispatch/ChildDispatchFactoryTest.java @@ -17,6 +17,7 @@ package com.hedera.node.app.workflows.handle.dispatch; import static com.hedera.hapi.node.base.HederaFunctionality.CONTRACT_CALL; +import static com.hedera.node.app.fixtures.AppTestBase.DEFAULT_CONFIG; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -186,12 +187,13 @@ void noOpKeyVerifierAlwaysPasses() { @Test void keyVerifierWithNullCallbackIsNoOp() { - assertThat(ChildDispatchFactory.getKeyVerifier(null)).isInstanceOf(ChildDispatchFactory.NoOpKeyVerifier.class); + assertThat(ChildDispatchFactory.getKeyVerifier(null, DEFAULT_CONFIG)) + .isInstanceOf(ChildDispatchFactory.NoOpKeyVerifier.class); } @Test void keyVerifierOnlySupportsKeyVerification() { - final var derivedVerifier = ChildDispatchFactory.getKeyVerifier(verifierCallback); + final var derivedVerifier = ChildDispatchFactory.getKeyVerifier(verifierCallback, DEFAULT_CONFIG); assertThatThrownBy(() -> derivedVerifier.verificationFor(Key.DEFAULT, assistant)) .isInstanceOf(UnsupportedOperationException.class); assertThatThrownBy(() -> derivedVerifier.verificationFor(Bytes.EMPTY)) @@ -201,14 +203,14 @@ void keyVerifierOnlySupportsKeyVerification() { @Test void keyVerifierPassesImmediatelyGivenTrueCallback() { - final var derivedVerifier = ChildDispatchFactory.getKeyVerifier(verifierCallback); + final var derivedVerifier = ChildDispatchFactory.getKeyVerifier(verifierCallback, DEFAULT_CONFIG); given(verifierCallback.test(AN_ED25519_KEY)).willReturn(true); assertThat(derivedVerifier.verificationFor(AN_ED25519_KEY).passed()).isTrue(); } @Test void keyVerifierUsesDelegateIfNotImmediatePass() { - final var derivedVerifier = ChildDispatchFactory.getKeyVerifier(verifierCallback); + final var derivedVerifier = 
ChildDispatchFactory.getKeyVerifier(verifierCallback, DEFAULT_CONFIG); given(verifierCallback.test(A_THRESHOLD_KEY)).willReturn(false); given(verifierCallback.test(AN_ED25519_KEY)).willReturn(true); assertThat(derivedVerifier.verificationFor(A_THRESHOLD_KEY).passed()).isTrue(); @@ -216,9 +218,10 @@ void keyVerifierUsesDelegateIfNotImmediatePass() { @Test void keyVerifierDetectsNoPass() { - final var derivedVerifier = ChildDispatchFactory.getKeyVerifier(verifierCallback); + final var derivedVerifier = ChildDispatchFactory.getKeyVerifier(verifierCallback, DEFAULT_CONFIG); assertThat(derivedVerifier.verificationFor(A_THRESHOLD_KEY).passed()).isFalse(); - verify(verifierCallback).test(A_THRESHOLD_KEY); + verify(verifierCallback).test(AN_ED25519_KEY); + verify(verifierCallback).test(A_CONTRACT_ID_KEY); } @Test diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/metrics/OpWorkflowMetricsTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/metrics/OpWorkflowMetricsTest.java index 6ceadc9e6a31..228be9e61309 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/metrics/OpWorkflowMetricsTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/metrics/OpWorkflowMetricsTest.java @@ -27,6 +27,7 @@ import com.hedera.node.config.ConfigProvider; import com.hedera.node.config.VersionedConfigImpl; import com.hedera.node.config.testfixtures.HederaTestConfigBuilder; +import com.swirlds.metrics.api.Counter; import com.swirlds.metrics.api.Metrics; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -54,8 +55,11 @@ void testConstructorInitializesMetrics() { new OpWorkflowMetrics(metrics, configProvider); // then - final int transactionMetricsCount = (HederaFunctionality.values().length - 1) * 2; - assertThat(metrics.findMetricsByCategory("app")).hasSize(transactionMetricsCount + 1); + // subtract 1 to exclude 
HederaFunctionality.NONE + // multiply by 3 to account for max, avg, and throttle metrics which are created for each functionality + // add 1 to account for gasPerConsSec metric which is not functionality specific + final int transactionMetricsCount = ((HederaFunctionality.values().length - 1) * 3) + 1; + assertThat(metrics.findMetricsByCategory("app")).hasSize(transactionMetricsCount); } @Test @@ -68,6 +72,8 @@ void testInitialValue() { .isEqualTo(0); assertThat(metrics.getMetric("app", "cryptoCreateDurationAvg").get(VALUE)) .isEqualTo(0); + assertThat(metrics.getMetric("app", "cryptoCreateThrottledTxns").get(VALUE)) + .isSameAs(0L); } @SuppressWarnings("DataFlowIssue") @@ -81,6 +87,16 @@ void testUpdateDurationWithInvalidArguments() { .isInstanceOf(NullPointerException.class); } + @Test + void testIncrementThrottledWithInvalidArguments() { + // given + final var handleWorkflowMetrics = new OpWorkflowMetrics(metrics, configProvider); + + // when + assertThatThrownBy(() -> handleWorkflowMetrics.incrementThrottled(null)) + .isInstanceOf(NullPointerException.class); + } + @Test void testUpdateTransactionDurationSingleUpdate() { // given @@ -129,6 +145,19 @@ void testUpdateDurationThreeUpdates() { .isEqualTo(7); } + @Test + void testIncrementThrottled() { + // given + final var handleWorkflowMetrics = new OpWorkflowMetrics(metrics, configProvider); + + // when + handleWorkflowMetrics.incrementThrottled(HederaFunctionality.CRYPTO_CREATE); + + // then + final var throttledMetric = (Counter) metrics.getMetric("app", "cryptoCreateThrottledTxns"); + assertThat(throttledMetric.get()).isSameAs(1L); + } + @Test void testInitialStartConsensusRound() { // given diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/steps/NodeStakeUpdatesTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/steps/NodeStakeUpdatesTest.java index 248f411d009f..c8117e9c3a45 100644 --- 
a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/steps/NodeStakeUpdatesTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/steps/NodeStakeUpdatesTest.java @@ -17,34 +17,63 @@ package com.hedera.node.app.workflows.handle.steps; import static com.hedera.node.app.fixtures.AppTestBase.DEFAULT_CONFIG; +import static com.hedera.node.app.service.addressbook.AddressBookHelper.NODES_KEY; import static com.hedera.node.app.service.token.impl.handlers.staking.StakePeriodManager.DEFAULT_STAKING_PERIOD_MINS; import static com.hedera.node.config.types.StreamMode.BLOCKS; import static com.hedera.node.config.types.StreamMode.RECORDS; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.notNull; import static org.mockito.BDDMockito.given; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoInteractions; +import com.hedera.hapi.node.base.ServiceEndpoint; import com.hedera.hapi.node.base.Timestamp; +import com.hedera.hapi.node.state.addressbook.Node; import com.hedera.hapi.node.state.blockrecords.BlockInfo; +import com.hedera.hapi.node.state.common.EntityNumber; +import com.hedera.hapi.node.state.primitives.ProtoBytes; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; +import com.hedera.hapi.node.state.roster.RosterState; +import com.hedera.hapi.node.state.roster.RoundRosterPair; import com.hedera.hapi.node.transaction.ExchangeRateSet; import com.hedera.node.app.fees.ExchangeRateManager; import com.hedera.node.app.records.ReadableBlockRecordStore; +import com.hedera.node.app.service.addressbook.ReadableNodeStore; +import com.hedera.node.app.service.addressbook.impl.ReadableNodeStoreImpl; import 
com.hedera.node.app.service.token.impl.handlers.staking.EndOfStakingPeriodUpdater; import com.hedera.node.app.service.token.records.TokenContext; +import com.hedera.node.app.spi.metrics.StoreMetricsService; +import com.hedera.node.app.spi.store.StoreFactory; +import com.hedera.node.app.spi.workflows.HandleContext; import com.hedera.node.app.tss.TssBaseService; import com.hedera.node.app.workflows.handle.Dispatch; import com.hedera.node.app.workflows.handle.stack.SavepointStackImpl; import com.hedera.node.config.data.StakingConfig; import com.hedera.node.config.testfixtures.HederaTestConfigBuilder; +import com.hedera.node.config.types.StreamMode; +import com.hedera.pbj.runtime.io.buffer.Bytes; +import com.swirlds.common.RosterStateId; import com.swirlds.config.api.Configuration; +import com.swirlds.platform.roster.RosterUtils; +import com.swirlds.state.spi.WritableKVState; +import com.swirlds.state.spi.WritableSingletonState; +import com.swirlds.state.spi.WritableStates; +import com.swirlds.state.test.fixtures.MapWritableKVState; import java.time.Duration; import java.time.Instant; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.Mock; @@ -75,21 +104,45 @@ class NodeStakeUpdatesTest { @Mock private Dispatch dispatch; + @Mock + private WritableStates writableStates; + + @Mock + private HandleContext handleContext; + + @Mock + private StoreFactory storeFactory; + + @Mock + private StoreMetricsService storeMetricsService; + + @Mock + private WritableSingletonState rosterState; + private NodeStakeUpdates subject; @BeforeEach void setUp() { given(context.readableStore(ReadableBlockRecordStore.class)).willReturn(blockStore); - subject = new NodeStakeUpdates(stakingPeriodCalculator, 
exchangeRateManager, tssBaseService); + subject = + new NodeStakeUpdates(stakingPeriodCalculator, exchangeRateManager, tssBaseService, storeMetricsService); } @SuppressWarnings("DataFlowIssue") @Test void nullArgConstructor() { - Assertions.assertThatThrownBy(() -> new NodeStakeUpdates(null, exchangeRateManager, tssBaseService)) + Assertions.assertThatThrownBy( + () -> new NodeStakeUpdates(null, exchangeRateManager, tssBaseService, storeMetricsService)) + .isInstanceOf(NullPointerException.class); + Assertions.assertThatThrownBy( + () -> new NodeStakeUpdates(stakingPeriodCalculator, null, tssBaseService, storeMetricsService)) + .isInstanceOf(NullPointerException.class); + Assertions.assertThatThrownBy(() -> + new NodeStakeUpdates(stakingPeriodCalculator, exchangeRateManager, null, storeMetricsService)) .isInstanceOf(NullPointerException.class); - Assertions.assertThatThrownBy(() -> new NodeStakeUpdates(stakingPeriodCalculator, null, tssBaseService)) + Assertions.assertThatThrownBy( + () -> new NodeStakeUpdates(stakingPeriodCalculator, exchangeRateManager, tssBaseService, null)) .isInstanceOf(NullPointerException.class); } @@ -254,14 +307,213 @@ void isNextStakingPeriodNowCustomStakingPeriodIsLater() { Assertions.assertThat(result).isTrue(); } + @Test + void stakingPeriodDoesntSetCandidateRosterForDisabledFlag() { + // Simulate staking information + given(context.configuration()).willReturn(newConfig(990, false)); + given(blockStore.getLastBlockInfo()) + .willReturn(BlockInfo.newBuilder() + .consTimeOfLastHandledTxn(new Timestamp(CONSENSUS_TIME_1234567.getEpochSecond(), 0)) + .build()); + given(context.consensusTime()).willReturn(CONSENSUS_TIME_1234567.plus(Duration.ofDays(2))); + + subject.process(dispatch, stack, context, StreamMode.RECORDS, false, Instant.EPOCH); + verifyNoInteractions(tssBaseService); + } + + @Test + @DisplayName("Service won't set the current candidate roster as the new candidate roster") + void doesntSetSameCandidateRoster() { + // Simulate 
staking information, + given(blockStore.getLastBlockInfo()) + .willReturn(BlockInfo.newBuilder() + .consTimeOfLastHandledTxn(new Timestamp(CONSENSUS_TIME_1234567.getEpochSecond(), 0)) + .build()); + given(context.consensusTime()).willReturn(CONSENSUS_TIME_1234567.plus(Duration.ofDays(2))); + + // Simulate disabled `keyCandidateRoster` property + given(context.configuration()).willReturn(newConfig(990, false)); + + subject.process(dispatch, stack, context, StreamMode.RECORDS, false, Instant.EPOCH); + verify(tssBaseService, never()).setCandidateRoster(any(), any()); + } + + @Test + @DisplayName("Service won't set the active roster as the new candidate roster") + void doesntSetActiveRosterAsCandidateRoster() { + // Simulate staking information + given(blockStore.getLastBlockInfo()) + .willReturn(BlockInfo.newBuilder() + .consTimeOfLastHandledTxn(new Timestamp(CONSENSUS_TIME_1234567.getEpochSecond(), 0)) + .build()); + given(context.consensusTime()).willReturn(CONSENSUS_TIME_1234567.plus(Duration.ofDays(2))); + + // Enable keyCandidateRoster + given(context.configuration()).willReturn(newConfig(DEFAULT_STAKING_PERIOD_MINS, true)); + + // Simulate the same address book input as the current candidate and active rosters + final var nodeStore = simulateNodes(RosterCase.NODE_1, RosterCase.NODE_2, RosterCase.NODE_3, RosterCase.NODE_4); + given(dispatch.handleContext()).willReturn(handleContext); + given(handleContext.storeFactory()).willReturn(storeFactory); + given(storeFactory.readableStore(ReadableNodeStore.class)).willReturn(nodeStore); + given(stack.getWritableStates(notNull())).willReturn(writableStates); + simulateCandidateAndActiveRosters(); + + // Attempt to set the (equivalent) active roster as the new candidate roster + subject.process(dispatch, stack, context, StreamMode.RECORDS, false, Instant.EPOCH); + verify(tssBaseService, never()).setCandidateRoster(any(), any()); + } + + @Test + void stakingPeriodSetsCandidateRosterForEnabledFlag() { + // Simulate staking 
information + given(blockStore.getLastBlockInfo()) + .willReturn(BlockInfo.newBuilder() + .consTimeOfLastHandledTxn(new Timestamp(CONSENSUS_TIME_1234567.getEpochSecond(), 0)) + .build()); + given(context.consensusTime()).willReturn(CONSENSUS_TIME_1234567.plus(Duration.ofDays(2))); + + // Enable keyCandidateRoster + given(context.configuration()).willReturn(newConfig(DEFAULT_STAKING_PERIOD_MINS, true)); + + // Simulate an updated address book + final var nodeStore = simulateNodes(RosterCase.NODE_1, RosterCase.NODE_2, RosterCase.NODE_3); + given(dispatch.handleContext()).willReturn(handleContext); + given(handleContext.storeFactory()).willReturn(storeFactory); + given(storeFactory.readableStore(ReadableNodeStore.class)).willReturn(nodeStore); + given(stack.getWritableStates(notNull())).willReturn(writableStates); + simulateCandidateAndActiveRosters(); + + subject.process(dispatch, stack, context, StreamMode.RECORDS, false, Instant.EPOCH); + verify(tssBaseService).setCandidateRoster(notNull(), notNull()); + } + + private ReadableNodeStore simulateNodes(Node... 
nodes) { + final Map translated = Arrays.stream(nodes) + .collect(Collectors.toMap( + n -> EntityNumber.newBuilder().number(n.nodeId()).build(), node -> node)); + final WritableKVState nodeWritableKVState = new MapWritableKVState<>(NODES_KEY, translated); + given(writableStates.get(NODES_KEY)).willReturn(nodeWritableKVState); + final ReadableNodeStore nodeStore = new ReadableNodeStoreImpl(writableStates); + given(context.readableStore(ReadableNodeStore.class)).willReturn(nodeStore); + + return nodeStore; + } + + private void simulateCandidateAndActiveRosters() { + given(rosterState.get()) + .willReturn(new RosterState( + RosterCase.CANDIDATE_ROSTER_HASH.value(), + List.of(RoundRosterPair.newBuilder() + .roundNumber(12345) + .activeRosterHash(RosterCase.ACTIVE_ROSTER_HASH.value()) + .build()))); + given(writableStates.getSingleton(RosterStateId.ROSTER_STATES_KEY)) + .willReturn(rosterState); + given(writableStates.get(RosterStateId.ROSTER_KEY)) + .willReturn(new MapWritableKVState<>( + RosterStateId.ROSTER_KEY, + Map.of( + RosterCase.CANDIDATE_ROSTER_HASH, + RosterCase.CURRENT_CANDIDATE_ROSTER, + RosterCase.ACTIVE_ROSTER_HASH, + RosterCase.ACTIVE_ROSTER))); + } + private Configuration newPeriodMinsConfig() { return newPeriodMinsConfig(DEFAULT_STAKING_PERIOD_MINS); } private Configuration newPeriodMinsConfig(final long periodMins) { + return newConfig(periodMins, false); + } + + private Configuration newConfig(final long periodMins, final boolean keyCandidateRoster) { return HederaTestConfigBuilder.create() .withConfigDataType(StakingConfig.class) .withValue("staking.periodMins", periodMins) + .withValue("tss.keyCandidateRoster", keyCandidateRoster) .getOrCreateConfig(); } + + private static class RosterCase { + static final Bytes BYTES_1_2_3 = Bytes.wrap("1, 2, 3"); + static final Node NODE_1 = Node.newBuilder() + .nodeId(1) + .weight(10) + .gossipCaCertificate(BYTES_1_2_3) + .gossipEndpoint(ServiceEndpoint.newBuilder() + .ipAddressV4(Bytes.wrap("1, 1")) + .port(11) 
+ .build()) + .build(); + static final RosterEntry ROSTER_NODE_1 = RosterEntry.newBuilder() + .nodeId(NODE_1.nodeId()) + .weight(NODE_1.weight()) + .gossipCaCertificate(NODE_1.gossipCaCertificate()) + .gossipEndpoint(NODE_1.gossipEndpoint()) + .build(); + static final Node NODE_2 = Node.newBuilder() + .nodeId(2) + .weight(20) + .gossipCaCertificate(BYTES_1_2_3) + .gossipEndpoint(ServiceEndpoint.newBuilder() + .ipAddressV4(Bytes.wrap("2, 2")) + .port(22) + .build()) + .build(); + static final RosterEntry ROSTER_NODE_2 = RosterEntry.newBuilder() + .nodeId(NODE_2.nodeId()) + .weight(NODE_2.weight()) + .gossipCaCertificate(NODE_2.gossipCaCertificate()) + .gossipEndpoint((ServiceEndpoint.newBuilder() + .ipAddressV4(Bytes.wrap("2, 2")) + .port(22) + .build())) + .build(); + static final Node NODE_3 = Node.newBuilder() + .nodeId(3) + .weight(30) + .gossipCaCertificate(BYTES_1_2_3) + .gossipEndpoint(ServiceEndpoint.newBuilder() + .ipAddressV4(Bytes.wrap("3, 3")) + .port(33) + .build()) + .build(); + static final RosterEntry ROSTER_NODE_3 = RosterEntry.newBuilder() + .nodeId(NODE_3.nodeId()) + .weight(NODE_3.weight()) + .gossipCaCertificate(NODE_3.gossipCaCertificate()) + .gossipEndpoint(NODE_3.gossipEndpoint()) + .build(); + static final Node NODE_4 = Node.newBuilder() + .nodeId(4) + .weight(40) + .gossipCaCertificate(BYTES_1_2_3) + .gossipEndpoint(ServiceEndpoint.newBuilder() + .ipAddressV4(Bytes.wrap("4, 4")) + .port(44) + .build()) + .build(); + static final RosterEntry ROSTER_NODE_4 = RosterEntry.newBuilder() + .nodeId(NODE_4.nodeId()) + .weight(NODE_4.weight()) + .gossipCaCertificate(NODE_4.gossipCaCertificate()) + .gossipEndpoint(NODE_4.gossipEndpoint()) + .build(); + + static final Roster CURRENT_CANDIDATE_ROSTER = Roster.newBuilder() + .rosterEntries(List.of(ROSTER_NODE_1, ROSTER_NODE_2)) + .build(); + static final Roster ACTIVE_ROSTER = Roster.newBuilder() + .rosterEntries(ROSTER_NODE_1, ROSTER_NODE_2, ROSTER_NODE_3, ROSTER_NODE_4) + .build(); + + static final 
ProtoBytes CANDIDATE_ROSTER_HASH = ProtoBytes.newBuilder() + .value(RosterUtils.hash(CURRENT_CANDIDATE_ROSTER).getBytes()) + .build(); + static final ProtoBytes ACTIVE_ROSTER_HASH = ProtoBytes.newBuilder() + .value(RosterUtils.hash(ACTIVE_ROSTER).getBytes()) + .build(); + } } diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/steps/UserTxnTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/steps/UserTxnTest.java index 6209d98524e4..b0c7cb196107 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/steps/UserTxnTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/steps/UserTxnTest.java @@ -39,6 +39,7 @@ import com.hedera.hapi.platform.event.EventTransaction; import com.hedera.node.app.blocks.impl.BoundaryStateChangeListener; import com.hedera.node.app.blocks.impl.KVStateChangeListener; +import com.hedera.node.app.blocks.impl.PairedStreamBuilder; import com.hedera.node.app.fees.ExchangeRateManager; import com.hedera.node.app.fees.FeeManager; import com.hedera.node.app.service.consensus.impl.ConsensusServiceImpl; @@ -55,7 +56,6 @@ import com.hedera.node.app.workflows.dispatcher.TransactionDispatcher; import com.hedera.node.app.workflows.handle.DispatchProcessor; import com.hedera.node.app.workflows.handle.dispatch.ChildDispatchFactory; -import com.hedera.node.app.workflows.handle.record.RecordStreamBuilder; import com.hedera.node.app.workflows.prehandle.PreHandleResult; import com.hedera.node.app.workflows.prehandle.PreHandleWorkflow; import com.hedera.node.config.ConfigProvider; @@ -171,7 +171,7 @@ void setUp() { } @Test - void usesRecordStreamBuilderWithDefaultConfig() { + void usesPairedStreamBuilderWithDefaultConfig() { given(configProvider.getConfiguration()).willReturn(new VersionedConfigImpl(DEFAULT_CONFIG, 1)); final var subject = UserTxn.from( @@ -201,7 +201,7 @@ void usesRecordStreamBuilderWithDefaultConfig() { 
assertNotNull(subject.readableStoreFactory()); assertNotNull(subject.config()); - assertThat(subject.baseBuilder()).isInstanceOf(RecordStreamBuilder.class); + assertThat(subject.baseBuilder()).isInstanceOf(PairedStreamBuilder.class); } @Test diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/ingest/IngestCheckerTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/ingest/IngestCheckerTest.java index 819a574809a1..a287e74111e1 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/ingest/IngestCheckerTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/ingest/IngestCheckerTest.java @@ -16,6 +16,7 @@ package com.hedera.node.app.workflows.ingest; +import static com.hedera.hapi.node.base.HederaFunctionality.UNCHECKED_SUBMIT; import static com.hedera.hapi.node.base.ResponseCodeEnum.ACCOUNT_DELETED; import static com.hedera.hapi.node.base.ResponseCodeEnum.BUSY; import static com.hedera.hapi.node.base.ResponseCodeEnum.DUPLICATE_TRANSACTION; @@ -41,10 +42,11 @@ import static org.mockito.Mock.Strictness.LENIENT; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import com.hedera.hapi.node.base.AccountID; -import com.hedera.hapi.node.base.HederaFunctionality; import com.hedera.hapi.node.base.Key; import com.hedera.hapi.node.base.KeyList; import com.hedera.hapi.node.base.ResponseCodeEnum; @@ -71,6 +73,7 @@ import com.hedera.node.app.state.DeduplicationCache; import com.hedera.node.app.state.recordcache.DeduplicationCacheImpl; import com.hedera.node.app.throttle.SynchronizedThrottleAccumulator; +import com.hedera.node.app.workflows.OpWorkflowMetrics; import com.hedera.node.app.workflows.SolvencyPreCheck; import com.hedera.node.app.workflows.TransactionChecker; import com.hedera.node.app.workflows.TransactionInfo; 
@@ -130,6 +133,9 @@ class IngestCheckerTest extends AppTestBase { @Mock(strictness = LENIENT) private Authorizer authorizer; + @Mock + private OpWorkflowMetrics opWorkflowMetrics; + @Mock(strictness = LENIENT) private SynchronizedThrottleAccumulator synchronizedThrottleAccumulator; @@ -166,12 +172,7 @@ void setUp() throws PreCheckException { .build(); transactionInfo = new TransactionInfo( - tx, - txBody, - MOCK_SIGNATURE_MAP, - tx.signedTransactionBytes(), - HederaFunctionality.UNCHECKED_SUBMIT, - null); + tx, txBody, MOCK_SIGNATURE_MAP, tx.signedTransactionBytes(), UNCHECKED_SUBMIT, null); when(transactionChecker.check(tx, null)).thenReturn(transactionInfo); final var configProvider = HederaTestConfigBuilder.createConfigProvider(); @@ -192,7 +193,8 @@ void setUp() throws PreCheckException { feeManager, authorizer, synchronizedThrottleAccumulator, - instantSource); + instantSource, + opWorkflowMetrics); } @Nested @@ -218,6 +220,7 @@ void testParseAndCheckWithInactivePlatformFails(final PlatformStatus status) { assertThatThrownBy(() -> subject.checkNodeState()) .isInstanceOf(PreCheckException.class) .has(responseCode(PLATFORM_NOT_ACTIVE)); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } } } @@ -242,12 +245,14 @@ void testWrongNodeIdFails() { feeManager, authorizer, synchronizedThrottleAccumulator, - instantSource); + instantSource, + opWorkflowMetrics); // Then the checker should throw a PreCheckException assertThatThrownBy(() -> subject.runAllChecks(state, tx, configuration)) .isInstanceOf(PreCheckException.class) .has(responseCode(INVALID_NODE_ACCOUNT)); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -255,12 +260,7 @@ void testWrongNodeIdFails() { void testRunAllChecksSuccessfully() throws Exception { // given final var expected = new TransactionInfo( - tx, - txBody, - MOCK_SIGNATURE_MAP, - tx.signedTransactionBytes(), - HederaFunctionality.UNCHECKED_SUBMIT, - null); + tx, txBody, MOCK_SIGNATURE_MAP, 
tx.signedTransactionBytes(), UNCHECKED_SUBMIT, null); final var verificationResultFuture = mock(SignatureVerificationFuture.class); final var verificationResult = mock(SignatureVerification.class); when(verificationResult.failed()).thenReturn(false); @@ -273,6 +273,7 @@ void testRunAllChecksSuccessfully() throws Exception { // then assertThat(actual).isEqualTo(expected); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Nested @@ -298,6 +299,7 @@ void onsetFailsWithPreCheckException(ResponseCodeEnum failureReason) throws PreC assertThatThrownBy(() -> subject.runAllChecks(state, tx, configuration)) .isInstanceOf(PreCheckException.class) .has(responseCode(failureReason)); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -310,6 +312,7 @@ void randomException() throws PreCheckException { assertThatThrownBy(() -> subject.runAllChecks(state, tx, configuration)) .isInstanceOf(RuntimeException.class) .hasMessageContaining("check exception"); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } } @@ -326,6 +329,7 @@ void testThrottleFails() throws PreCheckException { assertThatThrownBy(() -> subject.runAllChecks(state, tx, configuration)) .isInstanceOf(PreCheckException.class) .hasFieldOrPropertyWithValue("responseCode", DUPLICATE_TRANSACTION); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } } @@ -344,6 +348,7 @@ void testThrottleFails() { assertThatThrownBy(() -> subject.runAllChecks(state, tx, configuration)) .isInstanceOf(PreCheckException.class) .hasFieldOrPropertyWithValue("responseCode", BUSY); + verify(opWorkflowMetrics).incrementThrottled(UNCHECKED_SUBMIT); } @Test @@ -357,6 +362,7 @@ void randomException() { assertThatThrownBy(() -> subject.runAllChecks(state, tx, configuration)) .isInstanceOf(RuntimeException.class) .hasMessageContaining("shouldThrottle exception"); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } } @@ -377,6 +383,7 @@ void 
payerAccountStatusFails(ResponseCodeEnum failureReason) throws PreCheckExce assertThatThrownBy(() -> subject.runAllChecks(state, tx, configuration)) .isInstanceOf(PreCheckException.class) .has(responseCode(failureReason)); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -391,6 +398,7 @@ void randomException() throws PreCheckException { assertThatThrownBy(() -> subject.runAllChecks(state, tx, configuration)) .isInstanceOf(RuntimeException.class) .hasMessageContaining("checkPayerAccountStatus exception"); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } // NOTE: This should never happen in real life, but we need to code defensively for it anyway. @@ -405,6 +413,7 @@ void noKeyForPayer() throws PreCheckException { assertThatThrownBy(() -> subject.runAllChecks(state, tx, configuration)) .isInstanceOf(PreCheckException.class) .has(responseCode(UNAUTHORIZED)); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } } @@ -433,6 +442,7 @@ void payerAccountStatusFails(ResponseCodeEnum failureReason) .isInstanceOf(InsufficientBalanceException.class) .has(responseCode(failureReason)) .has(estimatedFee(123L)); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -448,6 +458,7 @@ void randomException() throws PreCheckException, ExecutionException, Interrupted assertThatThrownBy(() -> subject.runAllChecks(state, tx, configuration)) .isInstanceOf(RuntimeException.class) .hasMessageContaining("checkSolvency exception"); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } private void givenValidPayerSignature() throws ExecutionException, InterruptedException, TimeoutException { @@ -475,6 +486,7 @@ void noPayerSignature() { assertThatThrownBy(() -> subject.runAllChecks(state, tx, configuration)) .isInstanceOf(PreCheckException.class) .has(responseCode(INVALID_SIGNATURE)); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -490,6 +502,7 @@ void payerVerificationFails() 
throws Exception { assertThatThrownBy(() -> subject.runAllChecks(state, tx, configuration)) .isInstanceOf(PreCheckException.class) .has(responseCode(INVALID_SIGNATURE)); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -517,12 +530,7 @@ void testKeyListVerificationSucceeds() throws Exception { .build())) .build(); final var myTransactionInfo = new TransactionInfo( - myTx, - myTxBody, - MOCK_SIGNATURE_MAP, - myTx.signedTransactionBytes(), - HederaFunctionality.UNCHECKED_SUBMIT, - null); + myTx, myTxBody, MOCK_SIGNATURE_MAP, myTx.signedTransactionBytes(), UNCHECKED_SUBMIT, null); when(transactionChecker.check(myTx, null)).thenReturn(myTransactionInfo); when(solvencyPreCheck.getPayerAccount(any(), eq(accountID))).thenReturn(account); final var verificationResultFutureAlice = mock(SignatureVerificationFuture.class); @@ -543,6 +551,7 @@ void testKeyListVerificationSucceeds() throws Exception { // then assertThat(actual).isEqualTo(myTransactionInfo); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -570,12 +579,7 @@ void testKeyListVerificationFails() throws Exception { .build())) .build(); final var myTransactionInfo = new TransactionInfo( - myTx, - myTxBody, - MOCK_SIGNATURE_MAP, - myTx.signedTransactionBytes(), - HederaFunctionality.UNCHECKED_SUBMIT, - null); + myTx, myTxBody, MOCK_SIGNATURE_MAP, myTx.signedTransactionBytes(), UNCHECKED_SUBMIT, null); when(transactionChecker.check(myTx, null)).thenReturn(myTransactionInfo); when(solvencyPreCheck.getPayerAccount(any(), eq(accountID))).thenReturn(account); final var verificationResultFutureAlice = mock(SignatureVerificationFuture.class); @@ -595,6 +599,7 @@ void testKeyListVerificationFails() throws Exception { assertThatThrownBy(() -> subject.runAllChecks(state, myTx, configuration)) .isInstanceOf(PreCheckException.class) .has(responseCode(INVALID_SIGNATURE)); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -624,12 +629,7 @@ void 
testThresholdKeyVerificationSucceeds() throws Exception { .build())) .build(); final var myTransactionInfo = new TransactionInfo( - myTx, - myTxBody, - MOCK_SIGNATURE_MAP, - myTx.signedTransactionBytes(), - HederaFunctionality.UNCHECKED_SUBMIT, - null); + myTx, myTxBody, MOCK_SIGNATURE_MAP, myTx.signedTransactionBytes(), UNCHECKED_SUBMIT, null); when(transactionChecker.check(myTx, null)).thenReturn(myTransactionInfo); when(solvencyPreCheck.getPayerAccount(any(), eq(accountID))).thenReturn(account); final var verificationResultFutureAlice = mock(SignatureVerificationFuture.class); @@ -650,6 +650,7 @@ void testThresholdKeyVerificationSucceeds() throws Exception { // then assertThat(actual).isEqualTo(myTransactionInfo); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -679,12 +680,7 @@ void testThresholdKeyVerificationFails() throws Exception { .build())) .build(); final var myTransactionInfo = new TransactionInfo( - myTx, - myTxBody, - MOCK_SIGNATURE_MAP, - myTx.signedTransactionBytes(), - HederaFunctionality.UNCHECKED_SUBMIT, - null); + myTx, myTxBody, MOCK_SIGNATURE_MAP, myTx.signedTransactionBytes(), UNCHECKED_SUBMIT, null); when(transactionChecker.check(myTx, null)).thenReturn(myTransactionInfo); when(solvencyPreCheck.getPayerAccount(any(), eq(accountID))).thenReturn(account); final var verificationResultFutureAlice = mock(SignatureVerificationFuture.class); @@ -704,6 +700,7 @@ void testThresholdKeyVerificationFails() throws Exception { assertThatThrownBy(() -> subject.runAllChecks(state, myTx, configuration)) .isInstanceOf(PreCheckException.class) .has(responseCode(INVALID_SIGNATURE)); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -721,6 +718,7 @@ void randomException() throws Exception { assertThatThrownBy(() -> subject.runAllChecks(state, tx, configuration)) .isInstanceOf(RuntimeException.class) .hasMessageContaining("checkPayerSignature exception"); + verify(opWorkflowMetrics, 
never()).incrementThrottled(any()); } } } diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/prehandle/PreHandleContextImplTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/prehandle/PreHandleContextImplTest.java index f381cba8bd05..eba0292e00b3 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/prehandle/PreHandleContextImplTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/prehandle/PreHandleContextImplTest.java @@ -95,14 +95,14 @@ class PreHandleContextImplTest implements Scenarios { void setup() throws PreCheckException { given(storeFactory.getStore(ReadableAccountStore.class)).willReturn(accountStore); given(accountStore.getAccountById(PAYER)).willReturn(account); - given(account.key()).willReturn(payerKey); + given(account.keyOrThrow()).willReturn(payerKey); final var txn = createAccountTransaction(); subject = new PreHandleContextImpl(storeFactory, txn, configuration, dispatcher); } @Test - void gettersWork() throws PreCheckException { + void gettersWork() { subject.requireKey(otherKey); assertThat(subject.body()).isEqualTo(createAccountTransaction()); diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/prehandle/PreHandleContextListUpdatesTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/prehandle/PreHandleContextListUpdatesTest.java index 3c5c840bfc40..015ae6cfed6e 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/prehandle/PreHandleContextListUpdatesTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/prehandle/PreHandleContextListUpdatesTest.java @@ -53,7 +53,6 @@ import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -@SuppressWarnings("removal") @ExtendWith(MockitoExtension.class) class PreHandleContextListUpdatesTest { @@ -121,7 +120,7 @@ class PreHandleContextListUpdatesTest { void 
gettersWorkAsExpectedWhenOnlyPayerKeyExist() throws PreCheckException { // Given an account with a key, and a transaction using that account as the payer given(accountStore.getAccountById(payer)).willReturn(account); - given(account.key()).willReturn(payerKey); + given(account.keyOrThrow()).willReturn(payerKey); given(storeFactory.getStore(ReadableAccountStore.class)).willReturn(accountStore); final var txn = createAccountTransaction(); @@ -139,7 +138,7 @@ void gettersWorkAsExpectedWhenOnlyPayerKeyExist() throws PreCheckException { void nullInputToBuilderArgumentsThrows() throws PreCheckException { // Given an account with a key, and a transaction using that account as the payer given(accountStore.getAccountById(payer)).willReturn(account); - given(account.key()).willReturn(payerKey); + given(account.keyOrThrow()).willReturn(payerKey); given(storeFactory.getStore(ReadableAccountStore.class)).willReturn(accountStore); // When we create a PreHandleContext by passing null as either argument @@ -173,7 +172,7 @@ void nullInputToBuilderArgumentsThrows() throws PreCheckException { void requireSomeOtherKey() throws PreCheckException { // Given an account with a key, and a transaction using that account as the payer, and a PreHandleContext given(accountStore.getAccountById(payer)).willReturn(account); - given(account.key()).willReturn(payerKey); + given(account.keyOrThrow()).willReturn(payerKey); given(storeFactory.getStore(ReadableAccountStore.class)).willReturn(accountStore); subject = new PreHandleContextImpl(storeFactory, createAccountTransaction(), CONFIG, dispatcher); @@ -188,7 +187,7 @@ void requireSomeOtherKey() throws PreCheckException { void requireSomeOtherKeyTwice() throws PreCheckException { // Given an account with a key, and a transaction using that account as the payer, and a PreHandleContext given(accountStore.getAccountById(payer)).willReturn(account); - given(account.key()).willReturn(payerKey); + given(account.keyOrThrow()).willReturn(payerKey); 
given(storeFactory.getStore(ReadableAccountStore.class)).willReturn(accountStore); subject = new PreHandleContextImpl(storeFactory, createAccountTransaction(), CONFIG, dispatcher); @@ -204,7 +203,7 @@ void requireSomeOtherKeyTwice() throws PreCheckException { void payerIsIgnoredWhenRequired() throws PreCheckException { // Given an account with a key, and a transaction using that account as the payer, and a PreHandleContext given(accountStore.getAccountById(payer)).willReturn(account); - given(account.key()).willReturn(payerKey); + given(account.keyOrThrow()).willReturn(payerKey); given(storeFactory.getStore(ReadableAccountStore.class)).willReturn(accountStore); subject = new PreHandleContextImpl(storeFactory, createAccountTransaction(), CONFIG, dispatcher); @@ -231,7 +230,7 @@ void failsWhenPayerKeyDoesntExist() throws PreCheckException { void returnsIfGivenKeyIsPayer() throws PreCheckException { // Given an account with a key, and a transaction using that account as the payer and a PreHandleContext given(accountStore.getAccountById(payer)).willReturn(account); - given(account.key()).willReturn(payerKey); + given(account.keyOrThrow()).willReturn(payerKey); given(storeFactory.getStore(ReadableAccountStore.class)).willReturn(accountStore); subject = new PreHandleContextImpl(storeFactory, createAccountTransaction(), CONFIG, dispatcher); @@ -251,7 +250,7 @@ void returnsIfGivenKeyIsPayer() throws PreCheckException { void returnsIfGivenKeyIsInvalidAccountId() throws PreCheckException { // Given an account with a key, and a transaction using that account as the payer and a PreHandleContext given(accountStore.getAccountById(payer)).willReturn(account); - given(account.key()).willReturn(payerKey); + given(account.keyOrThrow()).willReturn(payerKey); given(storeFactory.getStore(ReadableAccountStore.class)).willReturn(accountStore); subject = new PreHandleContextImpl(storeFactory, createAccountTransaction(), CONFIG, dispatcher); @@ -265,7 +264,7 @@ void addsContractIdKey() 
throws PreCheckException { // Given an account with a key, and a transaction using that account as the payer, // and a contract account with a key, and a PreHandleContext given(accountStore.getAccountById(payer)).willReturn(account); - given(account.key()).willReturn(payerKey); + given(account.keyOrThrow()).willReturn(payerKey); given(accountStore.getContractById(otherContractId)).willReturn(contractAccount); given(contractAccount.key()).willReturn(contractIdKey); given(contractAccount.keyOrElse(EMPTY_KEY_LIST)).willReturn(contractIdKey); @@ -286,7 +285,7 @@ void doesntFailForAliasedAccount() throws PreCheckException { final var alias = AccountID.newBuilder().alias(Bytes.wrap("test")).build(); given(accountStore.getAccountById(alias)).willReturn(account); given(accountStore.getAccountById(payer)).willReturn(account); - given(account.key()).willReturn(payerKey); + given(account.keyOrThrow()).willReturn(payerKey); given(storeFactory.getStore(ReadableAccountStore.class)).willReturn(accountStore); given(account.accountIdOrThrow()).willReturn(payer); subject = new PreHandleContextImpl(storeFactory, createAccountTransaction(), CONFIG, dispatcher); @@ -307,7 +306,7 @@ void doesntFailForAliasedContract() throws PreCheckException { given(contractAccount.keyOrElse(EMPTY_KEY_LIST)).willReturn(otherKey); given(contractAccount.accountIdOrThrow()).willReturn(asAccount(otherContractId.contractNum())); given(accountStore.getAccountById(payer)).willReturn(account); - given(account.key()).willReturn(payerKey); + given(account.keyOrThrow()).willReturn(payerKey); given(storeFactory.getStore(ReadableAccountStore.class)).willReturn(accountStore); subject = new PreHandleContextImpl(storeFactory, createAccountTransaction(), CONFIG, dispatcher) @@ -322,7 +321,7 @@ void failsForInvalidAlias() throws PreCheckException { final var alias = AccountID.newBuilder().alias(Bytes.wrap("test")).build(); given(accountStore.getAccountById(alias)).willReturn(null); 
given(accountStore.getAccountById(payer)).willReturn(account); - given(account.key()).willReturn(payerKey); + given(account.keyOrThrow()).willReturn(payerKey); given(storeFactory.getStore(ReadableAccountStore.class)).willReturn(accountStore); subject = new PreHandleContextImpl(storeFactory, createAccountTransaction(), CONFIG, dispatcher); diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/query/QueryWorkflowImplTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/query/QueryWorkflowImplTest.java index 78bc6e27eb7d..dd4fbd5f21bc 100644 --- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/query/QueryWorkflowImplTest.java +++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/query/QueryWorkflowImplTest.java @@ -465,6 +465,7 @@ void testConstructorWithIllegalParameters() { null, true)) .isInstanceOf(NullPointerException.class); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @SuppressWarnings("ConstantConditions") @@ -477,6 +478,7 @@ void testHandleQueryWithIllegalParameters() { // then assertThatThrownBy(() -> workflow.handleQuery(null, responseBuffer)).isInstanceOf(NullPointerException.class); assertThatThrownBy(() -> workflow.handleQuery(requestBuffer, null)).isInstanceOf(NullPointerException.class); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @ParameterizedTest @@ -510,6 +512,7 @@ void testSuccessIfPaymentNotRequired(boolean shouldCharge) throws ParseException assertThat(header.responseType()).isEqualTo(ANSWER_ONLY); assertThat(header.cost()).isZero(); verifyMetricsSent(); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @ParameterizedTest @@ -552,6 +555,7 @@ void testSuccessIfPaymentRequired(boolean shouldCharge) throws ParseException { assertThat(header.responseType()).isEqualTo(ANSWER_ONLY); assertThat(header.cost()).isZero(); verifyMetricsSent(); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } 
@Test @@ -585,6 +589,7 @@ void testSuccessIfPaymentRequiredAndNotProvided() throws ParseException, PreChec assertThat(header.responseType()).isEqualTo(ANSWER_ONLY); assertThat(header.cost()).isZero(); verifyMetricsSent(); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -622,6 +627,7 @@ void testSuccessIfCostOnly() throws ParseException { assertThat(header.responseType()).isEqualTo(COST_ANSWER); assertThat(header.cost()).isEqualTo(fees.totalFee()); verifyMetricsSent(); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -635,6 +641,7 @@ void testParsingFails() throws ParseException { assertThatThrownBy(() -> workflow.handleQuery(requestBuffer, responseBuffer)) .isInstanceOf(StatusRuntimeException.class) .hasFieldOrPropertyWithValue("status", Status.INVALID_ARGUMENT); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -648,6 +655,7 @@ void testUnrecognizableQueryTypeFails() throws ParseException { assertThatThrownBy(() -> workflow.handleQuery(requestBuffer, responseBuffer)) .isInstanceOf(StatusRuntimeException.class) .hasFieldOrPropertyWithValue("status", Status.INVALID_ARGUMENT); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -659,6 +667,7 @@ void testUnknownQueryParamFails() throws ParseException { // then assertThrows(StatusRuntimeException.class, () -> workflow.handleQuery(requestBuffer, responseBuffer)); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -677,6 +686,7 @@ void testInvalidNodeFails() throws PreCheckException, ParseException { assertThat(header.responseType()).isEqualTo(ANSWER_ONLY); assertThat(header.cost()).isZero(); verifyMetricsSent(); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -705,12 +715,13 @@ void testUnsupportedResponseTypeFails() throws ParseException { assertThat(header.responseType()).isEqualTo(ANSWER_STATE_PROOF); assertThat(header.cost()).isZero(); verifyMetricsSent(); + 
verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test void testThrottleFails() throws ParseException { // given - when(synchronizedThrottleAccumulator.shouldThrottle(eq(HederaFunctionality.FILE_GET_INFO), any(), any())) + when(synchronizedThrottleAccumulator.shouldThrottle(eq(HederaFunctionality.FILE_GET_INFO), any(), any(), any())) .thenReturn(true); final var responseBuffer = newEmptyBuffer(); @@ -724,6 +735,7 @@ void testThrottleFails() throws ParseException { assertThat(header.responseType()).isEqualTo(ANSWER_ONLY); assertThat(header.cost()).isZero(); verifyMetricsSent(); + verify(opWorkflowMetrics).incrementThrottled(FILE_GET_INFO); } @Test @@ -745,7 +757,7 @@ void testThrottleDoesNotFailWhenWorkflowShouldNotCharge() throws ParseException instantSource, opWorkflowMetrics, false); - when(synchronizedThrottleAccumulator.shouldThrottle(eq(HederaFunctionality.FILE_GET_INFO), any(), any())) + when(synchronizedThrottleAccumulator.shouldThrottle(eq(HederaFunctionality.FILE_GET_INFO), any(), any(), any())) .thenReturn(true); final var responseBuffer = newEmptyBuffer(); @@ -758,6 +770,7 @@ void testThrottleDoesNotFailWhenWorkflowShouldNotCharge() throws ParseException assertThat(header.nodeTransactionPrecheckCode()).isEqualTo(OK); assertThat(header.responseType()).isEqualTo(ANSWER_ONLY); assertThat(header.cost()).isZero(); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -779,6 +792,7 @@ void testPaidQueryWithInvalidTransactionFails() throws PreCheckException, ParseE assertThat(header.responseType()).isEqualTo(ANSWER_ONLY); assertThat(header.cost()).isZero(); verifyMetricsSent(); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -798,6 +812,7 @@ void testPaidQueryWithInvalidCryptoTransferFails() throws PreCheckException, Par assertThat(header.responseType()).isEqualTo(ANSWER_ONLY); assertThat(header.cost()).isZero(); verifyMetricsSent(); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } 
@Test @@ -826,6 +841,7 @@ void testPaidQueryForSuperUserDoesNotSubmitCryptoTransfer() throws PreCheckExcep verify(submissionManager, never()).submit(any(), any()); verifyMetricsSent(); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -847,6 +863,7 @@ void testPaidQueryWithInsufficientPermissionFails() throws PreCheckException, Pa assertThat(header.responseType()).isEqualTo(ANSWER_ONLY); assertThat(header.cost()).isZero(); verifyMetricsSent(); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -872,6 +889,7 @@ void testPaidQueryWithInsufficientBalanceFails() throws PreCheckException, Parse assertThat(header.responseType()).isEqualTo(ANSWER_ONLY); assertThat(header.cost()).isEqualTo(12345L); verifyMetricsSent(); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -912,6 +930,7 @@ void testUnpaidQueryWithRestrictedFunctionalityFails(@Mock NetworkGetExecutionTi assertThat(header.responseType()).isEqualTo(COST_ANSWER); assertThat(header.cost()).isZero(); verify(opWorkflowMetrics).updateDuration(eq(NETWORK_GET_EXECUTION_TIME), anyInt()); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -935,6 +954,7 @@ void testQuerySpecificValidationFails() throws PreCheckException, ParseException final var queryContext = captor.getValue(); assertThat(queryContext.payer()).isNull(); verifyMetricsSent(); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } @Test @@ -957,6 +977,7 @@ void testPaidQueryWithFailingSubmissionFails() throws PreCheckException, ParseEx assertThat(header.responseType()).isEqualTo(ANSWER_ONLY); assertThat(header.cost()).isZero(); verifyMetricsSent(); + verify(opWorkflowMetrics, never()).incrementThrottled(any()); } private void verifyMetricsSent() { diff --git a/hedera-node/hedera-app/src/test/resources/bootstrap/throttles.json b/hedera-node/hedera-app/src/test/resources/bootstrap/throttles.json index 9946531e3120..60803f620376 100644 --- 
a/hedera-node/hedera-app/src/test/resources/bootstrap/throttles.json +++ b/hedera-node/hedera-app/src/test/resources/bootstrap/throttles.json @@ -69,7 +69,6 @@ "throttleGroups": [ { "operations": [ - "CryptoGetAccountBalance", "TransactionGetReceipt" ], "opsPerSec": 10 @@ -81,6 +80,18 @@ "opsPerSec": 1 } ] + }, + { + "name": "E", + "burstPeriod": 1, + "throttleGroups": [ + { + "operations": [ + "CryptoGetAccountBalance" + ], + "opsPerSec": 6 + } + ] } ] } diff --git a/hedera-node/hedera-config/src/main/java/com/hedera/node/config/data/BlockStreamConfig.java b/hedera-node/hedera-config/src/main/java/com/hedera/node/config/data/BlockStreamConfig.java index b536e39b6749..5a685592471d 100644 --- a/hedera-node/hedera-config/src/main/java/com/hedera/node/config/data/BlockStreamConfig.java +++ b/hedera-node/hedera-config/src/main/java/com/hedera/node/config/data/BlockStreamConfig.java @@ -25,14 +25,14 @@ /** * Configuration for the block stream. - * @param streamMode Default value of RECORDS disables the block stream; BOTH enables it + * @param streamMode Value of RECORDS disables the block stream; BOTH enables it * @param writerMode if we are writing to a file or gRPC stream * @param blockFileDir directory to store block files * @param compressFilesOnCreation whether to compress files on creation */ @ConfigData("blockStream") public record BlockStreamConfig( - @ConfigProperty(defaultValue = "RECORDS") @NetworkProperty StreamMode streamMode, + @ConfigProperty(defaultValue = "BOTH") @NetworkProperty StreamMode streamMode, @ConfigProperty(defaultValue = "FILE") @NodeProperty BlockStreamWriterMode writerMode, @ConfigProperty(defaultValue = "data/block-streams") @NodeProperty String blockFileDir, @ConfigProperty(defaultValue = "true") @NetworkProperty boolean compressFilesOnCreation, diff --git a/hedera-node/hedera-config/src/main/java/com/hedera/node/config/data/TokensConfig.java b/hedera-node/hedera-config/src/main/java/com/hedera/node/config/data/TokensConfig.java index 
30a7adabf07d..a9efeda64864 100644 --- a/hedera-node/hedera-config/src/main/java/com/hedera/node/config/data/TokensConfig.java +++ b/hedera-node/hedera-config/src/main/java/com/hedera/node/config/data/TokensConfig.java @@ -31,7 +31,7 @@ public record TokensConfig( @ConfigProperty(defaultValue = "100") @NetworkProperty int maxTokenNameUtf8Bytes, @ConfigProperty(defaultValue = "10") @NetworkProperty int maxCustomFeesAllowed, @ConfigProperty(defaultValue = "2") @NetworkProperty int maxCustomFeeDepth, - @ConfigProperty(defaultValue = "1000") @NetworkProperty long maxRelsPerInfoQuery, + @ConfigProperty(defaultValue = "1000") @NetworkProperty int maxRelsPerInfoQuery, @ConfigProperty(value = "reject.enabled", defaultValue = "true") @NetworkProperty boolean tokenRejectEnabled, @ConfigProperty(value = "nfts.areEnabled", defaultValue = "true") @NetworkProperty boolean nftsAreEnabled, @ConfigProperty(value = "nfts.maxMetadataBytes", defaultValue = "100") @NetworkProperty @@ -66,4 +66,5 @@ public record TokensConfig( @ConfigProperty(value = "airdrops.claim.enabled", defaultValue = "true") @NetworkProperty boolean airdropsClaimEnabled, @ConfigProperty(value = "nfts.maxBatchSizeUpdate", defaultValue = "10") @NetworkProperty - int nftsMaxBatchSizeUpdate) {} + int nftsMaxBatchSizeUpdate, + @ConfigProperty(defaultValue = "false") @NetworkProperty boolean countingGetBalanceThrottleEnabled) {} diff --git a/hedera-node/hedera-network-admin-service-impl/src/test/java/com/hedera/node/app/service/networkadmin/impl/test/handlers/ReadableFreezeUpgradeActionsTest.java b/hedera-node/hedera-network-admin-service-impl/src/test/java/com/hedera/node/app/service/networkadmin/impl/test/handlers/ReadableFreezeUpgradeActionsTest.java index dad9274b1051..3bd4b080f7a6 100644 --- a/hedera-node/hedera-network-admin-service-impl/src/test/java/com/hedera/node/app/service/networkadmin/impl/test/handlers/ReadableFreezeUpgradeActionsTest.java +++ 
b/hedera-node/hedera-network-admin-service-impl/src/test/java/com/hedera/node/app/service/networkadmin/impl/test/handlers/ReadableFreezeUpgradeActionsTest.java @@ -116,6 +116,8 @@ class ReadableFreezeUpgradeActionsTest { KEY_BUILDER.apply(B_NAME).build(), A_THRESHOLD_KEY))) .build(); + private static final Bytes TSS_KEY = Bytes.wrap(new byte[] {1, 2, 3}); + private Path noiseFileLoc; private Path noiseSubFileLoc; private Path zipArchivePath; // path to valid.zip test zip file (in zipSourceDir directory) @@ -427,7 +429,8 @@ private void setupNodes() throws CertificateException, IOException { Bytes.wrap("grpc1CertificateHash"), 2, false, - A_COMPLEX_KEY); + A_COMPLEX_KEY, + TSS_KEY); final var node2 = new Node( 2, asAccount(4), @@ -440,7 +443,8 @@ private void setupNodes() throws CertificateException, IOException { Bytes.wrap("grpc2CertificateHash"), 4, false, - A_COMPLEX_KEY); + A_COMPLEX_KEY, + TSS_KEY); final var node3 = new Node( 3, asAccount(6), @@ -453,7 +457,8 @@ private void setupNodes() throws CertificateException, IOException { Bytes.wrap("grpc3CertificateHash"), 1, true, - A_COMPLEX_KEY); + A_COMPLEX_KEY, + TSS_KEY); final var node4 = new Node( 4, asAccount(8), @@ -467,7 +472,8 @@ private void setupNodes() throws CertificateException, IOException { Bytes.wrap("grpc5CertificateHash"), 8, false, - A_COMPLEX_KEY); + A_COMPLEX_KEY, + TSS_KEY); final var readableNodeState = MapReadableKVState.builder(NODES_KEY) .value(new EntityNumber(4), node4) .value(new EntityNumber(2), node2) @@ -551,7 +557,8 @@ private void setupNodes2() throws CertificateException, IOException { Bytes.wrap("grpc1CertificateHash"), 2, false, - A_COMPLEX_KEY); + A_COMPLEX_KEY, + TSS_KEY); final var node2 = new Node( 1, asAccount(4), @@ -564,7 +571,8 @@ private void setupNodes2() throws CertificateException, IOException { Bytes.wrap("grpc2CertificateHash"), 4, false, - A_COMPLEX_KEY); + A_COMPLEX_KEY, + TSS_KEY); final var node3 = new Node( 2, asAccount(6), @@ -577,7 +585,8 @@ private void 
setupNodes2() throws CertificateException, IOException { Bytes.wrap("grpc3CertificateHash"), 1, false, - A_COMPLEX_KEY); + A_COMPLEX_KEY, + TSS_KEY); final var node4 = new Node( 3, asAccount(8), @@ -591,7 +600,8 @@ private void setupNodes2() throws CertificateException, IOException { Bytes.wrap("grpc5CertificateHash"), 8, true, - A_COMPLEX_KEY); + A_COMPLEX_KEY, + TSS_KEY); final var readableNodeState = MapReadableKVState.builder(NODES_KEY) .value(new EntityNumber(3), node4) .value(new EntityNumber(1), node2) diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/AbstractScheduleHandler.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/AbstractScheduleHandler.java index 8f9ee0799f12..0be8e9453fbc 100644 --- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/AbstractScheduleHandler.java +++ b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/AbstractScheduleHandler.java @@ -16,337 +16,251 @@ package com.hedera.node.app.service.schedule.impl.handlers; +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_SCHEDULE_ID; +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_TRANSACTION; +import static com.hedera.hapi.node.base.ResponseCodeEnum.OK; +import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULE_ALREADY_DELETED; +import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULE_ALREADY_EXECUTED; +import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULE_PENDING_EXPIRATION; +import static com.hedera.hapi.node.base.ResponseCodeEnum.SUCCESS; +import static com.hedera.hapi.node.base.ResponseCodeEnum.UNRESOLVABLE_REQUIRED_SIGNERS; +import static com.hedera.hapi.util.HapiUtils.asTimestamp; +import static com.hedera.node.app.service.schedule.impl.handlers.HandlerUtility.childAsOrdinary; import 
static com.hedera.node.app.spi.workflows.HandleContext.ConsensusThrottling.ON; +import static java.util.Objects.requireNonNull; import com.hedera.hapi.node.base.AccountID; import com.hedera.hapi.node.base.Key; import com.hedera.hapi.node.base.ResponseCodeEnum; import com.hedera.hapi.node.base.ScheduleID; -import com.hedera.hapi.node.base.Timestamp; -import com.hedera.hapi.node.base.TransactionID; import com.hedera.hapi.node.state.schedule.Schedule; -import com.hedera.hapi.node.state.token.Account; import com.hedera.hapi.node.transaction.TransactionBody; import com.hedera.node.app.service.schedule.ReadableScheduleStore; import com.hedera.node.app.service.schedule.ScheduleStreamBuilder; -import com.hedera.node.app.service.token.ReadableAccountStore; import com.hedera.node.app.spi.key.KeyComparator; -import com.hedera.node.app.spi.signatures.SignatureVerification; +import com.hedera.node.app.spi.signatures.VerificationAssistant; import com.hedera.node.app.spi.workflows.HandleContext; import com.hedera.node.app.spi.workflows.HandleContext.TransactionCategory; import com.hedera.node.app.spi.workflows.HandleException; import com.hedera.node.app.spi.workflows.PreCheckException; -import com.hedera.node.app.spi.workflows.PreHandleContext; import com.hedera.node.app.spi.workflows.TransactionKeys; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.time.Instant; +import java.util.ArrayList; +import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Objects; import java.util.Set; import java.util.SortedSet; import java.util.concurrent.ConcurrentSkipListSet; -import java.util.function.Predicate; /** * Provides some implementation support needed for both the {@link ScheduleCreateHandler} and {@link * ScheduleSignHandler}. 
*/ abstract class AbstractScheduleHandler { - protected static final String NULL_CONTEXT_MESSAGE = - "Dispatcher called the schedule handler with a null context; probable internal data corruption."; + private static final Comparator KEY_COMPARATOR = new KeyComparator(); - /** - * A simple record to return both "deemed valid" signatories and remaining primitive keys that must sign. - * - * @param updatedSignatories a Set of "deemed valid" signatories, possibly updated with new entries - * @param remainingRequiredKeys A Set of Key entries that have not yet signed the scheduled transaction, but - * must sign that transaction before it can be executed. - */ - protected record ScheduleKeysResult(Set updatedSignatories, Set remainingRequiredKeys) {} + @FunctionalInterface + protected interface TransactionKeysFn { + TransactionKeys apply(@NonNull TransactionBody body, @NonNull AccountID payerId) throws PreCheckException; + } /** - * Gets the set of all the keys required to sign a transaction. - * - * @param scheduleInState the schedule in state - * @param context the Prehandle context - * @return the set of keys required to sign the transaction - * @throws PreCheckException if the transaction cannot be handled successfully due to a validation failure of the - * dispatcher related to signer requirements or other pre-validation criteria. + * Gets the {@link TransactionKeys} summarizing a schedule's signing requirements. 
+ * @param schedule the schedule + * @param fn the function to get required keys by category + * @return the schedule's signing requirements + * @throws HandleException if the signing requirements cannot be determined */ - @NonNull - protected Set allKeysForTransaction( - @NonNull final Schedule scheduleInState, @NonNull final PreHandleContext context) throws PreCheckException { - final TransactionBody scheduledAsOrdinary = HandlerUtility.childAsOrdinary(scheduleInState); - final AccountID originalCreatePayer = - scheduleInState.originalCreateTransaction().transactionID().accountID(); - // note, payerAccount will never be null, but we're dealing with Sonar here. - final AccountID payerForNested = scheduleInState.payerAccountIdOrElse(originalCreatePayer); - final TransactionKeys keyStructure = context.allKeysForTransaction(scheduledAsOrdinary, payerForNested); - return getKeySetFromTransactionKeys(keyStructure); + protected @NonNull TransactionKeys getTransactionKeysOrThrow( + @NonNull final Schedule schedule, @NonNull final TransactionKeysFn fn) throws HandleException { + requireNonNull(schedule); + requireNonNull(fn); + try { + return getRequiredKeys(schedule, fn); + } catch (final PreCheckException e) { + throw new HandleException(e.responseCode()); + } } /** - * Get the schedule keys result to sign the transaction. - * - * @param scheduleInState the schedule in state - * @param context the Prehandle context - * @return the schedule keys result containing the updated signatories and the remaining required keys - * @throws HandleException if any validation check fails when getting the keys for the transaction + * Gets the {@link TransactionKeys} summarizing a schedule's signing requirements. 
+ * @param schedule the schedule + * @param fn the function to get required keys by category + * @return the schedule's signing requirements + * @throws PreCheckException if the signing requirements cannot be determined */ @NonNull - protected ScheduleKeysResult allKeysForTransaction( - @NonNull final Schedule scheduleInState, @NonNull final HandleContext context) throws HandleException { - final AccountID originalCreatePayer = - scheduleInState.originalCreateTransaction().transactionID().accountID(); - // note, payerAccount should never be null, but we're playing it safe here. - final AccountID payer = scheduleInState.payerAccountIdOrElse(originalCreatePayer); - final TransactionBody scheduledAsOrdinary = HandlerUtility.childAsOrdinary(scheduleInState); - final TransactionKeys keyStructure; - try { - keyStructure = context.allKeysForTransaction(scheduledAsOrdinary, payer); - // @todo('9447') We have an issue here. Currently, allKeysForTransaction fails in many cases where a - // key is currently unavailable, but could be in the future. We need the keys, even - // if the transaction is currently invalid, because we may create and sign schedules for - // invalid transactions, then only fail when the transaction is executed. This would allow - // (e.g.) scheduling the transfer of a dApp service fee from a newly created account to be - // set up before the account (or key) is created; then the new account, once funded, signs - // the scheduled transaction and the funds are immediately transferred. Currently that - // would fail on create. Long-term we should fix that. 
- } catch (final PreCheckException translated) { - throw new HandleException(translated.responseCode()); + protected TransactionKeys getRequiredKeys(@NonNull final Schedule schedule, @NonNull final TransactionKeysFn fn) + throws PreCheckException { + requireNonNull(schedule); + requireNonNull(fn); + final var body = childAsOrdinary(schedule); + final var creatorId = schedule.originalCreateTransactionOrThrow() + .transactionIDOrThrow() + .accountIDOrThrow(); + final var payerId = schedule.payerAccountIdOrElse(creatorId); + final var transactionKeys = fn.apply(body, payerId); + // We do not currently support scheduling transactions that would need to complete hollow accounts + if (!transactionKeys.requiredHollowAccounts().isEmpty()) { + throw new PreCheckException(UNRESOLVABLE_REQUIRED_SIGNERS); } - final Set scheduledRequiredKeys = getKeySetFromTransactionKeys(keyStructure); - // Ensure the *custom* payer is required; some rare corner cases may not require it otherwise. - final Key payerKey = getKeyForAccount(context, payer); - if (hasCustomPayer(scheduleInState) && payerKey != null) scheduledRequiredKeys.add(payerKey); - final Set currentSignatories = setOfKeys(scheduleInState.signatories()); - final Set remainingRequiredKeys = - filterRemainingRequiredKeys(context, scheduledRequiredKeys, currentSignatories, originalCreatePayer); - // Mono doesn't store extra signatures, so for now we mustn't either. - // This is structurally wrong for long term schedules, so we must remove this later. - // @todo('9447') Stop removing currently unused signatures, just store all the verified signatures until - // there are enough to execute, so we don't discard a signature now that would be required later. 
- HandlerUtility.filterSignatoriesToRequired(currentSignatories, scheduledRequiredKeys); - return new ScheduleKeysResult(currentSignatories, remainingRequiredKeys); - } - - private boolean hasCustomPayer(final Schedule scheduleToCheck) { - final AccountID originalCreatePayer = - scheduleToCheck.originalCreateTransaction().transactionID().accountID(); - final AccountID assignedPayer = scheduleToCheck.payerAccountId(); - // Will never be null, but Sonar doesn't know that. - return assignedPayer != null && !assignedPayer.equals(originalCreatePayer); + return transactionKeys; } /** - * Verify that at least one "new" required key signed the transaction. - *

- * If there exists a {@link Key} nKey, a member of newSignatories, such that nKey is not - * in existingSignatories, then a new key signed. Otherwise an {@link HandleException} is - * thrown with status {@link ResponseCodeEnum#NO_NEW_VALID_SIGNATURES}. - * - * @param existingSignatories a List of signatories representing all prior signatures before the current - * ScheduleSign transaction. - * @param newSignatories a Set of signatories representing all signatures following the current ScheduleSign - * transaction. - * @throws HandleException if there are no new signatures compared to the prior state. + * Gets all required keys for a transaction, including the payer key and all non-payer keys. + * @param keys the transaction keys + * @return the required keys */ - protected void verifyHasNewSignatures( - @NonNull final List existingSignatories, @NonNull final Set newSignatories) - throws HandleException { - SortedSet preExisting = setOfKeys(existingSignatories); - if (preExisting.containsAll(newSignatories)) { - throw new HandleException(ResponseCodeEnum.NO_NEW_VALID_SIGNATURES); - } + protected @NonNull List allRequiredKeys(@NonNull final TransactionKeys keys) { + final var all = new ArrayList(); + all.add(keys.payerKey()); + all.addAll(keys.requiredNonPayerKeys()); + return all; } /** - * Gets key for account. - * - * @param context the handle context - * @param accountToQuery the account to query - * @return the key for account + * Given a set of signing crypto keys, a list of signatories, and a list of required keys, returns a new list of + * signatories that includes all the original signatories and any crypto keys that are both constituents of the + * required keys and in the signing crypto keys set. 
+ * @param signingCryptoKeys the signing crypto keys + * @param signatories the original signatories + * @param requiredKeys the required keys + * @return the new signatories */ - @Nullable - protected Key getKeyForAccount(@NonNull final HandleContext context, @NonNull final AccountID accountToQuery) { - final ReadableAccountStore accountStore = context.storeFactory().readableStore(ReadableAccountStore.class); - final Account accountData = accountStore.getAccountById(accountToQuery); - return (accountData != null && accountData.key() != null) ? accountData.key() : null; + protected @NonNull List newSignatories( + @NonNull final SortedSet signingCryptoKeys, + @NonNull final List signatories, + @NonNull final List requiredKeys) { + requireNonNull(signingCryptoKeys); + requireNonNull(signatories); + requireNonNull(requiredKeys); + final var newSignatories = new ConcurrentSkipListSet<>(KEY_COMPARATOR); + newSignatories.addAll(signatories); + requiredKeys.forEach(k -> accumulateNewSignatories(newSignatories, signingCryptoKeys, k)); + return new ArrayList<>(newSignatories); } /** - * Given a transaction body and schedule store, validate the transaction meets minimum requirements to - * be completed. - *

This method checks that the Schedule ID is not null, references a schedule in readable state, - * and the referenced schedule has a child transaction. - * The full set of checks in the {@link #validate(Schedule, Instant, boolean)} method must also - * pass. - * If all validation checks pass, the schedule metadata is returned. - * If any checks fail, then a {@link PreCheckException} is thrown.

- * @param idToValidate the ID of the schedule to validate - * @param scheduleStore data from readable state which contains, at least, a metadata entry for the schedule - * that the current transaction will sign. - * @throws PreCheckException if the ScheduleSign transaction provided fails any of the required validation - * checks. + * Either returns a schedule from the given store with the given id, ready to be modified, or throws a + * {@link PreCheckException} if the schedule is not found or is not in a valid state. + * + * @param scheduleId the schedule to get and validate + * @param scheduleStore the schedule store + * @throws PreCheckException if the schedule is not found or is not in a valid state */ @NonNull - protected Schedule preValidate( + protected Schedule getValidated( + @NonNull final ScheduleID scheduleId, @NonNull final ReadableScheduleStore scheduleStore, - final boolean isLongTermEnabled, - @Nullable final ScheduleID idToValidate) + final boolean isLongTermEnabled) throws PreCheckException { - if (idToValidate != null) { - final Schedule scheduleData = scheduleStore.get(idToValidate); - if (scheduleData != null) { - if (scheduleData.scheduledTransaction() != null) { - final ResponseCodeEnum validationResult = validate(scheduleData, null, isLongTermEnabled); - if (validationResult == ResponseCodeEnum.OK) { - return scheduleData; - } else { - throw new PreCheckException(validationResult); - } - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION); - } - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_SCHEDULE_ID); - } + requireNonNull(scheduleId); + requireNonNull(scheduleStore); + final var schedule = scheduleStore.get(scheduleId); + final var validationResult = validate(schedule, null, isLongTermEnabled); + if (validationResult == OK) { + return requireNonNull(schedule); } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_SCHEDULE_ID); + throw new PreCheckException(validationResult); } } /** * 
Given a schedule, consensus time, and long term scheduling enabled flag, validate the transaction - * meets minimum requirements to be handled. + * meets minimum requirements to be handled. Returns {@link ResponseCodeEnum#OK} if the schedule is valid. *

- * This method checks that, as of the current consensus time, the schedule is + * This method checks that, as of the current consensus time, the schedule, *

    - *
  • not null
  • - *
  • has a scheduled transaction
  • - *
  • has not been executed
  • - *
  • is not deleted
  • - *
  • has not expired
  • + *
  • Is not null.
  • + *
  • Has a scheduled transaction.
  • + *
  • Has not been executed.
  • + *
  • Is not deleted.
  • + *
  • Has not expired.
  • *
- * - * @param scheduleToValidate the {@link Schedule} to validate. If this is null then - * {@link ResponseCodeEnum#INVALID_SCHEDULE_ID} is returned. - * @param consensusTime the consensus time {@link Instant} applicable to this transaction. - * If this is null then we assume this is a pre-check and do not validate expiration. - * @param isLongTermEnabled a flag indicating if long term scheduling is currently enabled. This modifies - * which response code is sent when a schedule is expired. - * @return a response code representing the result of the validation. This is {@link ResponseCodeEnum#OK} - * if all checks pass, or an appropriate failure code if any checks fail. + * @param schedule the schedule to validate + * @param consensusNow the current consensus time + * @param isLongTermEnabled whether long term scheduling is enabled + * @return the validation result */ @NonNull protected ResponseCodeEnum validate( - @Nullable final Schedule scheduleToValidate, - @Nullable final Instant consensusTime, - final boolean isLongTermEnabled) { - final ResponseCodeEnum result; - final Instant effectiveConsensusTime = Objects.requireNonNullElse(consensusTime, Instant.MIN); - if (scheduleToValidate != null) { - if (scheduleToValidate.hasScheduledTransaction()) { - if (!scheduleToValidate.executed()) { - if (!scheduleToValidate.deleted()) { - final long expiration = scheduleToValidate.calculatedExpirationSecond(); - final Instant calculatedExpiration = - (expiration != Schedule.DEFAULT.calculatedExpirationSecond() - ? Instant.ofEpochSecond(expiration) - : Instant.MAX); - if (calculatedExpiration.getEpochSecond() >= effectiveConsensusTime.getEpochSecond()) { - result = ResponseCodeEnum.OK; - } else { - // We are past expiration time - if (!isLongTermEnabled) { - result = ResponseCodeEnum.INVALID_SCHEDULE_ID; - } else { - // This is not failure, it indicates the schedule should execute if it can, - // or be removed if it is not executable (i.e. 
it lacks required signatures) - result = ResponseCodeEnum.SCHEDULE_PENDING_EXPIRATION; - } - } - } else { - result = ResponseCodeEnum.SCHEDULE_ALREADY_DELETED; - } - } else { - result = ResponseCodeEnum.SCHEDULE_ALREADY_EXECUTED; - } - } else { - result = ResponseCodeEnum.INVALID_TRANSACTION; - } + @Nullable final Schedule schedule, @Nullable final Instant consensusNow, final boolean isLongTermEnabled) { + if (schedule == null) { + return INVALID_SCHEDULE_ID; + } + if (!schedule.hasScheduledTransaction()) { + return INVALID_TRANSACTION; + } + if (schedule.executed()) { + return SCHEDULE_ALREADY_EXECUTED; + } + if (schedule.deleted()) { + return SCHEDULE_ALREADY_DELETED; + } + final long expiration = schedule.calculatedExpirationSecond(); + final var calculatedExpiration = (expiration != Schedule.DEFAULT.calculatedExpirationSecond() + ? Instant.ofEpochSecond(expiration) + : Instant.MAX); + final var effectiveNow = Objects.requireNonNullElse(consensusNow, Instant.MIN); + if (calculatedExpiration.getEpochSecond() >= effectiveNow.getEpochSecond()) { + return OK; } else { - result = ResponseCodeEnum.INVALID_SCHEDULE_ID; + return isLongTermEnabled ? SCHEDULE_PENDING_EXPIRATION : INVALID_SCHEDULE_ID; } - return result; } /** - * Very basic transaction ID validation. - * This just checks that the transaction is not scheduled (you cannot schedule a schedule), - * that the account ID is not null (so we can fill in scheduler account), - * and that the start timestamp is not null (so we can fill in schedule valid start time) - * @param currentId a TransactionID to validate - * @throws PreCheckException if the transaction is scheduled, the account ID is null, or the start time is null. + * Indicates if the given validation result is one that may allow a validated schedule to be executed. 
+ * @param validationResult the validation result + * @return if the schedule might be executable */ - protected void checkValidTransactionId(@Nullable final TransactionID currentId) throws PreCheckException { - if (currentId == null) throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION_ID); - final AccountID payer = currentId.accountID(); - final Timestamp validStart = currentId.transactionValidStart(); - final boolean isScheduled = currentId.scheduled(); - if (isScheduled) throw new PreCheckException(ResponseCodeEnum.SCHEDULED_TRANSACTION_NOT_IN_WHITELIST); - if (payer == null) throw new PreCheckException(ResponseCodeEnum.INVALID_SCHEDULE_PAYER_ID); - if (validStart == null) throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION_START); + protected boolean isMaybeExecutable(@NonNull final ResponseCodeEnum validationResult) { + return validationResult == OK || validationResult == SUCCESS || validationResult == SCHEDULE_PENDING_EXPIRATION; } /** - * Try to execute a schedule. Will attempt to execute a schedule if the remaining signatories are empty - * and the schedule is not waiting for expiration. - * - * @param context the context - * @param scheduleToExecute the schedule to execute - * @param remainingSignatories the remaining signatories - * @param validSignatories the valid signatories - * @param validationResult the validation result - * @param isLongTermEnabled the is long term enabled - * @return boolean indicating if the schedule was executed + * Tries to execute a schedule, if all conditions are met. Returns true if the schedule was executed. 
+ * @param context the context + * @param schedule the schedule to execute + * @param validationResult the validation result + * @param isLongTermEnabled the is long term enabled + * @return if the schedule was executed */ protected boolean tryToExecuteSchedule( @NonNull final HandleContext context, - @NonNull final Schedule scheduleToExecute, - @NonNull final Set remainingSignatories, - @NonNull final Set validSignatories, + @NonNull final Schedule schedule, + @NonNull final List requiredKeys, @NonNull final ResponseCodeEnum validationResult, final boolean isLongTermEnabled) { - if (canExecute(remainingSignatories, isLongTermEnabled, validationResult, scheduleToExecute)) { - final AccountID originalPayer = scheduleToExecute - .originalCreateTransaction() - .transactionID() - .accountID(); - final Set acceptedSignatories = new HashSet<>(); - acceptedSignatories.addAll(validSignatories); - acceptedSignatories.add(getKeyForAccount(context, originalPayer)); - final Predicate assistant = new DispatchPredicate(acceptedSignatories); - // This sets the child transaction ID to scheduled. - final TransactionBody childTransaction = HandlerUtility.childAsOrdinary(scheduleToExecute); - final ScheduleStreamBuilder recordBuilder = context.dispatchChildTransaction( - childTransaction, - ScheduleStreamBuilder.class, - assistant, - scheduleToExecute.payerAccountId(), - TransactionCategory.SCHEDULED, - ON); - // If the child failed, we would prefer to fail with the same result. - // We do not fail, however, at least mono service code does not. - // We succeed and the record of the child transaction is failed. - // set the schedule ref for the child transaction to the schedule that we're executing - recordBuilder.scheduleRef(scheduleToExecute.scheduleId()); - // also set the child transaction ID as scheduled transaction ID in the parent record. 
- final ScheduleStreamBuilder parentRecordBuilder = - context.savepointStack().getBaseBuilder(ScheduleStreamBuilder.class); - parentRecordBuilder.scheduledTransactionID(childTransaction.transactionID()); + requireNonNull(context); + requireNonNull(schedule); + requireNonNull(requiredKeys); + requireNonNull(validationResult); + + final var signatories = new HashSet<>(schedule.signatories()); + final VerificationAssistant callback = (k, ignore) -> signatories.contains(k); + final var remainingKeys = new HashSet<>(requiredKeys); + remainingKeys.removeIf( + k -> context.keyVerifier().verificationFor(k, callback).passed()); + final boolean isExpired = validationResult == SCHEDULE_PENDING_EXPIRATION; + if (canExecute(schedule, remainingKeys, isExpired, isLongTermEnabled)) { + final var body = childAsOrdinary(schedule); + context.dispatchChildTransaction( + body, + ScheduleStreamBuilder.class, + signatories::contains, + schedule.payerAccountIdOrThrow(), + TransactionCategory.SCHEDULED, + ON) + .scheduleRef(schedule.scheduleId()); + context.savepointStack() + .getBaseBuilder(ScheduleStreamBuilder.class) + .scheduledTransactionID(body.transactionID()); return true; } else { return false; @@ -354,84 +268,69 @@ protected boolean tryToExecuteSchedule( } /** - * Checks if the validation is OK, SUCCESS, or SCHEDULE_PENDING_EXPIRATION. - * - * @param validationResult the validation result - * @return boolean indicating status of the validation + * Returns a version of the given schedule marked as executed at the given time. 
+ * @param schedule the schedule to mark as executed + * @param consensusNow the time to mark the schedule as executed + * @return the marked schedule */ - protected boolean validationOk(final ResponseCodeEnum validationResult) { - return validationResult == ResponseCodeEnum.OK - || validationResult == ResponseCodeEnum.SUCCESS - || validationResult == ResponseCodeEnum.SCHEDULE_PENDING_EXPIRATION; - } - - @NonNull - private SortedSet getKeySetFromTransactionKeys(final TransactionKeys requiredKeys) { - final SortedSet scheduledRequiredKeys = new ConcurrentSkipListSet<>(new KeyComparator()); - scheduledRequiredKeys.addAll(requiredKeys.requiredNonPayerKeys()); - scheduledRequiredKeys.addAll(requiredKeys.optionalNonPayerKeys()); - return scheduledRequiredKeys; + protected static @NonNull Schedule markedExecuted( + @NonNull final Schedule schedule, @NonNull final Instant consensusNow) { + return schedule.copyBuilder() + .executed(true) + .resolutionTime(asTimestamp(consensusNow)) + .build(); } - private SortedSet filterRemainingRequiredKeys( - final HandleContext context, - final Set scheduledRequiredKeys, - final Set currentSignatories, - final AccountID originalCreatePayer) { - // the final output must be a sorted/ordered set. - final KeyComparator keyMatcher = new KeyComparator(); - final SortedSet remainingKeys = new ConcurrentSkipListSet<>(keyMatcher); - final Set currentUnverifiedKeys = new HashSet<>(1); - final Key originalPayerKey = getKeyForAccount(context, originalCreatePayer); - final var assistant = new ScheduleVerificationAssistant(currentSignatories, currentUnverifiedKeys); - for (final Key next : scheduledRequiredKeys) { - // The schedule verification assistant observes each primitive key in the tree - final SignatureVerification isVerified = context.keyVerifier().verificationFor(next, assistant); - // unverified primitive keys only count if the top-level key failed verification. 
- // @todo('9447') The comparison to originalPayerKey here is to match monoservice - // "hidden default payer" behavior. We intend to remove that behavior after v1 - // release as it is not considered fully "correct", particularly for long term schedules. - if (!isVerified.passed() && keyMatcher.compare(next, originalPayerKey) != 0) { - remainingKeys.addAll(currentUnverifiedKeys); - } - currentUnverifiedKeys.clear(); + /** + * Evaluates whether a schedule with given remaining signatories, validation result, and can be executed + * in the context of long-term scheduling on or off. + * + * @param schedule the schedule to execute + * @param remainingKeys the remaining keys that must sign + * @param isExpired whether the schedule is expired + * @param isLongTermEnabled the long term scheduling flag + * @return boolean indicating if the schedule can be executed + */ + private boolean canExecute( + @NonNull final Schedule schedule, + @NonNull final Set remainingKeys, + final boolean isExpired, + final boolean isLongTermEnabled) { + // We can only execute if there are no remaining keys required to sign + if (!remainingKeys.isEmpty()) { + return false; + } + // If long-term transactions are disabled, everything executes immediately + if (!isLongTermEnabled) { + return true; } - return remainingKeys; + // Otherwise we can only execute in two cases, + // (1) The schedule is allowed to execute immediately, and is not expired. + // (2) The schedule is waiting for its expiry to execute, and is expired. + return schedule.waitForExpiry() == isExpired; } /** - * Given an arbitrary {@code Iterable}, return a modifiable {@code SortedSet} containing - * the same objects as the input. - * This set must be sorted to ensure a deterministic order of values in state. - * If there are any duplicates in the input, only one of each will be in the result. - * If there are any null values in the input, those values will be excluded from the result. 
- * @param keyCollection an Iterable of Key values. - * @return a modifiable {@code SortedSet} containing the same contents as the input with duplicates - * and null values excluded + * Accumulates the valid signatories from a key structure into a set of signatories. + * @param signatories the set of signatories to accumulate into + * @param signingCryptoKeys the signing crypto keys + * @param key the key structure to accumulate signatories from */ - @NonNull - private SortedSet setOfKeys(@Nullable final Iterable keyCollection) { - if (keyCollection != null) { - final SortedSet results = new ConcurrentSkipListSet<>(new KeyComparator()); - for (final Key next : keyCollection) { - if (next != null) results.add(next); + private void accumulateNewSignatories( + @NonNull final Set signatories, @NonNull final Set signingCryptoKeys, @NonNull final Key key) { + switch (key.key().kind()) { + case ED25519, ECDSA_SECP256K1 -> { + if (signingCryptoKeys.contains(key)) { + signatories.add(key); + } } - return results; - } else { - // cannot use Set.of() or Collections.emptySet() here because those are unmodifiable and unsorted. 
- return new ConcurrentSkipListSet<>(new KeyComparator()); + case KEY_LIST -> key.keyListOrThrow() + .keys() + .forEach(k -> accumulateNewSignatories(signatories, signingCryptoKeys, k)); + case THRESHOLD_KEY -> key.thresholdKeyOrThrow() + .keysOrThrow() + .keys() + .forEach(k -> accumulateNewSignatories(signatories, signingCryptoKeys, k)); } } - - private boolean canExecute( - final Set remainingSignatories, - final boolean isLongTermEnabled, - final ResponseCodeEnum validationResult, - final Schedule scheduleToExecute) { - // either we're waiting and pending, or not waiting and not pending - final boolean longTermReady = - scheduleToExecute.waitForExpiry() == (validationResult == ResponseCodeEnum.SCHEDULE_PENDING_EXPIRATION); - final boolean allSignturesGathered = remainingSignatories == null || remainingSignatories.isEmpty(); - return allSignturesGathered && (!isLongTermEnabled || longTermReady); - } } diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/DispatchPredicate.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/DispatchPredicate.java deleted file mode 100644 index 9768b3e7fc2e..000000000000 --- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/DispatchPredicate.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (C) 2023-2024 Hedera Hashgraph, LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.hedera.node.app.service.schedule.impl.handlers; - -import static java.util.Objects.requireNonNull; - -import com.hedera.hapi.node.base.Key; -import com.hedera.node.app.spi.signatures.VerificationAssistant; -import com.hedera.node.app.spi.workflows.HandleContext; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Set; -import java.util.function.Predicate; - -/** - * Predicate for child dispatch key validation required because {@link HandleContext} no longer - * allows a {@link VerificationAssistant} to be used for dispatch. - */ -public class DispatchPredicate implements Predicate { - private final Set preValidatedKeys; - - /** - * Create a new DispatchPredicate using the given set of keys as deemed-valid. - * - * @param preValidatedKeys an unmodifiable {@code Set} of primitive keys - * previously verified. - */ - public DispatchPredicate(@NonNull final Set preValidatedKeys) { - this.preValidatedKeys = requireNonNull(preValidatedKeys); - } - - @Override - public boolean test(@NonNull final Key key) { - return preValidatedKeys.contains(requireNonNull(key)); - } -} diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/HandlerUtility.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/HandlerUtility.java index 59a12eb82eb6..f8df4d1b16f0 100644 --- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/HandlerUtility.java +++ b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/HandlerUtility.java @@ -16,26 +16,19 @@ package com.hedera.node.app.service.schedule.impl.handlers; -import com.hedera.hapi.node.base.AccountID; import com.hedera.hapi.node.base.HederaFunctionality; -import 
com.hedera.hapi.node.base.Key; import com.hedera.hapi.node.base.ResponseCodeEnum; import com.hedera.hapi.node.base.ScheduleID; -import com.hedera.hapi.node.base.ScheduleID.Builder; import com.hedera.hapi.node.base.Timestamp; import com.hedera.hapi.node.base.TransactionID; import com.hedera.hapi.node.scheduled.SchedulableTransactionBody; import com.hedera.hapi.node.scheduled.SchedulableTransactionBody.DataOneOfType; -import com.hedera.hapi.node.scheduled.ScheduleCreateTransactionBody; import com.hedera.hapi.node.state.schedule.Schedule; import com.hedera.hapi.node.transaction.TransactionBody; import com.hedera.node.app.spi.workflows.HandleException; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.time.Instant; -import java.util.Collection; -import java.util.List; -import java.util.Set; /** * A package-private utility class for Schedule Handlers. @@ -176,216 +169,66 @@ static HederaFunctionality functionalityForType(final DataOneOfType transactionT }; } - /** - * Given a Schedule, return a copy of that schedule with the executed flag and resolution time set. - * @param schedule a {@link Schedule} to mark executed. - * @param consensusTime the current consensus time, used to set {@link Schedule#resolutionTime()}. - * @return a new Schedule which matches the input, except that the execute flag is set and the resolution time - * is set to the consensusTime provided. - */ - @NonNull - static Schedule markExecuted(@NonNull final Schedule schedule, @NonNull final Instant consensusTime) { - final Timestamp consensusTimestamp = new Timestamp(consensusTime.getEpochSecond(), consensusTime.getNano()); - return schedule.copyBuilder() - .executed(true) - .resolutionTime(consensusTimestamp) - .build(); - } - - /** - * Replace the signatories of a schedule with a new set of signatories. - * The schedule is not modified in place. 
- * - * @param schedule the schedule - * @param newSignatories the new signatories - * @return the schedule - */ - @NonNull - static Schedule replaceSignatories(@NonNull final Schedule schedule, @NonNull final Set newSignatories) { - return schedule.copyBuilder().signatories(List.copyOf(newSignatories)).build(); - } - - /** - * Replace signatories and mark executed schedule. - * - * @param schedule the schedule - * @param newSignatories the new signatories - * @param consensusTime the consensus time - * @return the schedule - */ - @NonNull - static Schedule replaceSignatoriesAndMarkExecuted( - @NonNull final Schedule schedule, - @NonNull final Set newSignatories, - @NonNull final Instant consensusTime) { - final Timestamp consensusTimestamp = new Timestamp(consensusTime.getEpochSecond(), consensusTime.getNano()); - final Schedule.Builder builder = schedule.copyBuilder().executed(true).resolutionTime(consensusTimestamp); - return builder.signatories(List.copyOf(newSignatories)).build(); - } - /** * Create a new Schedule, but without an ID or signatories. * This method is used to create a schedule object for processing during a ScheduleCreate, but without the * schedule ID, as we still need to complete validation and other processing. Once all processing is complete, * a new ID is allocated and signatories are added immediately prior to storing the new object in state. - * @param currentTransaction The transaction body of the current Schedule Create transaction. We assume that + * @param body The transaction body of the current Schedule Create transaction. We assume that * the transaction is a ScheduleCreate, but require the less specific object so that we have access to * the transaction ID via {@link TransactionBody#transactionID()} from the TransactionBody stored in * the {@link Schedule#originalCreateTransaction()} attribute of the Schedule. - * @param currentConsensusTime The current consensus time for the network. 
- * @param maxLifeSeconds The maximum number of seconds a schedule is permitted to exist on the ledger + * @param consensusNow The current consensus time for the network. + * @param maxLifetime The maximum number of seconds a schedule is permitted to exist on the ledger * before it expires. * @return a newly created Schedule with a null schedule ID * @throws HandleException if the */ @NonNull static Schedule createProvisionalSchedule( - @NonNull final TransactionBody currentTransaction, - @NonNull final Instant currentConsensusTime, - final long maxLifeSeconds) - throws HandleException { - // The next three items will never be null, but Sonar is persnickety, so we force NPE if any are null. - final TransactionID parentTransactionId = currentTransaction.transactionIDOrThrow(); - final ScheduleCreateTransactionBody createTransaction = currentTransaction.scheduleCreateOrThrow(); - final AccountID schedulerAccount = parentTransactionId.accountIDOrThrow(); - final long calculatedExpirationTime = - calculateExpiration(createTransaction.expirationTime(), currentConsensusTime, maxLifeSeconds); - final ScheduleID nullId = null; - - Schedule.Builder builder = Schedule.newBuilder(); - builder.scheduleId(nullId).deleted(false).executed(false); - builder.waitForExpiry(createTransaction.waitForExpiry()); - builder.adminKey(createTransaction.adminKey()).schedulerAccountId(parentTransactionId.accountID()); - builder.payerAccountId(createTransaction.payerAccountIDOrElse(schedulerAccount)); - builder.schedulerAccountId(schedulerAccount); - builder.scheduleValidStart(parentTransactionId.transactionValidStart()); - builder.calculatedExpirationSecond(calculatedExpirationTime); - builder.providedExpirationSecond( - createTransaction.expirationTimeOrElse(Timestamp.DEFAULT).seconds()); - builder.originalCreateTransaction(currentTransaction); - builder.memo(createTransaction.memo()); - builder.scheduledTransaction(createTransaction.scheduledTransactionBody()); - return builder.build(); - } 
- - /** - * Complete the processing of a provisional schedule, which was created during a ScheduleCreate transaction. - * The schedule is completed by adding a schedule ID and signatories. - * - * @param provisionalSchedule the provisional schedule - * @param newEntityNumber the new entity number - * @param finalSignatories the final signatories for the schedule - * @return the schedule - * @throws HandleException if the transaction is not handled successfully. - */ - @NonNull - static Schedule completeProvisionalSchedule( - @NonNull final Schedule provisionalSchedule, - final long newEntityNumber, - @NonNull final Set finalSignatories) - throws HandleException { - final TransactionBody originalTransaction = provisionalSchedule.originalCreateTransactionOrThrow(); - final TransactionID parentTransactionId = originalTransaction.transactionIDOrThrow(); - final ScheduleID finalId = getNextScheduleID(parentTransactionId, newEntityNumber); - - Schedule.Builder build = provisionalSchedule.copyBuilder(); - build.scheduleId(finalId).deleted(false).executed(false); - build.schedulerAccountId(parentTransactionId.accountID()); - build.signatories(List.copyOf(finalSignatories)); - return build.build(); + @NonNull final TransactionBody body, @NonNull final Instant consensusNow, final long maxLifetime) { + final var txnId = body.transactionIDOrThrow(); + final var op = body.scheduleCreateOrThrow(); + final var payerId = txnId.accountIDOrThrow(); + final long expiry = calculateExpiration(op.expirationTime(), consensusNow, maxLifetime); + return Schedule.newBuilder() + .scheduleId((ScheduleID) null) + .deleted(false) + .executed(false) + .waitForExpiry(op.waitForExpiry()) + .adminKey(op.adminKey()) + .schedulerAccountId(payerId) + .payerAccountId(op.payerAccountIDOrElse(payerId)) + .schedulerAccountId(payerId) + .scheduleValidStart(txnId.transactionValidStart()) + .calculatedExpirationSecond(expiry) + .providedExpirationSecond( + 
op.expirationTimeOrElse(Timestamp.DEFAULT).seconds()) + .originalCreateTransaction(body) + .memo(op.memo()) + .scheduledTransaction(op.scheduledTransactionBody()) + .build(); } /** - * Gets next schedule id for a given parent transaction id and new schedule number. - * The schedule ID is created using the shard and realm numbers from the parent transaction ID, - * and the new schedule number. - * - * @param parentTransactionId the parent transaction id - * @param newScheduleNumber the new schedule number - * @return the next schedule id + * Builds the transaction id for a scheduled transaction from its schedule. + * @param schedule the schedule + * @return its transaction id */ @NonNull - static ScheduleID getNextScheduleID( - @NonNull final TransactionID parentTransactionId, final long newScheduleNumber) { - final AccountID schedulingAccount = parentTransactionId.accountIDOrThrow(); - final long shardNumber = schedulingAccount.shardNum(); - final long realmNumber = schedulingAccount.realmNum(); - final Builder builder = ScheduleID.newBuilder().shardNum(shardNumber).realmNum(realmNumber); - return builder.scheduleNum(newScheduleNumber).build(); - } - - /** - * Transaction id for scheduled transaction id. - * - * @param valueInState the value in state - * @return the transaction id - */ - @NonNull - static TransactionID transactionIdForScheduled(@NonNull Schedule valueInState) { - // original create transaction and its transaction ID will never be null, but Sonar... - final TransactionBody originalTransaction = valueInState.originalCreateTransactionOrThrow(); - final TransactionID parentTransactionId = originalTransaction.transactionIDOrThrow(); - final TransactionID.Builder builder = parentTransactionId.copyBuilder(); - // This is tricky. - // The scheduled child transaction that is executed must have a transaction ID that exactly matches - // the original CREATE transaction, not the parent transaction that triggers execution. 
So the child - // record is a child of "trigger" with an ID matching "create". This is what mono service does, but it - // is not ideal. Future work should change this (if at all possible) to have ID and parent match - // better, not rely on exact ID match, and only use the scheduleRef and scheduledId values in the transaction - // records (scheduleRef on the child pointing to the schedule ID, and scheduled ID on the parent pointing - // to the child transaction) for connecting things. - builder.scheduled(true); - return builder.build(); + static TransactionID transactionIdForScheduled(@NonNull final Schedule schedule) { + final var op = schedule.originalCreateTransactionOrThrow(); + final var parentTxnId = op.transactionIDOrThrow(); + return parentTxnId.copyBuilder().scheduled(true).build(); } private static long calculateExpiration( - @Nullable final Timestamp givenExpiration, - @NonNull final Instant currentConsensusTime, - final long maxLifeSeconds) { + @Nullable final Timestamp givenExpiration, @NonNull final Instant consensusNow, final long maxLifetime) { if (givenExpiration != null) { return givenExpiration.seconds(); } else { - final Instant currentPlusMaxLife = currentConsensusTime.plusSeconds(maxLifeSeconds); + final var currentPlusMaxLife = consensusNow.plusSeconds(maxLifetime); return currentPlusMaxLife.getEpochSecond(); } } - - /** - * Filters the signatories to only those that are required. - * The required signatories are those that are present in the incoming signatories set. 
- * - * @param signatories the signatories - * @param required the required - */ - static void filterSignatoriesToRequired(Set signatories, Set required) { - final Set incomingSignatories = Set.copyOf(signatories); - signatories.clear(); - filterSignatoriesToRequired(signatories, required, incomingSignatories); - } - - private static void filterSignatoriesToRequired( - final Set signatories, final Collection required, final Set incomingSignatories) { - for (final Key next : required) { - switch (next.key().kind()) { - case ED25519, ECDSA_SECP256K1, CONTRACT_ID, DELEGATABLE_CONTRACT_ID: - // Handle "primitive" keys, which are what the signatories set stores. - if (incomingSignatories.contains(next)) { - signatories.add(next); - } - break; - case KEY_LIST: - // Dive down into the elements of the key list - filterSignatoriesToRequired(signatories, next.keyList().keys(), incomingSignatories); - break; - case THRESHOLD_KEY: - // Dive down into the elements of the threshold key candidates list - filterSignatoriesToRequired( - signatories, next.thresholdKey().keys().keys(), incomingSignatories); - break; - case ECDSA_384, RSA_3072, UNSET: - // These types are unsupported - break; - } - } - } } diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandler.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandler.java index 5eb6530944b0..8cdd5420e7b2 100644 --- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandler.java +++ b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandler.java @@ -16,20 +16,31 @@ package com.hedera.node.app.service.schedule.impl.handlers; +import static com.hedera.hapi.node.base.ResponseCodeEnum.ACCOUNT_ID_DOES_NOT_EXIST; +import static 
com.hedera.hapi.node.base.ResponseCodeEnum.IDENTICAL_SCHEDULE_ALREADY_CREATED; +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_ADMIN_KEY; +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_TRANSACTION; +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_TRANSACTION_BODY; +import static com.hedera.hapi.node.base.ResponseCodeEnum.MAX_ENTITIES_IN_PRICE_REGIME_HAVE_BEEN_CREATED; +import static com.hedera.hapi.node.base.ResponseCodeEnum.MEMO_TOO_LONG; +import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULED_TRANSACTION_NOT_IN_WHITELIST; +import static com.hedera.hapi.node.base.SubType.DEFAULT; +import static com.hedera.hapi.node.base.SubType.SCHEDULE_CREATE_CONTRACT_CALL; import static com.hedera.node.app.hapi.utils.CommonPbjConverters.fromPbj; +import static com.hedera.node.app.service.schedule.impl.handlers.HandlerUtility.createProvisionalSchedule; +import static com.hedera.node.app.service.schedule.impl.handlers.HandlerUtility.functionalityForType; +import static com.hedera.node.app.service.schedule.impl.handlers.HandlerUtility.transactionIdForScheduled; +import static com.hedera.node.app.spi.validation.Validations.mustExist; +import static com.hedera.node.app.spi.workflows.HandleException.validateTrue; +import static com.hedera.node.app.spi.workflows.PreCheckException.validateFalsePreCheck; +import static com.hedera.node.app.spi.workflows.PreCheckException.validateTruePreCheck; import static java.util.Objects.requireNonNull; -import com.hedera.hapi.node.base.AccountID; import com.hedera.hapi.node.base.HederaFunctionality; -import com.hedera.hapi.node.base.Key; -import com.hedera.hapi.node.base.ResponseCodeEnum; -import com.hedera.hapi.node.base.SubType; -import com.hedera.hapi.node.base.TransactionID; +import com.hedera.hapi.node.base.ScheduleID; import com.hedera.hapi.node.scheduled.SchedulableTransactionBody; -import com.hedera.hapi.node.scheduled.SchedulableTransactionBody.DataOneOfType; import 
com.hedera.hapi.node.scheduled.ScheduleCreateTransactionBody; import com.hedera.hapi.node.state.schedule.Schedule; -import com.hedera.hapi.node.state.token.Account; import com.hedera.hapi.node.transaction.TransactionBody; import com.hedera.node.app.hapi.fees.usage.SigUsage; import com.hedera.node.app.hapi.fees.usage.schedule.ScheduleOpsUsage; @@ -50,11 +61,10 @@ import com.hederahashgraph.api.proto.java.FeeData; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Instant; import java.time.InstantSource; +import java.util.Collections; import java.util.List; import java.util.Objects; -import java.util.Set; import javax.inject.Inject; import javax.inject.Singleton; @@ -72,316 +82,162 @@ public ScheduleCreateHandler(@NonNull final InstantSource instantSource) { } @Override - public void pureChecks(@Nullable final TransactionBody currentTransaction) throws PreCheckException { - if (currentTransaction != null) { - checkValidTransactionId(currentTransaction.transactionID()); - checkLongTermSchedulable(getValidScheduleCreateBody(currentTransaction)); - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION_BODY); - } + public void pureChecks(@NonNull final TransactionBody body) throws PreCheckException { + requireNonNull(body); + validateTruePreCheck(body.hasScheduleCreate(), INVALID_TRANSACTION_BODY); + final var op = body.scheduleCreateOrThrow(); + validateTruePreCheck(op.hasScheduledTransactionBody(), INVALID_TRANSACTION); + // (FUTURE) Add a dedicated response code for an op waiting for an unspecified expiration time + validateFalsePreCheck(op.waitForExpiry() && !op.hasExpirationTime(), INVALID_TRANSACTION); } - /** - * Pre-handles a {@link HederaFunctionality#SCHEDULE_CREATE} transaction, returning the metadata - * required to, at minimum, validate the signatures of all required and optional signing keys. 
- * - * @param context the {@link PreHandleContext} which collects all information - * @throws PreCheckException if the transaction cannot be handled successfully. - * The response code appropriate to the failure reason will be provided via this exception. - */ @Override public void preHandle(@NonNull final PreHandleContext context) throws PreCheckException { - Objects.requireNonNull(context, NULL_CONTEXT_MESSAGE); - final TransactionBody currentTransaction = context.body(); - final LedgerConfig ledgerConfig = context.configuration().getConfigData(LedgerConfig.class); - final HederaConfig hederaConfig = context.configuration().getConfigData(HederaConfig.class); - final SchedulingConfig schedulingConfig = context.configuration().getConfigData(SchedulingConfig.class); - final long maxExpireConfig = schedulingConfig.longTermEnabled() - ? schedulingConfig.maxExpirationFutureSeconds() - : ledgerConfig.scheduleTxExpiryTimeSecs(); - final ScheduleCreateTransactionBody scheduleBody = getValidScheduleCreateBody(currentTransaction); - if (scheduleBody.memo() != null && scheduleBody.memo().length() > hederaConfig.transactionMaxMemoUtf8Bytes()) { - throw new PreCheckException(ResponseCodeEnum.MEMO_TOO_LONG); + requireNonNull(context); + final var body = context.body(); + // We ensure this exists in pureChecks() + final var op = body.scheduleCreateOrThrow(); + final var config = context.configuration(); + final var hederaConfig = config.getConfigData(HederaConfig.class); + validateTruePreCheck(op.memo().length() <= hederaConfig.transactionMaxMemoUtf8Bytes(), MEMO_TOO_LONG); + // For backward compatibility, use ACCOUNT_ID_DOES_NOT_EXIST for a nonexistent designated payer + if (op.hasPayerAccountID()) { + final var accountStore = context.createStore(ReadableAccountStore.class); + final var payer = accountStore.getAccountById(op.payerAccountIDOrThrow()); + mustExist(payer, ACCOUNT_ID_DOES_NOT_EXIST); } - // @todo('future') add whitelist check here; mono checks very late, so we 
cannot check that here yet. - // validate the schedulable transaction - getSchedulableTransaction(currentTransaction); - // @todo('future') This key/account validation should move to handle once we finish and validate - // modularization; mono does this check too early, and may reject transactions - // that should succeed. - validatePayerAndScheduler(context, scheduleBody); - // If we have an explicit payer account for the scheduled child transaction, - // add it to optional keys (it might not have signed yet). - final Key payerKey = getKeyForPayerAccount(scheduleBody, context); - if (payerKey != null) context.optionalKey(payerKey); - if (scheduleBody.hasAdminKey()) { - // If an admin key is present, it must sign the create transaction. - context.requireKey(scheduleBody.adminKeyOrThrow()); + final var schedulingConfig = config.getConfigData(SchedulingConfig.class); + validateTruePreCheck( + isAllowedFunction(op.scheduledTransactionBodyOrThrow(), schedulingConfig), + SCHEDULED_TRANSACTION_NOT_IN_WHITELIST); + // If an admin key is present, it must sign + if (op.hasAdminKey()) { + context.requireKey(op.adminKeyOrThrow()); } - checkSchedulableWhitelist(scheduleBody, schedulingConfig); - final TransactionID transactionId = currentTransaction.transactionID(); - if (transactionId != null) { - final Schedule provisionalSchedule = HandlerUtility.createProvisionalSchedule( - currentTransaction, instantSource.instant(), maxExpireConfig); - final Set allRequiredKeys = allKeysForTransaction(provisionalSchedule, context); - context.optionalKeys(allRequiredKeys); - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION); + final var ledgerConfig = config.getConfigData(LedgerConfig.class); + final long maxLifetime = schedulingConfig.longTermEnabled() + ? 
schedulingConfig.maxExpirationFutureSeconds() + : ledgerConfig.scheduleTxExpiryTimeSecs(); + final var schedule = createProvisionalSchedule(body, instantSource.instant(), maxLifetime); + final var transactionKeys = getRequiredKeys(schedule, context::allKeysForTransaction); + // If the schedule payer inherits from the ScheduleCreate, it is already in the required keys + if (op.hasPayerAccountID()) { + context.optionalKey(transactionKeys.payerKey()); } + // Any required non-payer key may optionally provide its signature with the ScheduleCreate + context.optionalKeys(transactionKeys.requiredNonPayerKeys()); } - /** - * This method is called during the handle workflow. It executes the actual transaction. - * - * @throws HandleException if the transaction is not handled successfully. - * The response code appropriate to the failure reason will be provided via this exception. - */ @Override public void handle(@NonNull final HandleContext context) throws HandleException { - Objects.requireNonNull(context, NULL_CONTEXT_MESSAGE); - final Instant currentConsensusTime = context.consensusNow(); - final WritableScheduleStore scheduleStore = context.storeFactory().writableStore(WritableScheduleStore.class); - final SchedulingConfig schedulingConfig = context.configuration().getConfigData(SchedulingConfig.class); - final LedgerConfig ledgerConfig = context.configuration().getConfigData(LedgerConfig.class); - final boolean isLongTermEnabled = schedulingConfig.longTermEnabled(); - // Note: We must store the original ScheduleCreate transaction body in the Schedule so that we can compare - // those bytes to any new ScheduleCreate transaction for detecting duplicate ScheduleCreate - // transactions. SchedulesByEquality is the virtual map for that task - final TransactionBody currentTransaction = context.body(); - if (currentTransaction.hasScheduleCreate()) { - final var expirationSeconds = isLongTermEnabled - ? 
schedulingConfig.maxExpirationFutureSeconds() - : ledgerConfig.scheduleTxExpiryTimeSecs(); - final Schedule provisionalSchedule = HandlerUtility.createProvisionalSchedule( - currentTransaction, currentConsensusTime, expirationSeconds); - checkSchedulableWhitelistHandle(provisionalSchedule, schedulingConfig); - context.attributeValidator().validateMemo(provisionalSchedule.memo()); - context.attributeValidator() - .validateMemo(provisionalSchedule.scheduledTransaction().memo()); - if (provisionalSchedule.hasAdminKey()) { - try { - context.attributeValidator().validateKey(provisionalSchedule.adminKeyOrThrow()); - } catch (HandleException e) { - throw new HandleException(ResponseCodeEnum.INVALID_ADMIN_KEY); - } - } - final ResponseCodeEnum validationResult = - validate(provisionalSchedule, currentConsensusTime, isLongTermEnabled); - if (validationOk(validationResult)) { - final List possibleDuplicates = scheduleStore.getByEquality(provisionalSchedule); - if (isPresentIn(context, possibleDuplicates, provisionalSchedule)) { - throw new HandleException(ResponseCodeEnum.IDENTICAL_SCHEDULE_ALREADY_CREATED); - } - if (scheduleStore.numSchedulesInState() + 1 > schedulingConfig.maxNumber()) { - throw new HandleException(ResponseCodeEnum.MAX_ENTITIES_IN_PRICE_REGIME_HAVE_BEEN_CREATED); - } - // Need to process the child transaction again, to get the *primitive* keys possibly required - final ScheduleKeysResult requiredKeysResult = allKeysForTransaction(provisionalSchedule, context); - final Set allRequiredKeys = requiredKeysResult.remainingRequiredKeys(); - final Set updatedSignatories = requiredKeysResult.updatedSignatories(); - final long nextId = context.entityNumGenerator().newEntityNum(); - Schedule finalSchedule = - HandlerUtility.completeProvisionalSchedule(provisionalSchedule, nextId, updatedSignatories); - if (tryToExecuteSchedule( - context, - finalSchedule, - allRequiredKeys, - updatedSignatories, - validationResult, - isLongTermEnabled)) { - finalSchedule = 
HandlerUtility.markExecuted(finalSchedule, currentConsensusTime); - } - scheduleStore.put(finalSchedule); - final ScheduleStreamBuilder scheduleRecords = - context.savepointStack().getBaseBuilder(ScheduleStreamBuilder.class); - scheduleRecords - .scheduleID(finalSchedule.scheduleId()) - .scheduledTransactionID(HandlerUtility.transactionIdForScheduled(finalSchedule)); - } else { - throw new HandleException(validationResult); - } - } else { - throw new HandleException(ResponseCodeEnum.INVALID_TRANSACTION); - } - } - - private boolean isPresentIn( - @NonNull final HandleContext context, - @Nullable final List possibleDuplicates, - @NonNull final Schedule provisionalSchedule) { - if (possibleDuplicates != null) { - for (final Schedule candidate : possibleDuplicates) { - if (compareForDuplicates(candidate, provisionalSchedule)) { - // Do not forget to set the ID of the existing duplicate in the receipt... - TransactionID scheduledTransactionID = candidate - .originalCreateTransaction() - .transactionID() - .copyBuilder() - .scheduled(true) - .build(); - context.savepointStack() - .getBaseBuilder(ScheduleStreamBuilder.class) - .scheduleID(candidate.scheduleId()) - .scheduledTransactionID(scheduledTransactionID); - return true; - } - } - } - return false; - } - - private boolean compareForDuplicates(@NonNull final Schedule candidate, @NonNull final Schedule requested) { - return candidate.waitForExpiry() == requested.waitForExpiry() - // @todo('9447') This should be modified to use calculated expiration once - // differential testing completes - && candidate.providedExpirationSecond() == requested.providedExpirationSecond() - && Objects.equals(candidate.memo(), requested.memo()) - && Objects.equals(candidate.adminKey(), requested.adminKey()) - // @note We should check scheduler here, but mono doesn't, so we cannot either, yet. 
- && Objects.equals(candidate.scheduledTransaction(), requested.scheduledTransaction()); - } - - @NonNull - private ScheduleCreateTransactionBody getValidScheduleCreateBody(@Nullable final TransactionBody currentTransaction) - throws PreCheckException { - if (currentTransaction != null) { - final ScheduleCreateTransactionBody scheduleCreateTransaction = currentTransaction.scheduleCreate(); - if (scheduleCreateTransaction != null) { - if (scheduleCreateTransaction.hasScheduledTransactionBody()) { - // this validates the schedulable transaction. - getSchedulableTransaction(currentTransaction); - return scheduleCreateTransaction; - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION); - } - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION_BODY); - } - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION); - } - } - - @Nullable - private Key getKeyForPayerAccount( - @NonNull final ScheduleCreateTransactionBody scheduleBody, @NonNull final PreHandleContext context) - throws PreCheckException { - if (scheduleBody.hasPayerAccountID()) { - final AccountID payerForSchedule = scheduleBody.payerAccountIDOrThrow(); - return getKeyForAccount(context, payerForSchedule); - } else { - return null; - } - } + requireNonNull(context); - @NonNull - private static Key getKeyForAccount(@NonNull final PreHandleContext context, final AccountID accountToQuery) - throws PreCheckException { - final ReadableAccountStore accountStore = context.createStore(ReadableAccountStore.class); - final Account accountData = accountStore.getAccountById(accountToQuery); - if (accountData != null && accountData.key() != null) return accountData.key(); - else throw new PreCheckException(ResponseCodeEnum.INVALID_SCHEDULE_PAYER_ID); - } - - @SuppressWarnings("DataFlowIssue") - private void checkSchedulableWhitelistHandle(final Schedule provisionalSchedule, final SchedulingConfig config) - throws HandleException { - final Set whitelist = 
config.whitelist().functionalitySet(); - final SchedulableTransactionBody scheduled = - provisionalSchedule.originalCreateTransaction().scheduleCreate().scheduledTransactionBody(); - final DataOneOfType transactionType = scheduled.data().kind(); - final HederaFunctionality functionType = HandlerUtility.functionalityForType(transactionType); - if (!whitelist.contains(functionType)) { - throw new HandleException(ResponseCodeEnum.SCHEDULED_TRANSACTION_NOT_IN_WHITELIST); - } - } - - private void validatePayerAndScheduler( - final PreHandleContext context, final ScheduleCreateTransactionBody scheduleBody) throws PreCheckException { - final ReadableAccountStore accountStore = context.createStore(ReadableAccountStore.class); - final AccountID payerForSchedule = scheduleBody.payerAccountID(); - if (payerForSchedule != null) { - final Account payer = accountStore.getAccountById(payerForSchedule); - if (payer == null) { - throw new PreCheckException(ResponseCodeEnum.ACCOUNT_ID_DOES_NOT_EXIST); - } - } - final AccountID schedulerId = context.payer(); - if (schedulerId != null) { - final Account scheduler = accountStore.getAccountById(schedulerId); - if (scheduler == null) { - throw new PreCheckException(ResponseCodeEnum.ACCOUNT_ID_DOES_NOT_EXIST); + final var schedulingConfig = context.configuration().getConfigData(SchedulingConfig.class); + final boolean isLongTermEnabled = schedulingConfig.longTermEnabled(); + final var ledgerConfig = context.configuration().getConfigData(LedgerConfig.class); + final var expirationSeconds = isLongTermEnabled + ? 
schedulingConfig.maxExpirationFutureSeconds() + : ledgerConfig.scheduleTxExpiryTimeSecs(); + final var consensusNow = context.consensusNow(); + final var provisionalSchedule = createProvisionalSchedule(context.body(), consensusNow, expirationSeconds); + validateTrue( + isAllowedFunction(provisionalSchedule.scheduledTransactionOrThrow(), schedulingConfig), + SCHEDULED_TRANSACTION_NOT_IN_WHITELIST); + context.attributeValidator().validateMemo(provisionalSchedule.memo()); + context.attributeValidator() + .validateMemo(provisionalSchedule.scheduledTransactionOrThrow().memo()); + if (provisionalSchedule.hasAdminKey()) { + try { + context.attributeValidator().validateKey(provisionalSchedule.adminKeyOrThrow()); + } catch (HandleException e) { + throw new HandleException(INVALID_ADMIN_KEY); } } - } + final var validationResult = validate(provisionalSchedule, consensusNow, isLongTermEnabled); + validateTrue(isMaybeExecutable(validationResult), validationResult); - private void checkSchedulableWhitelist( - @NonNull final ScheduleCreateTransactionBody scheduleCreate, @NonNull final SchedulingConfig config) - throws PreCheckException { - final Set whitelist = config.whitelist().functionalitySet(); - final DataOneOfType transactionType = - scheduleCreate.scheduledTransactionBody().data().kind(); - final HederaFunctionality functionType = HandlerUtility.functionalityForType(transactionType); - if (!whitelist.contains(functionType)) { - throw new PreCheckException(ResponseCodeEnum.SCHEDULED_TRANSACTION_NOT_IN_WHITELIST); + // Note that we must store the original ScheduleCreate transaction body in the Schedule so + // we can compare those bytes to any new ScheduleCreate transaction for detecting duplicate + // ScheduleCreate transactions. SchedulesByEquality is the virtual map for that task. 
+ final var scheduleStore = context.storeFactory().writableStore(WritableScheduleStore.class); + final var possibleDuplicates = scheduleStore.getByEquality(provisionalSchedule); + final var duplicate = maybeDuplicate(provisionalSchedule, possibleDuplicates); + if (duplicate != null) { + final var scheduledTxnId = duplicate + .originalCreateTransactionOrThrow() + .transactionIDOrThrow() + .copyBuilder() + .scheduled(true) + .build(); + context.savepointStack() + .getBaseBuilder(ScheduleStreamBuilder.class) + .scheduleID(duplicate.scheduleId()) + .scheduledTransactionID(scheduledTxnId); + throw new HandleException(IDENTICAL_SCHEDULE_ALREADY_CREATED); } - } + validateTrue( + scheduleStore.numSchedulesInState() + 1 <= schedulingConfig.maxNumber(), + MAX_ENTITIES_IN_PRICE_REGIME_HAVE_BEEN_CREATED); - private void checkLongTermSchedulable(final ScheduleCreateTransactionBody scheduleCreate) throws PreCheckException { - // @todo('long term schedule') HIP needed?, before enabling long term schedules, add a response code for - // INVALID_LONG_TERM_SCHEDULE and fix this exception. 
- if (scheduleCreate.waitForExpiry() && !scheduleCreate.hasExpirationTime()) { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION /*INVALID_LONG_TERM_SCHEDULE*/); - } - } - - @NonNull - private SchedulableTransactionBody getSchedulableTransaction(@NonNull final TransactionBody currentTransaction) - throws PreCheckException { - final ScheduleCreateTransactionBody scheduleBody = currentTransaction.scheduleCreate(); - if (scheduleBody != null) { - final SchedulableTransactionBody scheduledTransaction = scheduleBody.scheduledTransactionBody(); - if (scheduledTransaction != null) { - return scheduledTransaction; - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION); - } - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION_BODY); + // With all validations done, we check if the new schedule is already executable + final var transactionKeys = getTransactionKeysOrThrow(provisionalSchedule, context::allKeysForTransaction); + final var requiredKeys = allRequiredKeys(transactionKeys); + final var signatories = + newSignatories(context.keyVerifier().signingCryptoKeys(), Collections.emptyList(), requiredKeys); + final var schedulingTxnId = + provisionalSchedule.originalCreateTransactionOrThrow().transactionIDOrThrow(); + final var schedulerId = schedulingTxnId.accountIDOrThrow(); + final var scheduleId = ScheduleID.newBuilder() + .shardNum(schedulerId.shardNum()) + .realmNum(schedulerId.realmNum()) + .scheduleNum(context.entityNumGenerator().newEntityNum()) + .build(); + var schedule = provisionalSchedule + .copyBuilder() + .scheduleId(scheduleId) + .schedulerAccountId(schedulerId) + .signatories(signatories) + .build(); + if (tryToExecuteSchedule(context, schedule, requiredKeys, validationResult, isLongTermEnabled)) { + schedule = markedExecuted(schedule, consensusNow); } + scheduleStore.put(schedule); + context.savepointStack() + .getBaseBuilder(ScheduleStreamBuilder.class) + .scheduleID(schedule.scheduleId()) + 
.scheduledTransactionID(transactionIdForScheduled(schedule)); } @NonNull @Override public Fees calculateFees(@NonNull final FeeContext feeContext) { requireNonNull(feeContext); - final var op = feeContext.body(); + final var body = feeContext.body(); final var config = feeContext.configuration(); final var ledgerConfig = config.getConfigData(LedgerConfig.class); final var schedulingConfig = config.getConfigData(SchedulingConfig.class); - final var subType = (op.scheduleCreateOrThrow().hasScheduledTransactionBody() - && op.scheduleCreateOrThrow().scheduledTransactionBody().hasContractCall()) - ? SubType.SCHEDULE_CREATE_CONTRACT_CALL - : SubType.DEFAULT; - + final var subType = body.scheduleCreateOrElse(ScheduleCreateTransactionBody.DEFAULT) + .scheduledTransactionBodyOrElse(SchedulableTransactionBody.DEFAULT) + .hasContractCall() + ? SCHEDULE_CREATE_CONTRACT_CALL + : DEFAULT; return feeContext .feeCalculatorFactory() .feeCalculator(subType) .legacyCalculate(sigValueObj -> usageGiven( - fromPbj(op), + fromPbj(body), sigValueObj, schedulingConfig.longTermEnabled(), ledgerConfig.scheduleTxExpiryTimeSecs())); } - public FeeData usageGiven( - final com.hederahashgraph.api.proto.java.TransactionBody txn, - final SigValueObj svo, + private @NonNull FeeData usageGiven( + @NonNull final com.hederahashgraph.api.proto.java.TransactionBody txn, + @NonNull final SigValueObj svo, final boolean longTermEnabled, final long scheduledTxExpiryTimeSecs) { final var op = txn.getScheduleCreate(); final var sigUsage = new SigUsage(svo.getTotalSigCount(), svo.getSignatureSize(), svo.getPayerAcctSigCount()); - final long lifetimeSecs; if (op.hasExpirationTime() && longTermEnabled) { lifetimeSecs = Math.max( @@ -393,4 +249,32 @@ public FeeData usageGiven( } return scheduleOpsUsage.scheduleCreateUsage(txn, sigUsage, lifetimeSecs); } + + private @Nullable Schedule maybeDuplicate( + @NonNull final Schedule schedule, @Nullable final List duplicates) { + if (duplicates == null) { + return null; + 
} + for (final var duplicate : duplicates) { + if (areIdentical(duplicate, schedule)) { + return duplicate; + } + } + return null; + } + + private boolean areIdentical(@NonNull final Schedule candidate, @NonNull final Schedule requested) { + return candidate.waitForExpiry() == requested.waitForExpiry() + && candidate.providedExpirationSecond() == requested.providedExpirationSecond() + && Objects.equals(candidate.memo(), requested.memo()) + && Objects.equals(candidate.adminKey(), requested.adminKey()) + // @note We should check scheduler here, but mono doesn't, so we cannot either, yet. + && Objects.equals(candidate.scheduledTransaction(), requested.scheduledTransaction()); + } + + private boolean isAllowedFunction( + @NonNull final SchedulableTransactionBody body, @NonNull final SchedulingConfig config) { + final var scheduledFunctionality = functionalityForType(body.data().kind()); + return config.whitelist().functionalitySet().contains(scheduledFunctionality); + } } diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleDeleteHandler.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleDeleteHandler.java index dc3d612b6b5e..ea4dfa3e504e 100644 --- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleDeleteHandler.java +++ b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleDeleteHandler.java @@ -16,12 +16,19 @@ package com.hedera.node.app.service.schedule.impl.handlers; +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_SCHEDULE_ID; +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_TRANSACTION_BODY; +import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULE_ALREADY_DELETED; +import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULE_ALREADY_EXECUTED; 
+import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULE_IS_IMMUTABLE; +import static com.hedera.hapi.node.base.ResponseCodeEnum.UNAUTHORIZED; import static com.hedera.node.app.hapi.utils.CommonPbjConverters.fromPbj; +import static com.hedera.node.app.spi.workflows.HandleException.validateFalse; +import static com.hedera.node.app.spi.workflows.HandleException.validateTrue; +import static com.hedera.node.app.spi.workflows.PreCheckException.validateTruePreCheck; import static java.util.Objects.requireNonNull; import com.hedera.hapi.node.base.HederaFunctionality; -import com.hedera.hapi.node.base.Key; -import com.hedera.hapi.node.base.ResponseCodeEnum; import com.hedera.hapi.node.base.ScheduleID; import com.hedera.hapi.node.base.SubType; import com.hedera.hapi.node.scheduled.ScheduleDeleteTransactionBody; @@ -35,7 +42,6 @@ import com.hedera.node.app.service.schedule.WritableScheduleStore; import com.hedera.node.app.spi.fees.FeeContext; import com.hedera.node.app.spi.fees.Fees; -import com.hedera.node.app.spi.signatures.SignatureVerification; import com.hedera.node.app.spi.workflows.HandleContext; import com.hedera.node.app.spi.workflows.HandleException; import com.hedera.node.app.spi.workflows.PreCheckException; @@ -46,7 +52,6 @@ import com.hederahashgraph.api.proto.java.FeeData; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; import javax.inject.Inject; import javax.inject.Singleton; @@ -58,113 +63,74 @@ public class ScheduleDeleteHandler extends AbstractScheduleHandler implements Tr private final ScheduleOpsUsage scheduleOpsUsage = new ScheduleOpsUsage(); @Inject - public ScheduleDeleteHandler() {} - - @Override - public void pureChecks(@Nullable final TransactionBody currentTransaction) throws PreCheckException { - getValidScheduleDeleteBody(currentTransaction); + public ScheduleDeleteHandler() { + // Dagger2 } - @NonNull - private ScheduleDeleteTransactionBody 
getValidScheduleDeleteBody(@Nullable final TransactionBody currentTransaction) - throws PreCheckException { - if (currentTransaction != null) { - final ScheduleDeleteTransactionBody scheduleDeleteTransaction = currentTransaction.scheduleDelete(); - if (scheduleDeleteTransaction != null) { - if (scheduleDeleteTransaction.scheduleID() != null) { - return scheduleDeleteTransaction; - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_SCHEDULE_ID); - } - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION_BODY); - } - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION); - } + @Override + public void pureChecks(@NonNull final TransactionBody body) throws PreCheckException { + requireNonNull(body); + validateTruePreCheck(body.hasScheduleDelete(), INVALID_TRANSACTION_BODY); + final var op = body.scheduleDeleteOrThrow(); + validateTruePreCheck(op.hasScheduleID(), INVALID_SCHEDULE_ID); } @Override public void preHandle(@NonNull final PreHandleContext context) throws PreCheckException { - Objects.requireNonNull(context, NULL_CONTEXT_MESSAGE); - final ReadableScheduleStore scheduleStore = context.createStore(ReadableScheduleStore.class); + requireNonNull(context); + final var scheduleStore = context.createStore(ReadableScheduleStore.class); final SchedulingConfig schedulingConfig = context.configuration().getConfigData(SchedulingConfig.class); final boolean isLongTermEnabled = schedulingConfig.longTermEnabled(); - final TransactionBody currentTransaction = context.body(); - final ScheduleDeleteTransactionBody scheduleDeleteTransaction = getValidScheduleDeleteBody(currentTransaction); - if (scheduleDeleteTransaction.scheduleID() != null) { - final Schedule scheduleData = - preValidate(scheduleStore, isLongTermEnabled, scheduleDeleteTransaction.scheduleID()); - final Key adminKey = scheduleData.adminKey(); - if (adminKey != null) context.requireKey(adminKey); - else throw new 
PreCheckException(ResponseCodeEnum.SCHEDULE_IS_IMMUTABLE); - // Once deleted or executed, no later transaction will change that status. - if (scheduleData.deleted()) throw new PreCheckException(ResponseCodeEnum.SCHEDULE_ALREADY_DELETED); - if (scheduleData.executed()) throw new PreCheckException(ResponseCodeEnum.SCHEDULE_ALREADY_EXECUTED); - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION_BODY); - } + final var op = context.body().scheduleDeleteOrThrow(); + final var schedule = getValidated(op.scheduleIDOrThrow(), scheduleStore, isLongTermEnabled); + validateFalse(schedule.deleted(), SCHEDULE_ALREADY_DELETED); + validateFalse(schedule.executed(), SCHEDULE_ALREADY_EXECUTED); + validateTruePreCheck(schedule.hasAdminKey(), SCHEDULE_IS_IMMUTABLE); + context.requireKey(schedule.adminKeyOrThrow()); } @Override public void handle(@NonNull final HandleContext context) throws HandleException { - Objects.requireNonNull(context, NULL_CONTEXT_MESSAGE); - final WritableScheduleStore scheduleStore = context.storeFactory().writableStore(WritableScheduleStore.class); - final TransactionBody currentTransaction = context.body(); - final SchedulingConfig schedulingConfig = context.configuration().getConfigData(SchedulingConfig.class); - try { - final ScheduleDeleteTransactionBody scheduleToDelete = getValidScheduleDeleteBody(currentTransaction); - final ScheduleID idToDelete = scheduleToDelete.scheduleID(); - if (idToDelete != null) { - final boolean isLongTermEnabled = schedulingConfig.longTermEnabled(); - final Schedule scheduleData = reValidate(scheduleStore, isLongTermEnabled, idToDelete); - if (scheduleData.hasAdminKey()) { - final SignatureVerification verificationResult = - context.keyVerifier().verificationFor(scheduleData.adminKeyOrThrow()); - if (verificationResult.passed()) { - scheduleStore.delete(idToDelete, context.consensusNow()); - final ScheduleStreamBuilder scheduleRecords = - 
context.savepointStack().getBaseBuilder(ScheduleStreamBuilder.class); - scheduleRecords.scheduleID(idToDelete); - } else { - throw new HandleException(ResponseCodeEnum.UNAUTHORIZED); - } - } else { - throw new HandleException(ResponseCodeEnum.SCHEDULE_IS_IMMUTABLE); - } - } else { - throw new HandleException(ResponseCodeEnum.INVALID_SCHEDULE_ID); - } - } catch (final IllegalStateException ignored) { - throw new HandleException(ResponseCodeEnum.INVALID_SCHEDULE_ID); - } catch (final PreCheckException translate) { - throw new HandleException(translate.responseCode()); - } + requireNonNull(context); + final var scheduleStore = context.storeFactory().writableStore(WritableScheduleStore.class); + final var body = context.body(); + final var op = body.scheduleDeleteOrThrow(); + final var scheduleId = op.scheduleIDOrThrow(); + final var schedulingConfig = context.configuration().getConfigData(SchedulingConfig.class); + final boolean isLongTermEnabled = schedulingConfig.longTermEnabled(); + final var schedule = revalidateOrThrow(scheduleId, scheduleStore, isLongTermEnabled); + validateTrue(schedule.hasAdminKey(), SCHEDULE_IS_IMMUTABLE); + final var verificationResult = context.keyVerifier().verificationFor(schedule.adminKeyOrThrow()); + validateTrue(verificationResult.passed(), UNAUTHORIZED); + scheduleStore.delete(scheduleId, context.consensusNow()); + context.savepointStack().getBaseBuilder(ScheduleStreamBuilder.class).scheduleID(scheduleId); } /** * Verify that the transaction and schedule still meet the validation criteria expressed in the - * {@link AbstractScheduleHandler#preValidate(ReadableScheduleStore, boolean, ScheduleID)} method. + * {@link AbstractScheduleHandler#getValidated(ScheduleID, ReadableScheduleStore, boolean)} method. + * + * @param scheduleId the Schedule ID of the item to mark as deleted. 
* @param scheduleStore a Readable source of Schedule data from state * @param isLongTermEnabled a flag indicating if long term scheduling is enabled in configuration. - * @param idToDelete the Schedule ID of the item to mark as deleted. * @return a schedule metadata read from state for the ID given, if all validation checks pass * @throws HandleException if any validation check fails. */ @NonNull - protected Schedule reValidate( + protected Schedule revalidateOrThrow( + @NonNull final ScheduleID scheduleId, @NonNull final ReadableScheduleStore scheduleStore, - final boolean isLongTermEnabled, - @Nullable final ScheduleID idToDelete) + final boolean isLongTermEnabled) throws HandleException { + requireNonNull(scheduleId); + requireNonNull(scheduleStore); try { - final Schedule validSchedule = preValidate(scheduleStore, isLongTermEnabled, idToDelete); - // Once deleted or executed, no later transaction will change that status. - if (validSchedule.deleted()) throw new HandleException(ResponseCodeEnum.SCHEDULE_ALREADY_DELETED); - if (validSchedule.executed()) throw new HandleException(ResponseCodeEnum.SCHEDULE_ALREADY_EXECUTED); - return validSchedule; - } catch (final PreCheckException translated) { - throw new HandleException(translated.responseCode()); + final var schedule = getValidated(scheduleId, scheduleStore, isLongTermEnabled); + validateFalse(schedule.deleted(), SCHEDULE_ALREADY_DELETED); + validateFalse(schedule.executed(), SCHEDULE_ALREADY_EXECUTED); + return schedule; + } catch (final PreCheckException e) { + throw new HandleException(e.responseCode()); } } @@ -172,11 +138,10 @@ protected Schedule reValidate( @Override public Fees calculateFees(@NonNull final FeeContext feeContext) { requireNonNull(feeContext); - final var op = feeContext.body(); - final var scheduleStore = feeContext.readableStore(ReadableScheduleStore.class); - final var schedule = scheduleStore.get(op.scheduleDeleteOrThrow().scheduleIDOrThrow()); - + final var op = feeContext.body(); + 
final var schedule = scheduleStore.get( + op.scheduleDeleteOrElse(ScheduleDeleteTransactionBody.DEFAULT).scheduleIDOrElse(ScheduleID.DEFAULT)); return feeContext .feeCalculatorFactory() .feeCalculator(SubType.DEFAULT) @@ -191,12 +156,11 @@ public Fees calculateFees(@NonNull final FeeContext feeContext) { } private FeeData usageGiven( - final com.hederahashgraph.api.proto.java.TransactionBody txn, - final SigValueObj svo, - final Schedule schedule, + @NonNull final com.hederahashgraph.api.proto.java.TransactionBody txn, + @NonNull final SigValueObj svo, + @Nullable final Schedule schedule, final long scheduledTxExpiryTimeSecs) { final var sigUsage = new SigUsage(svo.getTotalSigCount(), svo.getSignatureSize(), svo.getPayerAcctSigCount()); - if (schedule != null) { return scheduleOpsUsage.scheduleDeleteUsage(txn, sigUsage, schedule.calculatedExpirationSecond()); } else { diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleSignHandler.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleSignHandler.java index 6d2b6e64f669..66b782034cea 100644 --- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleSignHandler.java +++ b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleSignHandler.java @@ -16,19 +16,20 @@ package com.hedera.node.app.service.schedule.impl.handlers; +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_SCHEDULE_ID; +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_TRANSACTION_BODY; +import static com.hedera.hapi.node.base.ResponseCodeEnum.NO_NEW_VALID_SIGNATURES; import static com.hedera.node.app.hapi.utils.CommonPbjConverters.fromPbj; +import static com.hedera.node.app.service.schedule.impl.handlers.HandlerUtility.transactionIdForScheduled; +import static 
com.hedera.node.app.spi.workflows.HandleException.validateTrue; +import static com.hedera.node.app.spi.workflows.PreCheckException.validateTruePreCheck; import static java.util.Objects.requireNonNull; -import com.hedera.hapi.node.base.AccountID; import com.hedera.hapi.node.base.HederaFunctionality; -import com.hedera.hapi.node.base.Key; -import com.hedera.hapi.node.base.ResponseCodeEnum; import com.hedera.hapi.node.base.ScheduleID; import com.hedera.hapi.node.base.SubType; -import com.hedera.hapi.node.scheduled.SchedulableTransactionBody; import com.hedera.hapi.node.scheduled.ScheduleSignTransactionBody; import com.hedera.hapi.node.state.schedule.Schedule; -import com.hedera.hapi.node.state.token.Account; import com.hedera.hapi.node.transaction.TransactionBody; import com.hedera.node.app.hapi.fees.usage.SigUsage; import com.hedera.node.app.hapi.fees.usage.schedule.ScheduleOpsUsage; @@ -36,7 +37,6 @@ import com.hedera.node.app.service.schedule.ReadableScheduleStore; import com.hedera.node.app.service.schedule.ScheduleStreamBuilder; import com.hedera.node.app.service.schedule.WritableScheduleStore; -import com.hedera.node.app.service.token.ReadableAccountStore; import com.hedera.node.app.spi.fees.FeeContext; import com.hedera.node.app.spi.fees.Fees; import com.hedera.node.app.spi.workflows.HandleContext; @@ -49,9 +49,6 @@ import com.hederahashgraph.api.proto.java.FeeData; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Instant; -import java.util.Objects; -import java.util.Set; import javax.inject.Inject; import javax.inject.Singleton; @@ -59,160 +56,79 @@ * This class contains all workflow-related functionality regarding {@link HederaFunctionality#SCHEDULE_SIGN}. 
*/ @Singleton -@SuppressWarnings("OverlyCoupledClass") public class ScheduleSignHandler extends AbstractScheduleHandler implements TransactionHandler { private final ScheduleOpsUsage scheduleOpsUsage = new ScheduleOpsUsage(); @Inject - public ScheduleSignHandler() {} + public ScheduleSignHandler() { + // Dagger2 + } @Override - public void pureChecks(@Nullable final TransactionBody currentTransaction) throws PreCheckException { - if (currentTransaction != null) { - checkValidTransactionId(currentTransaction.transactionID()); - getValidScheduleSignBody(currentTransaction); - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION_BODY); - } + public void pureChecks(@NonNull final TransactionBody body) throws PreCheckException { + requireNonNull(body); + validateTruePreCheck(body.hasScheduleSign(), INVALID_TRANSACTION_BODY); + final var op = body.scheduleSignOrThrow(); + validateTruePreCheck(op.hasScheduleID(), INVALID_SCHEDULE_ID); } - /** - * Pre-handles a {@link HederaFunctionality#SCHEDULE_SIGN} transaction, returning the metadata - * required to, at minimum, validate the signatures of all required signing keys. - * - * @param context the {@link PreHandleContext} which collects all information - * @throws PreCheckException if the transaction cannot be handled successfully. - * The response code appropriate to the failure reason will be provided via this exception. 
- */ @Override public void preHandle(@NonNull final PreHandleContext context) throws PreCheckException { - Objects.requireNonNull(context, NULL_CONTEXT_MESSAGE); - final ReadableScheduleStore scheduleStore = context.createStore(ReadableScheduleStore.class); - final SchedulingConfig schedulingConfig = context.configuration().getConfigData(SchedulingConfig.class); + requireNonNull(context); + final var op = context.body().scheduleSignOrThrow(); + final var scheduleStore = context.createStore(ReadableScheduleStore.class); + final var schedulingConfig = context.configuration().getConfigData(SchedulingConfig.class); final boolean isLongTermEnabled = schedulingConfig.longTermEnabled(); - final TransactionBody currentTransaction = context.body(); - final ScheduleSignTransactionBody scheduleSignTransaction = getValidScheduleSignBody(currentTransaction); - if (scheduleSignTransaction.scheduleID() != null) { - final Schedule scheduleData = - preValidate(scheduleStore, isLongTermEnabled, scheduleSignTransaction.scheduleID()); - final AccountID payerAccount = scheduleData.payerAccountId(); - // Note, payer should never be null, but we have to check anyway, because Sonar doesn't know better. 
- if (payerAccount != null) { - final ReadableAccountStore accountStore = context.createStore(ReadableAccountStore.class); - final Account payer = accountStore.getAccountById(payerAccount); - if (payer != null) { - final Key payerKey = payer.key(); - if (payerKey != null) context.optionalKey(payerKey); - } - } - try { - final Set allKeysNeeded = allKeysForTransaction(scheduleData, context); - context.optionalKeys(allKeysNeeded); - } catch (HandleException translated) { - throw new PreCheckException(translated.getStatus()); - } - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION_BODY); - } - // context now has all of the keys required by the scheduled transaction in optional keys + final var schedule = getValidated(op.scheduleIDOrThrow(), scheduleStore, isLongTermEnabled); + final var requiredKeys = getRequiredKeys(schedule, context::allKeysForTransaction); + context.optionalKey(requiredKeys.payerKey()); + context.optionalKeys(requiredKeys.requiredNonPayerKeys()); } - /** - * This method is called during the handle workflow. It executes the actual transaction. - * - * @throws HandleException if the transaction is not handled successfully. - * The response code appropriate to the failure reason will be provided via this exception. 
- */ - @SuppressWarnings({"FeatureEnvy", "OverlyCoupledMethod"}) @Override public void handle(@NonNull final HandleContext context) throws HandleException { - Objects.requireNonNull(context, NULL_CONTEXT_MESSAGE); - final Instant currentConsensusTime = context.consensusNow(); - final WritableScheduleStore scheduleStore = context.storeFactory().writableStore(WritableScheduleStore.class); - final SchedulingConfig schedulingConfig = context.configuration().getConfigData(SchedulingConfig.class); + requireNonNull(context); + final var op = context.body().scheduleSignOrThrow(); + final var scheduleStore = context.storeFactory().writableStore(WritableScheduleStore.class); + // Non-final because we may update signatories and/or mark it as executed before putting it back + var schedule = scheduleStore.getForModify(op.scheduleIDOrThrow()); + + final var consensusNow = context.consensusNow(); + final var schedulingConfig = context.configuration().getConfigData(SchedulingConfig.class); final boolean isLongTermEnabled = schedulingConfig.longTermEnabled(); - final TransactionBody currentTransaction = context.body(); - if (currentTransaction.hasScheduleSign()) { - final ScheduleSignTransactionBody signTransaction = currentTransaction.scheduleSignOrThrow(); - final ScheduleID idToSign = signTransaction.scheduleID(); - final Schedule scheduleData = scheduleStore.get(idToSign); - final ResponseCodeEnum validationResult = validate(scheduleData, currentConsensusTime, isLongTermEnabled); - if (validationOk(validationResult)) { - final Schedule scheduleToSign = scheduleStore.getForModify(idToSign); - // ID to sign will never be null here, but sonar needs this check... 
- if (scheduleToSign != null && idToSign != null) { - final SchedulableTransactionBody schedulableTransaction = scheduleToSign.scheduledTransaction(); - if (schedulableTransaction != null) { - final ScheduleKeysResult requiredKeysResult = allKeysForTransaction(scheduleToSign, context); - final Set allRequiredKeys = requiredKeysResult.remainingRequiredKeys(); - final Set updatedSignatories = requiredKeysResult.updatedSignatories(); - if (tryToExecuteSchedule( - context, - scheduleToSign, - allRequiredKeys, - updatedSignatories, - validationResult, - isLongTermEnabled)) { - scheduleStore.put(HandlerUtility.replaceSignatoriesAndMarkExecuted( - scheduleToSign, updatedSignatories, currentConsensusTime)); - } else { - verifyHasNewSignatures(scheduleToSign.signatories(), updatedSignatories); - scheduleStore.put(HandlerUtility.replaceSignatories(scheduleToSign, updatedSignatories)); - } - final ScheduleStreamBuilder scheduleRecords = - context.savepointStack().getBaseBuilder(ScheduleStreamBuilder.class); - scheduleRecords.scheduledTransactionID( - HandlerUtility.transactionIdForScheduled(scheduleToSign)); - // Based on fuzzy-record matching this field may not be set in mono-service records - // scheduleRecords.scheduleID(idToSign); - } else { - // Note, this will never happen, but Sonar static analysis can't figure that out. 
- throw new HandleException(ResponseCodeEnum.INVALID_SCHEDULE_ID); - } - } else { - throw new HandleException(ResponseCodeEnum.INVALID_SCHEDULE_ID); - } - } else { - throw new HandleException(validationResult); - } - } else { - throw new HandleException(ResponseCodeEnum.INVALID_TRANSACTION); - } - } + final var validationResult = validate(schedule, consensusNow, isLongTermEnabled); + validateTrue(isMaybeExecutable(validationResult), validationResult); - @NonNull - private ScheduleSignTransactionBody getValidScheduleSignBody(@Nullable final TransactionBody currentTransaction) - throws PreCheckException { - if (currentTransaction != null) { - final ScheduleSignTransactionBody scheduleSignTransaction = currentTransaction.scheduleSign(); - if (scheduleSignTransaction != null) { - if (scheduleSignTransaction.scheduleID() != null) { - return scheduleSignTransaction; - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_SCHEDULE_ID); - } - } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION_BODY); - } + // With all validations done, we update the signatories on the schedule + final var transactionKeys = getTransactionKeysOrThrow(schedule, context::allKeysForTransaction); + final var requiredKeys = allRequiredKeys(transactionKeys); + final var signatories = schedule.signatories(); + final var newSignatories = newSignatories(context.keyVerifier().signingCryptoKeys(), signatories, requiredKeys); + schedule = schedule.copyBuilder().signatories(newSignatories).build(); + if (tryToExecuteSchedule(context, schedule, requiredKeys, validationResult, isLongTermEnabled)) { + scheduleStore.put(markedExecuted(schedule, consensusNow)); } else { - throw new PreCheckException(ResponseCodeEnum.INVALID_TRANSACTION); + validateTrue(!newSignatories.equals(signatories), NO_NEW_VALID_SIGNATURES); + scheduleStore.put(schedule); } + context.savepointStack() + .getBaseBuilder(ScheduleStreamBuilder.class) + 
.scheduledTransactionID(transactionIdForScheduled(schedule)); } @NonNull @Override public Fees calculateFees(@NonNull final FeeContext feeContext) { requireNonNull(feeContext); - final var op = feeContext.body(); - + final var body = feeContext.body(); final var scheduleStore = feeContext.readableStore(ReadableScheduleStore.class); - final var schedule = scheduleStore.get(op.scheduleSignOrThrow().scheduleIDOrThrow()); - + final var schedule = scheduleStore.get( + body.scheduleSignOrElse(ScheduleSignTransactionBody.DEFAULT).scheduleIDOrElse(ScheduleID.DEFAULT)); return feeContext .feeCalculatorFactory() .feeCalculator(SubType.DEFAULT) .legacyCalculate(sigValueObj -> usageGiven( - fromPbj(op), + fromPbj(body), sigValueObj, schedule, feeContext @@ -222,12 +138,11 @@ public Fees calculateFees(@NonNull final FeeContext feeContext) { } private FeeData usageGiven( - final com.hederahashgraph.api.proto.java.TransactionBody txn, - final SigValueObj svo, - final Schedule schedule, + @NonNull final com.hederahashgraph.api.proto.java.TransactionBody txn, + @NonNull final SigValueObj svo, + @Nullable final Schedule schedule, final long scheduledTxExpiryTimeSecs) { final var sigUsage = new SigUsage(svo.getTotalSigCount(), svo.getSignatureSize(), svo.getPayerAcctSigCount()); - if (schedule != null) { return scheduleOpsUsage.scheduleSignUsage(txn, sigUsage, schedule.calculatedExpirationSecond()); } else { diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleVerificationAssistant.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleVerificationAssistant.java deleted file mode 100644 index ff30a9031980..000000000000 --- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleVerificationAssistant.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (C) 2023-2024 Hedera Hashgraph, LLC 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.hedera.node.app.service.schedule.impl.handlers; - -import com.hedera.hapi.node.base.Key; -import com.hedera.node.app.spi.signatures.SignatureVerification; -import com.hedera.node.app.spi.signatures.VerificationAssistant; -import java.util.Set; - -/** - * Verification Assistant that "verifies" keys previously verified via Schedule create or sign transactions. - * This class also observes all primitive keys that are still unverified, and potentially passes those - * on via the side effect of adding them to the sets provided in the constructor, which must be modifiable. - */ -public class ScheduleVerificationAssistant implements VerificationAssistant { - private final Set preValidatedKeys; - private final Set failedPrimitiveKeys; - - /** - * Create a new schedule verification assistant. - * - * @param preValidatedKeys a modifiable {@code Set} of primitive keys previously verified. - * @param failedPrimitiveKeys an empty and modifiable {@code Set} to receive a list of - * primitive keys that are still unverified. 
- */ - public ScheduleVerificationAssistant(final Set preValidatedKeys, Set failedPrimitiveKeys) { - this.preValidatedKeys = preValidatedKeys; - this.failedPrimitiveKeys = failedPrimitiveKeys; - } - - @Override - public boolean test(final Key key, final SignatureVerification priorVerify) { - if (key.hasKeyList() || key.hasThresholdKey() || key.hasContractID() || key.hasDelegatableContractId()) { - return priorVerify.passed(); - } else { - final boolean isValid = priorVerify.passed() || preValidatedKeys.contains(key); - if (!isValid) { - failedPrimitiveKeys.add(key); - } else if (priorVerify.passed()) { - preValidatedKeys.add(key); - } - return isValid; - } - } -} diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/module-info.java b/hedera-node/hedera-schedule-service-impl/src/main/java/module-info.java index 5c707dc1d6a0..ae3d52e1be9f 100644 --- a/hedera-node/hedera-schedule-service-impl/src/main/java/module-info.java +++ b/hedera-node/hedera-schedule-service-impl/src/main/java/module-info.java @@ -1,6 +1,5 @@ module com.hedera.node.app.service.schedule.impl { requires transitive com.hedera.node.app.hapi.fees; - requires transitive com.hedera.node.app.hapi.utils; requires transitive com.hedera.node.app.service.schedule; requires transitive com.hedera.node.app.spi; requires transitive com.hedera.node.hapi; @@ -10,6 +9,7 @@ requires transitive dagger; requires transitive static java.compiler; // javax.annotation.processing.Generated requires transitive javax.inject; + requires com.hedera.node.app.hapi.utils; requires com.hedera.node.app.service.token; // ReadableAccountStore: payer account details on create, sign, query requires com.hedera.node.config; requires com.google.common; diff --git a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/AbstractScheduleHandlerTest.java 
b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/AbstractScheduleHandlerTest.java deleted file mode 100644 index 22762832afaf..000000000000 --- a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/AbstractScheduleHandlerTest.java +++ /dev/null @@ -1,285 +0,0 @@ -/* - * Copyright (C) 2023-2024 Hedera Hashgraph, LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.hedera.node.app.service.schedule.impl.handlers; - -import static org.assertj.core.api.BDDAssertions.assertThat; -import static org.assertj.core.api.BDDAssertions.assertThatNoException; -import static org.assertj.core.api.BDDAssertions.assertThatThrownBy; -import static org.mockito.Mockito.any; - -import com.hedera.hapi.node.base.AccountID; -import com.hedera.hapi.node.base.Key; -import com.hedera.hapi.node.base.ResponseCodeEnum; -import com.hedera.hapi.node.base.ScheduleID; -import com.hedera.hapi.node.base.TransactionID; -import com.hedera.hapi.node.scheduled.SchedulableTransactionBody; -import com.hedera.hapi.node.state.schedule.Schedule; -import com.hedera.hapi.node.transaction.TransactionBody; -import com.hedera.node.app.service.schedule.impl.handlers.AbstractScheduleHandler.ScheduleKeysResult; -import com.hedera.node.app.spi.workflows.HandleContext.TransactionCategory; -import com.hedera.node.app.spi.workflows.HandleException; -import 
com.hedera.node.app.spi.workflows.PreCheckException; -import com.hedera.node.app.spi.workflows.PreHandleContext; -import com.hedera.node.app.spi.workflows.TransactionKeys; -import com.hedera.node.app.workflows.handle.record.RecordStreamBuilder; -import com.hedera.node.app.workflows.prehandle.PreHandleContextImpl; -import java.security.InvalidKeyException; -import java.time.Instant; -import java.util.Set; -import java.util.function.Predicate; -import org.assertj.core.api.Condition; -import org.assertj.core.api.ThrowableAssert.ThrowingCallable; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.mockito.BDDMockito; -import org.mockito.Mockito; - -class AbstractScheduleHandlerTest extends ScheduleHandlerTestBase { - private static final SchedulableTransactionBody NULL_SCHEDULABLE_BODY = null; - private AbstractScheduleHandler testHandler; - private PreHandleContext realPreContext; - - @BeforeEach - void setUp() throws PreCheckException, InvalidKeyException { - setUpBase(); - testHandler = new TestScheduleHandler(); - } - - @Test - void validationOkReturnsSuccessForOKAndPending() { - for (final ResponseCodeEnum testValue : ResponseCodeEnum.values()) { - switch (testValue) { - case SUCCESS, OK, SCHEDULE_PENDING_EXPIRATION -> assertThat(testHandler.validationOk(testValue)) - .isTrue(); - default -> assertThat(testHandler.validationOk(testValue)).isFalse(); - } - } - } - - @Test - void preValidateVerifiesSchedulableAndID() throws PreCheckException { - // missing schedule or schedule ID should throw invalid ID - assertThatThrownBy(() -> testHandler.preValidate(scheduleStore, false, null)) - .is(new PreCheckExceptionMatch(ResponseCodeEnum.INVALID_SCHEDULE_ID)); - reset(writableById); - scheduleMapById.put(testScheduleID, null); - assertThatThrownBy(() -> testHandler.preValidate(scheduleStore, false, testScheduleID)) - .is(new PreCheckExceptionMatch(ResponseCodeEnum.INVALID_SCHEDULE_ID)); - for (final Schedule next : 
listOfScheduledOptions) { - final ScheduleID testId = next.scheduleId(); - reset(writableById); - // valid schedules should not throw - scheduleMapById.put(testId, next); - assertThatNoException().isThrownBy(() -> testHandler.preValidate(scheduleStore, false, testId)); - // scheduled without scheduled transaction should throw invalid transaction - final Schedule missingScheduled = next.copyBuilder() - .scheduledTransaction(NULL_SCHEDULABLE_BODY) - .build(); - reset(writableById); - scheduleMapById.put(testId, missingScheduled); - assertThatThrownBy(() -> testHandler.preValidate(scheduleStore, false, testId)) - .is(new PreCheckExceptionMatch(ResponseCodeEnum.INVALID_TRANSACTION)); - // Non-success codes returned by validate should become exceptions - reset(writableById); - scheduleMapById.put(testId, next.copyBuilder().executed(true).build()); - assertThatThrownBy(() -> testHandler.preValidate(scheduleStore, false, testId)) - .is(new PreCheckExceptionMatch(ResponseCodeEnum.SCHEDULE_ALREADY_EXECUTED)); - - reset(writableById); - scheduleMapById.put(testId, next.copyBuilder().deleted(true).build()); - assertThatThrownBy(() -> testHandler.preValidate(scheduleStore, false, testId)) - .is(new PreCheckExceptionMatch(ResponseCodeEnum.SCHEDULE_ALREADY_DELETED)); - } - } - - @Test - void validateVerifiesExecutionDeletionAndExpiration() { - assertThat(testHandler.validate(null, testConsensusTime, false)) - .isEqualTo(ResponseCodeEnum.INVALID_SCHEDULE_ID); - for (final Schedule next : listOfScheduledOptions) { - assertThat(testHandler.validate(next, testConsensusTime, false)).isEqualTo(ResponseCodeEnum.OK); - assertThat(testHandler.validate(next, testConsensusTime, true)).isEqualTo(ResponseCodeEnum.OK); - Schedule failures = next.copyBuilder() - .scheduledTransaction(NULL_SCHEDULABLE_BODY) - .build(); - assertThat(testHandler.validate(failures, testConsensusTime, false)) - .isEqualTo(ResponseCodeEnum.INVALID_TRANSACTION); - failures = next.copyBuilder().executed(true).build(); 
- assertThat(testHandler.validate(failures, testConsensusTime, false)) - .isEqualTo(ResponseCodeEnum.SCHEDULE_ALREADY_EXECUTED); - failures = next.copyBuilder().deleted(true).build(); - assertThat(testHandler.validate(failures, testConsensusTime, false)) - .isEqualTo(ResponseCodeEnum.SCHEDULE_ALREADY_DELETED); - final Instant consensusAfterExpiration = Instant.ofEpochSecond(next.calculatedExpirationSecond() + 5); - assertThat(testHandler.validate(next, consensusAfterExpiration, false)) - .isEqualTo(ResponseCodeEnum.INVALID_SCHEDULE_ID); - assertThat(testHandler.validate(next, consensusAfterExpiration, true)) - .isEqualTo(ResponseCodeEnum.SCHEDULE_PENDING_EXPIRATION); - } - } - - @Test - void verifyCheckTxnId() { - assertThatThrownBy(new CallCheckValid(null, testHandler)) - .is(new PreCheckExceptionMatch(ResponseCodeEnum.INVALID_TRANSACTION_ID)); - for (final Schedule next : listOfScheduledOptions) { - final TransactionID idToTest = next.originalCreateTransaction().transactionID(); - assertThatNoException().isThrownBy(new CallCheckValid(idToTest, testHandler)); - TransactionID brokenId = idToTest.copyBuilder().scheduled(true).build(); - assertThatThrownBy(new CallCheckValid(brokenId, testHandler)) - .is(new PreCheckExceptionMatch(ResponseCodeEnum.SCHEDULED_TRANSACTION_NOT_IN_WHITELIST)); - brokenId = idToTest.copyBuilder().accountID((AccountID) null).build(); - assertThatThrownBy(new CallCheckValid(brokenId, testHandler)) - .is(new PreCheckExceptionMatch(ResponseCodeEnum.INVALID_SCHEDULE_PAYER_ID)); - brokenId = idToTest.copyBuilder().transactionValidStart(nullTime).build(); - assertThatThrownBy(new CallCheckValid(brokenId, testHandler)) - .is(new PreCheckExceptionMatch(ResponseCodeEnum.INVALID_TRANSACTION_START)); - } - } - - @Test - void verifyKeysForPreHandle() throws PreCheckException { - // Run through the "standard" schedules to ensure we handle the common cases - for (final Schedule next : listOfScheduledOptions) { - realPreContext = new PreHandleContextImpl( 
- mockStoreFactory, next.originalCreateTransaction(), testConfig, mockDispatcher); - Set keysObtained = testHandler.allKeysForTransaction(next, realPreContext); - // Should have no keys, because the mock dispatcher returns no keys - assertThat(keysObtained).isEmpty(); - } - // One check with a complex set of key returns, to ensure we process required and optional correctly. - final TransactionKeys testKeys = - new TestTransactionKeys(schedulerKey, Set.of(payerKey, adminKey), Set.of(optionKey, otherKey)); - // Must spy the context for this, the real dispatch would require calling other service handlers - PreHandleContext spyableContext = new PreHandleContextImpl( - mockStoreFactory, scheduleInState.originalCreateTransaction(), testConfig, mockDispatcher); - PreHandleContext spiedContext = BDDMockito.spy(spyableContext); - // given...return fails because it calls the real method before it can be replaced. - BDDMockito.doReturn(testKeys).when(spiedContext).allKeysForTransaction(any(), any()); - final Set keysObtained = testHandler.allKeysForTransaction(scheduleInState, spiedContext); - assertThat(keysObtained).isNotEmpty(); - assertThat(keysObtained).containsExactly(adminKey, optionKey, otherKey, payerKey); - } - - @Test - void verifyKeysForHandle() throws PreCheckException { - final TransactionKeys testKeys = - new TestTransactionKeys(schedulerKey, Set.of(payerKey, adminKey), Set.of(optionKey, schedulerKey)); - BDDMockito.given(mockContext.allKeysForTransaction(any(), any())).willReturn(testKeys); - final AccountID payerAccountId = schedulerAccount.accountId(); - BDDMockito.given(mockContext.payer()).willReturn(payerAccountId); - // This is how you get side-effects replicated, by having the "Answer" called in place of the real method. 
- BDDMockito.given(keyVerifier.verificationFor(any(), any())).will(new VerificationForAnswer(testKeys)); - // For this test, Context must mock `payer()`, `allKeysForTransaction()`, and `verificationFor` - // `verificationFor` is needed because we check verification in allKeysForTransaction to reduce - // the required keys set (potentially to empty) during handle. We must use an "Answer" for verification - // because verificationFor relies on side-effects for important results. - // Run through the "standard" schedules to ensure we handle the common cases - for (final Schedule next : listOfScheduledOptions) { - final ScheduleKeysResult verificationResult = testHandler.allKeysForTransaction(next, mockContext); - final Set keysRequired = verificationResult.remainingRequiredKeys(); - final Set keysObtained = verificationResult.updatedSignatories(); - // we *mock* verificationFor side effects, which is what fills in/clears the sets, - // so results should all be the same, despite empty signatories and mocked HandleContext. - // We do so based on verifier calls, so it still exercises the code to be tested, however. - // @todo('9447') add the schedulerKey back in. - // Note, however, we exclude the schedulerKey because it paid for the original create, so it - // is "deemed valid" and not included. 
- assertThat(keysRequired).isNotEmpty().hasSize(1).containsExactly(optionKey); - assertThat(keysObtained).isNotEmpty().hasSize(2).containsExactly(adminKey, payerKey); - } - } - - @SuppressWarnings("unchecked") - @Test - void verifyTryExecute() { - final var mockRecordBuilder = Mockito.mock(RecordStreamBuilder.class); - BDDMockito.given(mockContext.dispatchChildTransaction( - any(TransactionBody.class), - any(), - any(Predicate.class), - any(AccountID.class), - any(TransactionCategory.class), - any())) - .willReturn(mockRecordBuilder); - for (final Schedule testItem : listOfScheduledOptions) { - Set testRemaining = Set.of(); - final Set testSignatories = Set.of(adminKey, payerKey); - BDDMockito.given(mockRecordBuilder.status()).willReturn(ResponseCodeEnum.OK); - ResponseCodeEnum priorResponse = ResponseCodeEnum.SUCCESS; - assertThat(testHandler.tryToExecuteSchedule( - mockContext, testItem, testRemaining, testSignatories, priorResponse, false)) - .isTrue(); - priorResponse = ResponseCodeEnum.SCHEDULE_PENDING_EXPIRATION; - assertThat(testHandler.tryToExecuteSchedule( - mockContext, testItem, testRemaining, testSignatories, priorResponse, false)) - .isTrue(); - priorResponse = ResponseCodeEnum.SCHEDULE_PENDING_EXPIRATION; - assertThat(testHandler.tryToExecuteSchedule( - mockContext, testItem, testRemaining, testSignatories, priorResponse, true)) - .isFalse(); - BDDMockito.given(mockRecordBuilder.status()).willReturn(ResponseCodeEnum.INSUFFICIENT_ACCOUNT_BALANCE); - assertThatNoException() - .isThrownBy(() -> testHandler.tryToExecuteSchedule( - mockContext, testItem, testRemaining, testSignatories, ResponseCodeEnum.OK, false)); - } - } - - // Callable required by AssertJ throw assertions; unavoidable due to limitations on lambda syntax. 
- private static final class CallCheckValid implements ThrowingCallable { - private final TransactionID idToTest; - private final AbstractScheduleHandler testHandler; - - CallCheckValid(final TransactionID idToTest, final AbstractScheduleHandler testHandler) { - this.idToTest = idToTest; - this.testHandler = testHandler; - } - - @Override - public void call() throws PreCheckException { - testHandler.checkValidTransactionId(idToTest); - } - } - - private static final class TestScheduleHandler extends AbstractScheduleHandler {} - - private static final class PreCheckExceptionMatch extends Condition { - private final ResponseCodeEnum codeToMatch; - - PreCheckExceptionMatch(final ResponseCodeEnum codeToMatch) { - this.codeToMatch = codeToMatch; - } - - @Override - public boolean matches(final Throwable thrown) { - return thrown instanceof PreCheckException e && e.responseCode() == codeToMatch; - } - } - - private static final class HandleExceptionMatch extends Condition { - private final ResponseCodeEnum codeToMatch; - - HandleExceptionMatch(final ResponseCodeEnum codeToMatch) { - this.codeToMatch = codeToMatch; - } - - @Override - public boolean matches(final Throwable thrown) { - return thrown instanceof HandleException e && e.getStatus() == codeToMatch; - } - } -} diff --git a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/DispatchPredicateTest.java b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/DispatchPredicateTest.java deleted file mode 100644 index 5d6126607bb4..000000000000 --- a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/DispatchPredicateTest.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (C) 2024 Hedera Hashgraph, LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.hedera.node.app.service.schedule.impl.handlers; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.hedera.hapi.node.base.Key; -import com.hedera.hapi.node.base.KeyList; -import com.hedera.hapi.node.base.ThresholdKey; -import com.hedera.pbj.runtime.io.buffer.Bytes; -import java.util.HashSet; -import java.util.Set; -import java.util.function.Function; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.DisplayName; -import org.junit.jupiter.api.Test; - -class DispatchPredicateTest { - private static final String A_NAME = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; - private static final String B_NAME = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"; - private static final String C_NAME = "cccccccccccccccccccccccccccccccc"; - private static final Function KEY_BUILDER = - value -> Key.newBuilder().ed25519(Bytes.wrap(value.getBytes())); - public static final Key A_THRESHOLD_KEY = Key.newBuilder() - .thresholdKey(ThresholdKey.newBuilder() - .threshold(2) - .keys(KeyList.newBuilder() - .keys( - KEY_BUILDER.apply(A_NAME).build(), - KEY_BUILDER.apply(B_NAME).build(), - KEY_BUILDER.apply(C_NAME).build()) - .build())) - .build(); - public static final Key A_COMPLEX_KEY = Key.newBuilder() - .thresholdKey(ThresholdKey.newBuilder() - .threshold(2) - .keys(KeyList.newBuilder() - .keys( - KEY_BUILDER.apply(A_NAME).build(), - KEY_BUILDER.apply(B_NAME).build(), - A_THRESHOLD_KEY))) - .build(); - public static final Key B_COMPLEX_KEY = Key.newBuilder() - 
.thresholdKey(ThresholdKey.newBuilder() - .threshold(2) - .keys(KeyList.newBuilder() - .keys( - KEY_BUILDER.apply(A_NAME).build(), - KEY_BUILDER.apply(B_NAME).build(), - A_COMPLEX_KEY))) - .build(); - - private Set validKeys; - private DispatchPredicate predicate; - - @BeforeEach - void setUp() { - validKeys = new HashSet<>(); - validKeys.add(A_COMPLEX_KEY); - validKeys.add(A_THRESHOLD_KEY); - predicate = new DispatchPredicate(validKeys); - } - - @Test - @DisplayName("Testing Constructor") - void testConstructor() { - assertThat(predicate).isNotNull(); - DispatchPredicate dispatchPredicate = new DispatchPredicate(validKeys); - assertThat(predicate).isNotEqualTo(dispatchPredicate); - assertThatThrownBy(() -> new DispatchPredicate(null)).isInstanceOf(NullPointerException.class); - } - - @Test - @DisplayName("Test for when predicate contains keys") - void testContainsKey() { - assertThat(predicate.test(A_COMPLEX_KEY)).isTrue(); - assertThat(predicate.test(B_COMPLEX_KEY)).isFalse(); - } - - @Test - @DisplayName("Test for when predicate is missing keys") - void testContainsKeyIsNotNull() { - assertThatThrownBy(() -> predicate.test(null)).isInstanceOf(NullPointerException.class); - } -} diff --git a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/HandlerUtilityTest.java b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/HandlerUtilityTest.java index 48036cbd4b35..b2811dea1f46 100644 --- a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/HandlerUtilityTest.java +++ b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/HandlerUtilityTest.java @@ -19,8 +19,6 @@ import static org.assertj.core.api.BDDAssertions.assertThat; import com.hedera.hapi.node.base.AccountID; -import com.hedera.hapi.node.base.Key; -import com.hedera.hapi.node.base.ScheduleID; 
import com.hedera.hapi.node.base.Timestamp; import com.hedera.hapi.node.scheduled.SchedulableTransactionBody.DataOneOfType; import com.hedera.hapi.node.state.schedule.Schedule; @@ -31,8 +29,6 @@ import java.time.Instant; import java.util.Collection; import java.util.Collections; -import java.util.Set; -import org.assertj.core.api.BDDAssertions; import org.assertj.core.api.Condition; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -66,77 +62,6 @@ void functionalityForTypeHandlesAllTypes() { } } - @Test - void markExecutedModifiesSchedule() { - // The utility method call should modify the return only by marking it executed, and - // setting resolution time. - // No other value should change, and the original Schedule should not change. - for (final Schedule expected : listOfScheduledOptions) { - final Schedule marked = HandlerUtility.markExecuted(expected, testConsensusTime); - assertThat(expected.executed()).isFalse(); - assertThat(marked.executed()).isTrue(); - assertThat(expected.hasResolutionTime()).isFalse(); - assertThat(marked.hasResolutionTime()).isTrue(); - assertThat(marked.resolutionTime()).isEqualTo(timestampFromInstant(testConsensusTime)); - - assertThat(marked.deleted()).isEqualTo(expected.deleted()); - assertThat(marked.signatories()).containsExactlyElementsOf(expected.signatories()); - - verifyPartialEquality(marked, expected); - assertThat(marked.scheduleId()).isEqualTo(expected.scheduleId()); - } - } - - @SuppressWarnings({"rawtypes", "unchecked"}) - @Test - void replaceSignatoriesModifiesSchedule() { - // The utility method call should modify the return only by replacing signatories. - // No other value should change, and the original Schedule should not change. 
- final Set fakeSignatories = Set.of(schedulerKey, adminKey); - for (final Schedule expected : listOfScheduledOptions) { - final Schedule modified = HandlerUtility.replaceSignatories(expected, fakeSignatories); - // AssertJ is terrible at inverse conditions, and the condition definitions are REALLY bad - // Too much effort and confusing syntax for something that should be - // "assertThat(modified.signatories()).not().containsExactlyInAnyOrderElementsOf(expected.signatories())" - final var signatoryCondition = new ContainsAllElements(expected.signatories()); - assertThat(modified.signatories()).is(BDDAssertions.not(signatoryCondition)); - assertThat(modified.signatories()).containsExactlyInAnyOrderElementsOf(fakeSignatories); - - assertThat(modified.executed()).isEqualTo(expected.executed()); - assertThat(modified.resolutionTime()).isEqualTo(expected.resolutionTime()); - assertThat(modified.deleted()).isEqualTo(expected.deleted()); - - verifyPartialEquality(modified, expected); - assertThat(modified.scheduleId()).isEqualTo(expected.scheduleId()); - } - } - - @Test - void replaceSignatoriesAndMarkExecutedMakesBothModifications() { - // The utility method call should modify the return only by replacing signatories and setting executed to true. - // No other value should change, and the original Schedule should not change. 
- final Set fakeSignatories = Set.of(payerKey, adminKey); - for (final Schedule expected : listOfScheduledOptions) { - final Schedule modified = - HandlerUtility.replaceSignatoriesAndMarkExecuted(expected, fakeSignatories, testConsensusTime); - - // AssertJ is terrible at inverse conditions, and the condition definitions are REALLY bad - // Too much effort and confusing syntax for something that should be as simple as - // "assertThat(modified.signatories()).not().containsExactlyInAnyOrderElementsOf(expected.signatories())" - final ContainsAllElements signatoryCondition = new ContainsAllElements<>(expected.signatories()); - assertThat(modified.signatories()).is(BDDAssertions.not(signatoryCondition)); - assertThat(modified.signatories()).containsExactlyInAnyOrderElementsOf(fakeSignatories); - - assertThat(modified.executed()).isTrue(); - assertThat(modified.resolutionTime()).isNotEqualTo(expected.resolutionTime()); - assertThat(modified.resolutionTime()).isEqualTo(timestampFromInstant(testConsensusTime)); - assertThat(modified.deleted()).isEqualTo(expected.deleted()); - - verifyPartialEquality(modified, expected); - assertThat(modified.scheduleId()).isEqualTo(expected.scheduleId()); - } - } - @Test void createProvisionalScheduleCreatesCorrectSchedule() { // Creating a provisional schedule should produce the expected Schedule except for Schedule ID. @@ -160,32 +85,6 @@ void createProvisionalScheduleCreatesCorrectSchedule() { } } - @Test - void completeProvisionalScheduleModifiesWithNewId() { - final Set fakeSignatories = Set.of(payerKey, adminKey, schedulerKey); - final long testEntityNumber = 1791L; - // Completing a provisional schedule should produce the exact same Schedule except for Schedule ID. 
- for (final Schedule expected : listOfScheduledOptions) { - final TransactionBody createTransaction = expected.originalCreateTransaction(); - final AccountID baseId = createTransaction.transactionID().accountID(); - final ScheduleID expectedId = new ScheduleID(baseId.shardNum(), baseId.realmNum(), testEntityNumber); - final long maxLifeSeconds = scheduleConfig.maxExpirationFutureSeconds(); - final Schedule provisional = - HandlerUtility.createProvisionalSchedule(createTransaction, testConsensusTime, maxLifeSeconds); - final Schedule completed = - HandlerUtility.completeProvisionalSchedule(provisional, testEntityNumber, fakeSignatories); - - assertThat(completed.scheduleId()).isNotEqualTo(provisional.scheduleId()); - assertThat(completed.scheduleId()).isEqualTo(expectedId); - assertThat(completed.executed()).isEqualTo(provisional.executed()); - assertThat(completed.deleted()).isEqualTo(provisional.deleted()); - assertThat(completed.resolutionTime()).isEqualTo(provisional.resolutionTime()); - assertThat(completed.signatories()).containsExactlyElementsOf(fakeSignatories); - - verifyPartialEquality(completed, provisional); - } - } - /** * Verify that "actual" is equal to "expected" with respect to almost all values. *

The following attributes are not verified here: diff --git a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandlerTest.java b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandlerTest.java index ce6331b35c03..81d0f3cf48a3 100644 --- a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandlerTest.java +++ b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandlerTest.java @@ -16,7 +16,10 @@ package com.hedera.node.app.service.schedule.impl.handlers; +import static com.hedera.hapi.node.base.ResponseCodeEnum.ACCOUNT_ID_DOES_NOT_EXIST; +import static com.hedera.hapi.node.base.ResponseCodeEnum.IDENTICAL_SCHEDULE_ALREADY_CREATED; import static com.hedera.hapi.node.base.ResponseCodeEnum.MAX_ENTITIES_IN_PRICE_REGIME_HAVE_BEEN_CREATED; +import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULED_TRANSACTION_NOT_IN_WHITELIST; import static org.assertj.core.api.BDDAssertions.assertThat; import static org.mockito.BDDMockito.given; import static org.mockito.Mockito.mock; @@ -24,7 +27,6 @@ import com.hedera.hapi.node.base.AccountID; import com.hedera.hapi.node.base.HederaFunctionality; import com.hedera.hapi.node.base.Key; -import com.hedera.hapi.node.base.ResponseCodeEnum; import com.hedera.hapi.node.base.ScheduleID; import com.hedera.hapi.node.base.Timestamp; import com.hedera.hapi.node.base.TransactionID; @@ -37,6 +39,7 @@ import com.hedera.node.app.signature.impl.SignatureVerificationImpl; import com.hedera.node.app.spi.fixtures.Assertions; import com.hedera.node.app.spi.ids.EntityNumGenerator; +import com.hedera.node.app.spi.key.KeyComparator; import com.hedera.node.app.spi.signatures.VerificationAssistant; import com.hedera.node.app.spi.workflows.HandleException; import 
com.hedera.node.app.spi.workflows.PreCheckException; @@ -45,6 +48,7 @@ import java.security.InvalidKeyException; import java.time.InstantSource; import java.util.Set; +import java.util.concurrent.ConcurrentSkipListSet; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.BDDMockito; @@ -113,8 +117,7 @@ void preHandleMissingPayerThrowsInvalidPayer() throws PreCheckException { final TransactionBody createBody = scheduleCreateTransaction(payer); realPreContext = new PreHandleContextImpl(mockStoreFactory, createBody, testConfig, mockDispatcher); - Assertions.assertThrowsPreCheck( - () -> subject.preHandle(realPreContext), ResponseCodeEnum.ACCOUNT_ID_DOES_NOT_EXIST); + Assertions.assertThrowsPreCheck(() -> subject.preHandle(realPreContext), ACCOUNT_ID_DOES_NOT_EXIST); } @Test @@ -132,8 +135,7 @@ void preHandleRejectsNonWhitelist() throws PreCheckException { assertThat(realPreContext.payerKey()).isNotNull().isEqualTo(schedulerKey); } else { Assertions.assertThrowsPreCheck( - () -> subject.preHandle(realPreContext), - ResponseCodeEnum.SCHEDULED_TRANSACTION_NOT_IN_WHITELIST); + () -> subject.preHandle(realPreContext), SCHEDULED_TRANSACTION_NOT_IN_WHITELIST); } } } @@ -142,46 +144,14 @@ void preHandleRejectsNonWhitelist() throws PreCheckException { void handleRejectsDuplicateTransaction() throws PreCheckException { final TransactionBody createTransaction = otherScheduleInState.originalCreateTransaction(); prepareContext(createTransaction, otherScheduleInState.scheduleId().scheduleNum() + 1); - throwsHandleException(() -> subject.handle(mockContext), ResponseCodeEnum.IDENTICAL_SCHEDULE_ALREADY_CREATED); - } - - @Test - void verifyPureChecks() throws PreCheckException { - final TransactionBody.Builder failures = alternateCreateTransaction.copyBuilder(); - final TransactionID originalId = alternateCreateTransaction.transactionID(); - Assertions.assertThrowsPreCheck(() -> subject.pureChecks(null), 
ResponseCodeEnum.INVALID_TRANSACTION_BODY); - failures.transactionID(nullTransactionId); - Assertions.assertThrowsPreCheck( - () -> subject.pureChecks(failures.build()), ResponseCodeEnum.INVALID_TRANSACTION_ID); - TransactionID.Builder idErrors = originalId.copyBuilder().scheduled(true); - failures.transactionID(idErrors); - Assertions.assertThrowsPreCheck( - () -> subject.pureChecks(failures.build()), ResponseCodeEnum.SCHEDULED_TRANSACTION_NOT_IN_WHITELIST); - idErrors = originalId.copyBuilder().transactionValidStart(nullTime); - failures.transactionID(idErrors); - Assertions.assertThrowsPreCheck( - () -> subject.pureChecks(failures.build()), ResponseCodeEnum.INVALID_TRANSACTION_START); - idErrors = originalId.copyBuilder().accountID(nullAccount); - failures.transactionID(idErrors); - Assertions.assertThrowsPreCheck( - () -> subject.pureChecks(failures.build()), ResponseCodeEnum.INVALID_SCHEDULE_PAYER_ID); - failures.transactionID(originalId); - setLongTermError(failures, alternateCreateTransaction); - // The code here should be INVALID_LONG_TERM_SCHEDULE when/if that is added to response codes. 
- Assertions.assertThrowsPreCheck( - () -> subject.pureChecks(failures.build()), ResponseCodeEnum.INVALID_TRANSACTION); - } - - private void setLongTermError(final TransactionBody.Builder failures, final TransactionBody original) { - final var createBuilder = original.scheduleCreate().copyBuilder(); - createBuilder.waitForExpiry(true).expirationTime(nullTime); - failures.scheduleCreate(createBuilder); + throwsHandleException(() -> subject.handle(mockContext), IDENTICAL_SCHEDULE_ALREADY_CREATED); } @Test void handleRejectsNonWhitelist() throws HandleException, PreCheckException { final Set configuredWhitelist = scheduleConfig.whitelist().functionalitySet(); + given(keyVerifier.signingCryptoKeys()).willReturn(new ConcurrentSkipListSet<>(new KeyComparator())); for (final Schedule next : listOfScheduledOptions) { final TransactionBody createTransaction = next.originalCreateTransaction(); final TransactionID createId = createTransaction.transactionID(); @@ -194,8 +164,7 @@ void handleRejectsNonWhitelist() throws HandleException, PreCheckException { subject.handle(mockContext); verifyHandleSucceededForWhitelist(next, createId, startCount); } else { - throwsHandleException( - () -> subject.handle(mockContext), ResponseCodeEnum.SCHEDULED_TRANSACTION_NOT_IN_WHITELIST); + throwsHandleException(() -> subject.handle(mockContext), SCHEDULED_TRANSACTION_NOT_IN_WHITELIST); } } } @@ -244,6 +213,7 @@ void handleExecutesImmediateIfPossible() throws HandleException, PreCheckExcepti // all keys are "valid" with this mock setup given(keyVerifier.verificationFor(BDDMockito.any(Key.class), BDDMockito.any(VerificationAssistant.class))) .willReturn(new SignatureVerificationImpl(nullKey, null, true)); + given(keyVerifier.signingCryptoKeys()).willReturn(new ConcurrentSkipListSet<>(new KeyComparator())); final int startCount = scheduleMapById.size(); if (configuredWhitelist.contains(functionType)) { subject.handle(mockContext); diff --git 
a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleDeleteHandlerTest.java b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleDeleteHandlerTest.java index 96d4e153b91f..9c8c899fd811 100644 --- a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleDeleteHandlerTest.java +++ b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleDeleteHandlerTest.java @@ -16,6 +16,7 @@ package com.hedera.node.app.service.schedule.impl.handlers; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.BDDAssertions.assertThat; import static org.mockito.BDDMockito.given; @@ -88,19 +89,6 @@ void failsIfScheduleIsImmutable() throws PreCheckException { () -> subject.preHandle(realPreContext), ResponseCodeEnum.SCHEDULE_IS_IMMUTABLE); } - @Test - void verifyPureChecks() throws PreCheckException { - final TransactionBody originalDelete = scheduleDeleteTransaction(testScheduleID); - final TransactionBody.Builder failures = originalDelete.copyBuilder(); - Assertions.assertThrowsPreCheck(() -> subject.pureChecks(null), ResponseCodeEnum.INVALID_TRANSACTION); - final var deleteBuilder = originalDelete.scheduleDelete().copyBuilder().scheduleID(nullScheduleId); - failures.scheduleDelete(deleteBuilder); - Assertions.assertThrowsPreCheck( - () -> subject.pureChecks(failures.build()), ResponseCodeEnum.INVALID_SCHEDULE_ID); - Assertions.assertThrowsPreCheck( - () -> subject.pureChecks(originalCreateTransaction), ResponseCodeEnum.INVALID_TRANSACTION_BODY); - } - @Test void verifySimpleDelete() throws PreCheckException { final Schedule beforeDelete = scheduleStore.get(testScheduleID); @@ -132,7 +120,7 @@ void verifyHandleExceptionsForDelete() throws PreCheckException { final TransactionBody.Builder nextFailure = 
baseDelete.copyBuilder(); failures.scheduleID(nullScheduleId); prepareContext(nextFailure.scheduleDelete(failures).build()); - throwsHandleException(() -> subject.handle(mockContext), ResponseCodeEnum.INVALID_SCHEDULE_ID); + assertThatThrownBy(() -> subject.handle(mockContext)).isInstanceOf(NullPointerException.class); final Schedule failBase = listOfScheduledOptions.get(3); final Schedule noAdmin = failBase.copyBuilder().adminKey(nullKey).build(); diff --git a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleSignHandlerTest.java b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleSignHandlerTest.java index bd7fabb863b3..cd78bb53fd7c 100644 --- a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleSignHandlerTest.java +++ b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleSignHandlerTest.java @@ -27,6 +27,7 @@ import com.hedera.hapi.node.state.schedule.Schedule; import com.hedera.hapi.node.transaction.TransactionBody; import com.hedera.node.app.spi.fixtures.Assertions; +import com.hedera.node.app.spi.key.KeyComparator; import com.hedera.node.app.spi.signatures.VerificationAssistant; import com.hedera.node.app.spi.workflows.HandleException; import com.hedera.node.app.spi.workflows.PreCheckException; @@ -38,6 +39,7 @@ import java.util.Collections; import java.util.LinkedHashSet; import java.util.Set; +import java.util.concurrent.ConcurrentSkipListSet; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.BDDMockito; @@ -83,36 +85,6 @@ void vanillaWithOptionalPayerSet() throws PreCheckException { assertThat(realPreContext.optionalNonPayerKeys()).isNotEqualTo(Collections.emptySet()); } - @Test - void verifyPureChecks() throws PreCheckException { - final TransactionBody 
originalSign = scheduleSignTransaction(null); - final TransactionBody.Builder failures = originalSign.copyBuilder(); - final TransactionID originalId = originalSign.transactionID(); - Assertions.assertThrowsPreCheck(() -> subject.pureChecks(null), ResponseCodeEnum.INVALID_TRANSACTION_BODY); - failures.transactionID(nullTransactionId); - Assertions.assertThrowsPreCheck( - () -> subject.pureChecks(failures.build()), ResponseCodeEnum.INVALID_TRANSACTION_ID); - TransactionID.Builder idErrors = originalId.copyBuilder().scheduled(true); - failures.transactionID(idErrors); - Assertions.assertThrowsPreCheck( - () -> subject.pureChecks(failures.build()), ResponseCodeEnum.SCHEDULED_TRANSACTION_NOT_IN_WHITELIST); - idErrors = originalId.copyBuilder().transactionValidStart(nullTime); - failures.transactionID(idErrors); - Assertions.assertThrowsPreCheck( - () -> subject.pureChecks(failures.build()), ResponseCodeEnum.INVALID_TRANSACTION_START); - idErrors = originalId.copyBuilder().accountID(nullAccount); - failures.transactionID(idErrors); - Assertions.assertThrowsPreCheck( - () -> subject.pureChecks(failures.build()), ResponseCodeEnum.INVALID_SCHEDULE_PAYER_ID); - failures.transactionID(originalId); - final var signBuilder = originalSign.scheduleSign().copyBuilder().scheduleID(nullScheduleId); - failures.scheduleSign(signBuilder); - Assertions.assertThrowsPreCheck( - () -> subject.pureChecks(failures.build()), ResponseCodeEnum.INVALID_SCHEDULE_ID); - Assertions.assertThrowsPreCheck( - () -> subject.pureChecks(originalCreateTransaction), ResponseCodeEnum.INVALID_TRANSACTION_BODY); - } - @Test void verifySignatoriesAreUpdatedWithoutExecution() throws PreCheckException { int successCount = 0; @@ -139,10 +111,6 @@ void verifyErrorConditions() throws PreCheckException { prepareContext(signTransaction); throwsHandleException(() -> subject.handle(mockContext), ResponseCodeEnum.INVALID_SCHEDULE_ID); - // verify we fail when the wrong transaction type is sent - 
prepareContext(alternateCreateTransaction); - throwsHandleException(() -> subject.handle(mockContext), ResponseCodeEnum.INVALID_TRANSACTION); - // verify we fail a sign for a deleted transaction. // Use an arbitrary schedule from the big list for this. Schedule deleteTest = listOfScheduledOptions.get(3); @@ -175,7 +143,7 @@ void handleExecutesImmediateIfPossible() throws HandleException, PreCheckExcepti private void verifyAllSignatories(final Schedule original, final TransactionKeys expectedKeys) { final Set combinedSet = new LinkedHashSet<>(5); combinedSet.addAll(expectedKeys.requiredNonPayerKeys()); - combinedSet.addAll(expectedKeys.optionalNonPayerKeys()); + combinedSet.add(expectedKeys.payerKey()); verifySignatorySet(original, combinedSet); } @@ -202,10 +170,13 @@ private Set prepareContext(final TransactionBody signTransaction) throws Pr // We leave out "other" key from the "valid" keys for that reason. final Set acceptedKeys = Set.of(payerKey, optionKey); final TestTransactionKeys accepted = new TestTransactionKeys(payerKey, acceptedKeys, Collections.emptySet()); + final var signingSet = new ConcurrentSkipListSet<>(new KeyComparator()); + signingSet.addAll(acceptedKeys); + given(keyVerifier.signingCryptoKeys()).willReturn(signingSet); // This is how you get side-effects replicated, by having the "Answer" called in place of the real method. given(keyVerifier.verificationFor(BDDMockito.any(Key.class), BDDMockito.any(VerificationAssistant.class))) .will(new VerificationForAnswer(accepted)); - return acceptedKeys; // return the expected set of signatories after the transaction is handled. + return Set.of(payerKey); // return the expected set of signatories after the transaction is handled. 
} private void prepareContextAllPass(final TransactionBody signTransaction) throws PreCheckException { @@ -213,6 +184,9 @@ private void prepareContextAllPass(final TransactionBody signTransaction) throws given(mockContext.allKeysForTransaction(Mockito.any(), Mockito.any())).willReturn(testChildKeys); // for signature verification to succeed, the "Answer" needs to be "valid" for all keys final Set allKeys = Set.of(payerKey, adminKey, schedulerKey, optionKey, otherKey); + final var signingSet = new ConcurrentSkipListSet<>(new KeyComparator()); + signingSet.addAll(allKeys); + given(keyVerifier.signingCryptoKeys()).willReturn(signingSet); final TestTransactionKeys allRequired = new TestTransactionKeys(payerKey, allKeys, Collections.emptySet()); // This is how you get side-effects replicated, by having the "Answer" called in place of the real method. given(keyVerifier.verificationFor(BDDMockito.any(Key.class), BDDMockito.any(VerificationAssistant.class))) diff --git a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/TestTransactionKeys.java b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/TestTransactionKeys.java index 723c377b374f..a2f85231a3c7 100644 --- a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/TestTransactionKeys.java +++ b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/TestTransactionKeys.java @@ -16,6 +16,8 @@ package com.hedera.node.app.service.schedule.impl.handlers; +import static java.util.Collections.emptySet; + import com.hedera.hapi.node.base.Key; import com.hedera.hapi.node.state.token.Account; import com.hedera.node.app.spi.workflows.TransactionKeys; @@ -47,7 +49,7 @@ public Set requiredNonPayerKeys() { @Override public Set requiredHollowAccounts() { - return null; + return emptySet(); } @Override diff --git 
a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/gas/DispatchType.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/gas/DispatchType.java index c6dc4a476b2a..98526a90afd3 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/gas/DispatchType.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/gas/DispatchType.java @@ -59,7 +59,12 @@ public enum DispatchType { TOKEN_UPDATE_NFTS(HederaFunctionality.TOKEN_UPDATE_NFTS, DEFAULT), UTIL_PRNG(HederaFunctionality.UTIL_PRNG, DEFAULT), TOKEN_INFO(HederaFunctionality.TOKEN_GET_INFO, DEFAULT), - UPDATE_TOKEN_CUSTOM_FEES(HederaFunctionality.TOKEN_FEE_SCHEDULE_UPDATE, DEFAULT); + UPDATE_TOKEN_CUSTOM_FEES(HederaFunctionality.TOKEN_FEE_SCHEDULE_UPDATE, DEFAULT), + TOKEN_AIRDROP(HederaFunctionality.TOKEN_AIRDROP, DEFAULT), + TOKEN_CLAIM_AIRDROP(HederaFunctionality.TOKEN_CLAIM_AIRDROP, DEFAULT), + TOKEN_CANCEL_AIRDROP(HederaFunctionality.TOKEN_CANCEL_AIRDROP, DEFAULT), + TOKEN_REJECT_FT(HederaFunctionality.TOKEN_REJECT, TOKEN_FUNGIBLE_COMMON), + TOKEN_REJECT_NFT(HederaFunctionality.TOKEN_REJECT, TOKEN_NON_FUNGIBLE_UNIQUE); private final HederaFunctionality functionality; private final SubType subtype; diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/processors/HasTranslatorsModule.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/processors/HasTranslatorsModule.java index 501130cfba89..c6f376fa7750 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/processors/HasTranslatorsModule.java +++ 
b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/processors/HasTranslatorsModule.java @@ -24,6 +24,7 @@ import com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.hederaaccountnumalias.HederaAccountNumAliasTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.isauthorizedraw.IsAuthorizedRawTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.isvalidalias.IsValidAliasTranslator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.setunlimitedautoassociations.SetUnlimitedAutoAssociationsTranslator; import dagger.Module; import dagger.Provides; import dagger.multibindings.IntoSet; @@ -99,4 +100,13 @@ static CallTranslator provideIsAuthorizedRawTranslator( @NonNull final IsAuthorizedRawTranslator translator) { return translator; } + + @Provides + @Singleton + @IntoSet + @Named("HasTranslators") + static CallTranslator provideSetUnlimitedAutoAssociationsTranslator( + @NonNull final SetUnlimitedAutoAssociationsTranslator translator) { + return translator; + } } diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/processors/HtsTranslatorsModule.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/processors/HtsTranslatorsModule.java index 31b1e074dccc..f3a3598c56bb 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/processors/HtsTranslatorsModule.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/processors/HtsTranslatorsModule.java @@ -18,10 +18,13 @@ import com.hedera.node.app.service.contract.impl.exec.systemcontracts.common.CallTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import 
com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.airdrops.TokenAirdropTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.allowance.GetAllowanceTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.associations.AssociationsTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.balanceof.BalanceOfTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.burn.BurnTranslator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.cancelairdrops.TokenCancelAirdropTranslator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.claimairdrops.TokenClaimAirdropTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.create.CreateTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.customfees.TokenCustomFeesTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.decimals.DecimalsTranslator; @@ -43,6 +46,7 @@ import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.nfttokeninfo.NftTokenInfoTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.ownerof.OwnerOfTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.pauses.PausesTranslator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.rejecttokens.RejectTokensTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.setapproval.SetApprovalForAllTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.symbol.SymbolTranslator; import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.tokenexpiry.TokenExpiryTranslator; @@ -438,4 +442,40 @@ static CallTranslator provideUpdateNFTsMetadataTranslator( @NonNull final UpdateNFTsMetadataTranslator translator) { return translator; } + + 
@Provides + @Singleton + @IntoSet + @Named("HtsTranslators") + static CallTranslator provideTokenAirdropTranslator( + @NonNull final TokenAirdropTranslator translator) { + return translator; + } + + @Provides + @Singleton + @IntoSet + @Named("HtsTranslators") + static CallTranslator provideTokenClaimAirdropDecoder( + @NonNull final TokenClaimAirdropTranslator translator) { + return translator; + } + + @Provides + @Singleton + @IntoSet + @Named("HtsTranslators") + static CallTranslator provideTokenCancelAirdropTranslator( + @NonNull final TokenCancelAirdropTranslator translator) { + return translator; + } + + @Provides + @Singleton + @IntoSet + @Named("HtsTranslators") + static CallTranslator provideTokenRejectsTranslator( + @NonNull final RejectTokensTranslator translator) { + return translator; + } } diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/HandleHederaOperations.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/HandleHederaOperations.java index 8bb0bfb45f3c..dc2b225baea1 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/HandleHederaOperations.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/HandleHederaOperations.java @@ -33,7 +33,6 @@ import com.hedera.hapi.node.base.ContractID; import com.hedera.hapi.node.base.Duration; import com.hedera.hapi.node.base.HederaFunctionality; -import com.hedera.hapi.node.base.Key; import com.hedera.hapi.node.contract.ContractCreateTransactionBody; import com.hedera.hapi.node.contract.ContractFunctionResult; import com.hedera.hapi.node.token.CryptoCreateTransactionBody; @@ -447,22 +446,20 @@ private ExternalizedRecordCustomizer contractBodyCustomizerFor( }; } + /** + * Standardizes the given {@link 
ContractCreateTransactionBody} to not include initcode, gas, and initial balance + * values as these parameters are only set on the top-level HAPI transactions. + * + * @param createdNumber the number of the created contract + * @param op the operation to standardize + * @return the standardized operation + */ private ContractCreateTransactionBody standardized( final long createdNumber, @NonNull final ContractCreateTransactionBody op) { - var standardAdminKey = op.adminKey(); - if (op.hasAdminKey()) { - final var adminNum = - op.adminKeyOrThrow().contractIDOrElse(ContractID.DEFAULT).contractNumOrElse(0L); - // For mono-service fidelity, don't set an explicit admin key for a self-managed contract - if (createdNumber == adminNum) { - standardAdminKey = null; - } - } - if (needsStandardization(op, standardAdminKey)) { - // Initial balance, gas, and initcode are only set on top-level HAPI transactions + if (needsStandardization(op)) { return new ContractCreateTransactionBody( com.hedera.hapi.node.contract.codec.ContractCreateTransactionBodyProtoCodec.INITCODE_SOURCE_UNSET, - standardAdminKey, + op.adminKey(), 0L, 0L, op.proxyAccountID(), @@ -481,8 +478,7 @@ private ContractCreateTransactionBody standardized( } } - private boolean needsStandardization( - @NonNull final ContractCreateTransactionBody op, @Nullable final Key standardAdminKey) { - return op.hasInitcode() || op.gas() > 0L || op.initialBalance() > 0L || standardAdminKey != op.adminKey(); + private boolean needsStandardization(@NonNull final ContractCreateTransactionBody op) { + return op.hasInitcode() || op.gas() > 0L || op.initialBalance() > 0L; } } diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/HandleSystemContractOperations.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/HandleSystemContractOperations.java index 0928a72ce33c..cf38a9baa14e 100644 --- 
a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/HandleSystemContractOperations.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/HandleSystemContractOperations.java @@ -63,17 +63,19 @@ public HandleSystemContractOperations(@NonNull final HandleContext context, @Nul this.maybeEthSenderKey = maybeEthSenderKey; } - /** - * {@inheritDoc} - */ @Override - public @NonNull Predicate activeSignatureTestWith(@NonNull final VerificationStrategy strategy) { + public @NonNull Predicate primitiveSignatureTestWith(@NonNull final VerificationStrategy strategy) { + requireNonNull(strategy); + return strategy.asPrimitiveSignatureTestIn(context, maybeEthSenderKey); + } + + @NonNull + @Override + public Predicate signatureTestWith(@NonNull final VerificationStrategy strategy) { + requireNonNull(strategy); return strategy.asSignatureTestIn(context, maybeEthSenderKey); } - /** - * {@inheritDoc} - */ @Override public @NonNull T dispatch( @NonNull final TransactionBody syntheticBody, @@ -87,7 +89,7 @@ public HandleSystemContractOperations(@NonNull final HandleContext context, @Nul return context.dispatchChildTransaction( syntheticBody, recordBuilderClass, - activeSignatureTestWith(strategy), + primitiveSignatureTestWith(strategy), syntheticPayerId, CHILD, HandleContext.ConsensusThrottling.ON); @@ -139,9 +141,6 @@ public Transaction syntheticTransactionForNativeCall( return transactionWith(transactionBody); } - /** - * {@inheritDoc} - */ @Override @NonNull public ExchangeRate currentExchangeRate() { diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/QuerySystemContractOperations.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/QuerySystemContractOperations.java index 085bd06677cd..89e9217af5d6 100644 
--- a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/QuerySystemContractOperations.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/QuerySystemContractOperations.java @@ -75,7 +75,13 @@ public ContractCallStreamBuilder externalizePreemptedDispatch( } @Override - public @NonNull Predicate activeSignatureTestWith(@NonNull final VerificationStrategy strategy) { + public @NonNull Predicate primitiveSignatureTestWith(@NonNull final VerificationStrategy strategy) { + throw new UnsupportedOperationException("Cannot compute a signature test"); + } + + @NonNull + @Override + public Predicate signatureTestWith(@NonNull final VerificationStrategy strategy) { throw new UnsupportedOperationException("Cannot compute a signature test"); } diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/SystemContractOperations.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/SystemContractOperations.java index d52e0ab80196..d34e8e894503 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/SystemContractOperations.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/SystemContractOperations.java @@ -68,14 +68,27 @@ ContractCallStreamBuilder externalizePreemptedDispatch( @NonNull HederaFunctionality functionality); /** - * Returns a {@link Predicate} that tests whether the given {@link Key} is active based on the - * given verification strategy. + * Returns a {@link Predicate} that tests whether a primitive {@link Key} has signed + * based on the given verification strategy. 
Used when dispatching a synthetic + * transaction, as the workflow expects only a primitive signature test. * * @param strategy the verification strategy to use - * @return a {@link Predicate} that tests whether the given {@link Key} is active + * @return a {@link Predicate} that tests whether a primitive {@link Key} is active */ @NonNull - Predicate activeSignatureTestWith(@NonNull VerificationStrategy strategy); + Predicate primitiveSignatureTestWith(@NonNull VerificationStrategy strategy); + + /** + * Returns a {@link Predicate} that tests whether a {@link Key} structure has an + * active signature based on the given verification strategy. Used when checking + * whether the workflow will judge an account's key to have signed a dispatch, and + * hence whether a debit should be switched to an approval. + * + * @param strategy the verification strategy to use + * @return a test whether a {@link Key} structure has an active signature + */ + @NonNull + Predicate signatureTestWith(@NonNull VerificationStrategy strategy); /** * Attempts to create a child record of the current record, with the given {@code result}. 
diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/VerificationStrategy.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/VerificationStrategy.java index bad13d0834ac..24c3fd88761c 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/VerificationStrategy.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/scope/VerificationStrategy.java @@ -18,8 +18,8 @@ import static java.util.Objects.requireNonNull; +import com.hedera.hapi.node.base.HederaFunctionality; import com.hedera.hapi.node.base.Key; -import com.hedera.hapi.node.base.KeyList; import com.hedera.node.app.spi.workflows.HandleContext; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; @@ -60,7 +60,6 @@ enum Decision { *

  • {@link Key.KeyOneOfType#ED25519}
  • *
  • {@link Key.KeyOneOfType#ECDSA_SECP256K1}
  • * - * C.f. {@link #isPrimitive(Key)}. * * @param key the key whose signature is to be verified * @return a decision on whether to verify the signature, or delegate back to the crypto engine results @@ -68,77 +67,43 @@ enum Decision { Decision decideForPrimitive(@NonNull Key key); /** - * Returns a predicate that tests whether a given key is a valid signature for a given key - * given this strategy within the given {@link HandleContext}. - * - * @param context the context in which this strategy will be used - * @param maybeEthSenderKey the key that is the sender of the EVM message, if known - * @return a predicate that tests whether a given key is a valid signature for a given key + * Returns a predicate that tests whether a given key structure has a valid signature for this strategy + * with the given {@link HandleContext} and, if applicable, the key used by the sender of an + * {@link HederaFunctionality#ETHEREUM_TRANSACTION}. + * @param context the context in which this strategy is being used + * @param maybeEthSenderKey the key that is the sender of the EVM message, if applicable + * @return a predicate that tests whether a given key structure has a valid signature in this context */ default Predicate asSignatureTestIn( @NonNull final HandleContext context, @Nullable final Key maybeEthSenderKey) { requireNonNull(context); - return new Predicate<>() { - @Override - public boolean test(@NonNull final Key key) { - requireNonNull(key); - return switch (key.key().kind()) { - case KEY_LIST -> { - final var keys = key.keyListOrThrow().keys(); - for (final var childKey : keys) { - if (!test(childKey)) { - yield false; - } - } - yield !keys.isEmpty(); - } - case THRESHOLD_KEY -> { - final var thresholdKey = key.thresholdKeyOrThrow(); - final var keyList = thresholdKey.keysOrElse(KeyList.DEFAULT); - final var keys = keyList.keys(); - final var threshold = thresholdKey.threshold(); - final var clampedThreshold = Math.max(1, Math.min(threshold, keys.size())); - var 
passed = 0; - for (final var childKey : keys) { - if (test(childKey)) { - passed++; - } - if (passed >= clampedThreshold) { - yield true; - } - } - yield false; - } - default -> { - if (isPrimitive(key)) { - yield switch (decideForPrimitive(key)) { - case VALID -> true; - case INVALID -> false; - // Note the EthereumTransaction sender's key has necessarily signed - case DELEGATE_TO_CRYPTOGRAPHIC_VERIFICATION -> Objects.equals(key, maybeEthSenderKey) - || context.keyVerifier() - .verificationFor(key) - .passed(); - }; - } - yield false; - } - }; - } - }; + final var callback = asPrimitiveSignatureTestIn(context, maybeEthSenderKey); + return key -> context.keyVerifier() + .verificationFor(key, (k, v) -> callback.test(k)) + .passed(); } /** - * Returns whether the given key is a primitive key. + * Returns a predicate that tests whether a given primitive key has a valid signature for this strategy + * with the given {@link HandleContext} and, if applicable, the key used by the sender of an + * {@link HederaFunctionality#ETHEREUM_TRANSACTION}. 
* - * @param key the key to test - * @return whether the given key is a primitive key + * @param context the context in which this strategy is being used + * @param maybeEthSenderKey the key that is the sender of the EVM message, if applicable + * @return a predicate that tests whether a given primitive key has a valid signature in this context */ - static boolean isPrimitive(@NonNull final Key key) { - requireNonNull(key); - return switch (key.key().kind()) { - case CONTRACT_ID, DELEGATABLE_CONTRACT_ID, ED25519, ECDSA_SECP256K1 -> true; - default -> false; + default Predicate asPrimitiveSignatureTestIn( + @NonNull final HandleContext context, @Nullable final Key maybeEthSenderKey) { + requireNonNull(context); + return key -> { + requireNonNull(key); + return switch (decideForPrimitive(key)) { + case VALID -> true; + case INVALID -> false; + // Note the Ethereum sender's key has necessarily signed + case DELEGATE_TO_CRYPTOGRAPHIC_VERIFICATION -> Objects.equals(key, maybeEthSenderKey) + || context.keyVerifier().verificationFor(key).passed(); + }; }; } } diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/common/AbstractCallAttempt.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/common/AbstractCallAttempt.java index d22842249ce0..22727f816e96 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/common/AbstractCallAttempt.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/common/AbstractCallAttempt.java @@ -265,10 +265,15 @@ public boolean isSelector(@NonNull final Function... 
functions) { * @param configEnabled whether the config is enabled * @return boolean result */ + @Deprecated public boolean isSelectorIfConfigEnabled(@NonNull final Function function, final boolean configEnabled) { return configEnabled && isSelector(function); } + public boolean isSelectorIfConfigEnabled(final boolean configEnabled, @NonNull final Function... functions) { + return configEnabled && isSelector(functions); + } + private boolean isRedirectSelector(@NonNull final byte[] functionSelector, @NonNull final byte[] input) { return Arrays.equals(input, 0, functionSelector.length, functionSelector, 0, functionSelector.length); } diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/has/setunlimitedautoassociations/SetUnlimitedAutoAssociationsCall.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/has/setunlimitedautoassociations/SetUnlimitedAutoAssociationsCall.java new file mode 100644 index 000000000000..92308199bf3c --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/has/setunlimitedautoassociations/SetUnlimitedAutoAssociationsCall.java @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.setunlimitedautoassociations; + +import static com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.ReturnTypes.encodedRc; +import static com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.ReturnTypes.standardized; +import static java.util.Objects.requireNonNull; + +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.gas.DispatchType; +import com.hedera.node.app.service.contract.impl.exec.scope.VerificationStrategy; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.common.AbstractCall; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.HasCallAttempt; +import com.hedera.node.app.service.contract.impl.records.ContractCallStreamBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; +import org.hyperledger.besu.evm.frame.MessageFrame; + +public class SetUnlimitedAutoAssociationsCall extends AbstractCall { + + private final AccountID sender; + private final TransactionBody transactionBody; + private final VerificationStrategy verificationStrategy; + + public SetUnlimitedAutoAssociationsCall( + @NonNull final HasCallAttempt attempt, @NonNull final TransactionBody transactionBody) { + super(attempt.systemContractGasCalculator(), attempt.enhancement(), false); + this.sender = attempt.senderId(); + this.transactionBody = requireNonNull(transactionBody); + this.verificationStrategy = attempt.defaultVerificationStrategy(); + } + + @NonNull + @Override + public PricedResult execute(@NonNull final MessageFrame frame) { + requireNonNull(frame); + final var recordBuilder = systemContractOperations() + .dispatch(transactionBody, verificationStrategy, sender, ContractCallStreamBuilder.class); + + final var gasRequirement = gasCalculator.gasRequirement(transactionBody, DispatchType.CRYPTO_UPDATE, sender); + return 
completionWith(gasRequirement, recordBuilder, encodedRc(standardized(recordBuilder.status()))); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/has/setunlimitedautoassociations/SetUnlimitedAutoAssociationsTranslator.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/has/setunlimitedautoassociations/SetUnlimitedAutoAssociationsTranslator.java new file mode 100644 index 000000000000..fe9d89716448 --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/has/setunlimitedautoassociations/SetUnlimitedAutoAssociationsTranslator.java @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.setunlimitedautoassociations; + +import static java.util.Objects.requireNonNull; + +import com.esaulpaugh.headlong.abi.Function; +import com.hedera.hapi.node.token.CryptoUpdateTransactionBody; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.common.AbstractCallTranslator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.common.Call; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.HasCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.ReturnTypes; +import com.hedera.node.config.data.ContractsConfig; +import edu.umd.cs.findbugs.annotations.NonNull; +import javax.inject.Inject; +import javax.inject.Singleton; + +@Singleton +public class SetUnlimitedAutoAssociationsTranslator extends AbstractCallTranslator { + + public static final Function SET_UNLIMITED_AUTO_ASSOC = + new Function("setUnlimitedAutomaticAssociations(bool)", ReturnTypes.INT_64); + + private static final int UNLIMITED_AUTO_ASSOCIATIONS = -1; + private static final int NO_AUTO_ASSOCIATIONS = 0; + + @Inject + public SetUnlimitedAutoAssociationsTranslator() { + // Dagger2 + } + + @Override + public boolean matches(@NonNull final HasCallAttempt attempt) { + final var setUnlimitedAutoAssocEnabled = attempt.configuration() + .getConfigData(ContractsConfig.class) + .systemContractSetUnlimitedAutoAssociationsEnabled(); + return attempt.isSelectorIfConfigEnabled(SET_UNLIMITED_AUTO_ASSOC, setUnlimitedAutoAssocEnabled); + } + + @Override + public Call callFrom(@NonNull final HasCallAttempt attempt) { + requireNonNull(attempt); + final var call = SET_UNLIMITED_AUTO_ASSOC.decodeCall(attempt.inputBytes()); + final var setUnlimitedAutoAssociations = (boolean) call.get(0); + return new SetUnlimitedAutoAssociationsCall(attempt, bodyFor(attempt, setUnlimitedAutoAssociations)); + 
} + + @NonNull + private TransactionBody bodyFor(@NonNull final HasCallAttempt attempt, final boolean setUnlimitedAutoAssociations) { + final var cryptoUpdate = CryptoUpdateTransactionBody.newBuilder() + .accountIDToUpdate(attempt.redirectAccountId()) + .maxAutomaticTokenAssociations( + setUnlimitedAutoAssociations ? UNLIMITED_AUTO_ASSOCIATIONS : NO_AUTO_ASSOCIATIONS) + .build(); + return TransactionBody.newBuilder().cryptoUpdateAccount(cryptoUpdate).build(); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/airdrops/TokenAirdropDecoder.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/airdrops/TokenAirdropDecoder.java new file mode 100644 index 000000000000..0ec657739482 --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/airdrops/TokenAirdropDecoder.java @@ -0,0 +1,139 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.airdrops; + +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_RECEIVING_NODE_ACCOUNT; +import static com.hedera.hapi.node.base.ResponseCodeEnum.TOKEN_REFERENCE_LIST_SIZE_LIMIT_EXCEEDED; +import static com.hedera.node.app.spi.workflows.HandleException.validateFalse; + +import com.esaulpaugh.headlong.abi.Tuple; +import com.hedera.hapi.node.base.AccountAmount; +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.base.NftTransfer; +import com.hedera.hapi.node.base.TokenTransferList; +import com.hedera.hapi.node.token.TokenAirdropTransactionBody; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.AddressIdConverter; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.app.service.contract.impl.utils.ConversionUtils; +import com.hedera.node.config.data.LedgerConfig; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.ArrayList; +import java.util.Arrays; +import javax.inject.Inject; +import javax.inject.Singleton; + +@Singleton +public class TokenAirdropDecoder { + + // Tuple indexes + private static final int TOKEN = 0; + private static final int TOKEN_TRANSFERS = 1; + private static final int NFT_AMOUNT = 2; + private static final int TOKEN_ACCOUNT_ID = 0; + private static final int TOKEN_AMOUNT = 1; + private static final int TOKEN_IS_APPROVAL = 2; + private static final int NFT_SENDER = 0; + private static final int NFT_RECEIVER = 1; + private static final int NFT_SERIAL = 2; + private static final int NFT_IS_APPROVAL = 3; + + // Validation constant + private static final Long LAST_RESERVED_SYSTEM_ACCOUNT = 1000L; + + @Inject + public TokenAirdropDecoder() { + // Dagger2 + } + + public TransactionBody decodeAirdrop(@NonNull final HtsCallAttempt attempt) { + final var call = 
TokenAirdropTranslator.TOKEN_AIRDROP.decodeCall(attempt.inputBytes()); + final var transferList = (Tuple[]) call.get(0); + final var ledgerConfig = attempt.configuration().getConfigData(LedgerConfig.class); + final var tokenAirdrop = bodyForAirdrop(transferList, attempt.addressIdConverter(), ledgerConfig); + return TransactionBody.newBuilder().tokenAirdrop(tokenAirdrop).build(); + } + + private TokenAirdropTransactionBody bodyForAirdrop( + @NonNull final Tuple[] transferList, + @NonNull final AddressIdConverter addressIdConverter, + @NonNull final LedgerConfig ledgerConfig) { + final var transferBuilderList = new ArrayList(); + validateSemantics(transferList, ledgerConfig); + Arrays.stream(transferList).forEach(transfer -> { + final var tokenTransferList = TokenTransferList.newBuilder(); + final var token = ConversionUtils.asTokenId(transfer.get(TOKEN)); + tokenTransferList.token(token); + final var tokenAmountsTuple = (Tuple[]) transfer.get(TOKEN_TRANSFERS); + final var nftAmountsTuple = (Tuple[]) transfer.get(NFT_AMOUNT); + if (tokenAmountsTuple.length > 0) { + final var aaList = new ArrayList(); + Arrays.stream(tokenAmountsTuple).forEach(tokenAmount -> { + final var amount = (long) tokenAmount.get(TOKEN_AMOUNT); + final var account = addressIdConverter.convert(tokenAmount.get(TOKEN_ACCOUNT_ID)); + // Check if the receiver is a system account + if (amount > 0) { + checkForSystemAccount(account); + } + final var isApproval = (boolean) tokenAmount.get(TOKEN_IS_APPROVAL); + aaList.add(AccountAmount.newBuilder() + .amount(amount) + .accountID(account) + .isApproval(isApproval) + .build()); + }); + tokenTransferList.transfers(aaList); + } + if (nftAmountsTuple.length > 0) { + final var nftAmount = nftAmountsTuple[0]; + final var serial = (long) nftAmount.get(NFT_SERIAL); + final var sender = addressIdConverter.convert(nftAmount.get(NFT_SENDER)); + final var receiver = addressIdConverter.convert(nftAmount.get(NFT_RECEIVER)); + checkForSystemAccount(receiver); + final 
var isApproval = (boolean) nftAmount.get(NFT_IS_APPROVAL); + tokenTransferList.nftTransfers(NftTransfer.newBuilder() + .senderAccountID(sender) + .receiverAccountID(receiver) + .serialNumber(serial) + .isApproval(isApproval) + .build()); + } + transferBuilderList.add(tokenTransferList.build()); + }); + return TokenAirdropTransactionBody.newBuilder() + .tokenTransfers(transferBuilderList) + .build(); + } + + private void validateSemantics(@NonNull final Tuple[] transferList, @NonNull final LedgerConfig ledgerConfig) { + var fungibleBalanceChanges = 0; + var nftBalanceChanges = 0; + for (final var airdrop : transferList) { + fungibleBalanceChanges += ((Tuple[]) airdrop.get(1)).length; + nftBalanceChanges += ((Tuple[]) airdrop.get(2)).length; + validateFalse( + fungibleBalanceChanges > ledgerConfig.tokenTransfersMaxLen(), + TOKEN_REFERENCE_LIST_SIZE_LIMIT_EXCEEDED); + validateFalse( + nftBalanceChanges > ledgerConfig.nftTransfersMaxLen(), TOKEN_REFERENCE_LIST_SIZE_LIMIT_EXCEEDED); + } + } + + private void checkForSystemAccount(@NonNull final AccountID account) { + validateFalse(account.accountNumOrThrow() <= LAST_RESERVED_SYSTEM_ACCOUNT, INVALID_RECEIVING_NODE_ACCOUNT); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/airdrops/TokenAirdropTranslator.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/airdrops/TokenAirdropTranslator.java new file mode 100644 index 000000000000..a6695f1eef83 --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/airdrops/TokenAirdropTranslator.java @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.airdrops; + +import static java.util.Objects.requireNonNull; + +import com.esaulpaugh.headlong.abi.Function; +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.gas.DispatchType; +import com.hedera.node.app.service.contract.impl.exec.gas.SystemContractGasCalculator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.common.AbstractCallTranslator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.common.Call; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.DispatchForResponseCodeHtsCall; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.app.service.contract.impl.hevm.HederaWorldUpdater; +import com.hedera.node.config.data.ContractsConfig; +import edu.umd.cs.findbugs.annotations.NonNull; +import javax.inject.Inject; + +public class TokenAirdropTranslator extends AbstractCallTranslator { + + public static final Function TOKEN_AIRDROP = + new Function("airdropTokens((address,(address,int64,bool)[],(address,address,int64,bool)[])[])", "(int32)"); + + private final TokenAirdropDecoder decoder; + + @Inject + public TokenAirdropTranslator(@NonNull final TokenAirdropDecoder decoder) { + requireNonNull(decoder); + this.decoder = decoder; + } + + @Override + public boolean matches(@NonNull final HtsCallAttempt attempt) { + final var 
airdropEnabled = + attempt.configuration().getConfigData(ContractsConfig.class).systemContractAirdropTokensEnabled(); + return attempt.isSelectorIfConfigEnabled(TOKEN_AIRDROP, airdropEnabled); + } + + public static long gasRequirement( + @NonNull final TransactionBody body, + @NonNull final SystemContractGasCalculator systemContractGasCalculator, + @NonNull final HederaWorldUpdater.Enhancement enhancement, + @NonNull final AccountID payerId) { + return systemContractGasCalculator.gasRequirement(body, DispatchType.TOKEN_AIRDROP, payerId); + } + + @Override + public Call callFrom(@NonNull final HtsCallAttempt attempt) { + return new DispatchForResponseCodeHtsCall( + attempt, decoder.decodeAirdrop(attempt), TokenAirdropTranslator::gasRequirement); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/cancelairdrops/TokenCancelAirdropDecoder.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/cancelairdrops/TokenCancelAirdropDecoder.java new file mode 100644 index 000000000000..08f2e0308912 --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/cancelairdrops/TokenCancelAirdropDecoder.java @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.cancelairdrops; + +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_TOKEN_ID; +import static com.hedera.hapi.node.base.ResponseCodeEnum.PENDING_AIRDROP_ID_LIST_TOO_LONG; +import static com.hedera.node.app.service.contract.impl.utils.ConversionUtils.asTokenId; +import static com.hedera.node.app.spi.workflows.HandleException.validateFalse; +import static com.hedera.node.app.spi.workflows.HandleException.validateTrue; + +import com.esaulpaugh.headlong.abi.Address; +import com.esaulpaugh.headlong.abi.Tuple; +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.base.NftID; +import com.hedera.hapi.node.base.PendingAirdropId; +import com.hedera.hapi.node.base.TokenID; +import com.hedera.hapi.node.base.TokenType; +import com.hedera.hapi.node.token.TokenCancelAirdropTransactionBody; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.config.data.TokensConfig; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Arrays; +import javax.inject.Inject; + +public class TokenCancelAirdropDecoder { + + // Tuple indexes + // Indexes for CANCEL_AIRDROP + // cancelAirdrops((address,address,address,int64)[]) + private static final int TRANSFER_LIST = 0; + private static final int SENDER = 0; + private static final int RECEIVER = 1; + private static final int TOKEN = 2; + private static final int SERIAL = 3; + + // Indexes for HRC_CANCEL_AIRDROP_FT and HRC_CANCEL_AIRDROP_NFT + // cancelAirdropFT(address) + // cancelAirdropNFT(address,int64) + private static final int HRC_RECEIVER = 0; + private static final int HRC_SERIAL = 1; + + @Inject + public TokenCancelAirdropDecoder() { + // Dagger2 + } + + public TransactionBody 
decodeCancelAirdrop(@NonNull final HtsCallAttempt attempt) { + final var call = TokenCancelAirdropTranslator.CANCEL_AIRDROP.decodeCall(attempt.inputBytes()); + final var maxPendingAirdropsToCancel = + attempt.configuration().getConfigData(TokensConfig.class).maxAllowedPendingAirdropsToCancel(); + final var transferList = (Tuple[]) call.get(TRANSFER_LIST); + validateFalse(transferList.length > maxPendingAirdropsToCancel, PENDING_AIRDROP_ID_LIST_TOO_LONG); + + final var pendingAirdrops = Arrays.stream(transferList) + .map(transfer -> { + final var senderAddress = (Address) transfer.get(SENDER); + final var receiverAddress = (Address) transfer.get(RECEIVER); + final var tokenAddress = (Address) transfer.get(TOKEN); + final var serial = (long) transfer.get(SERIAL); + + final var senderId = attempt.addressIdConverter().convert(senderAddress); + final var receiverId = attempt.addressIdConverter().convert(receiverAddress); + final var tokenId = asTokenId(tokenAddress); + + final var token = attempt.enhancement().nativeOperations().getToken(tokenId.tokenNum()); + validateTrue(token != null, INVALID_TOKEN_ID); + if (token.tokenType().equals(TokenType.FUNGIBLE_COMMON)) { + return pendingFTAirdrop(senderId, receiverId, tokenId); + } else { + return pendingNFTAirdrop(senderId, receiverId, tokenId, serial); + } + }) + .toList(); + + return TransactionBody.newBuilder() + .tokenCancelAirdrop( + TokenCancelAirdropTransactionBody.newBuilder().pendingAirdrops(pendingAirdrops)) + .build(); + } + + public TransactionBody decodeCancelAirdropFT(@NonNull final HtsCallAttempt attempt) { + final var call = TokenCancelAirdropTranslator.HRC_CANCEL_AIRDROP_FT.decodeCall(attempt.inputBytes()); + + final var senderId = attempt.senderId(); + final var receiverAddress = (Address) call.get(HRC_RECEIVER); + final var token = attempt.redirectTokenId(); + validateTrue(token != null, INVALID_TOKEN_ID); + final var receiverId = attempt.addressIdConverter().convert(receiverAddress); + + return 
TransactionBody.newBuilder() + .tokenCancelAirdrop(TokenCancelAirdropTransactionBody.newBuilder() + .pendingAirdrops(pendingFTAirdrop(senderId, receiverId, token))) + .build(); + } + + public TransactionBody decodeCancelAirdropNFT(@NonNull final HtsCallAttempt attempt) { + final var call = TokenCancelAirdropTranslator.HRC_CANCEL_AIRDROP_NFT.decodeCall(attempt.inputBytes()); + + final var senderId = attempt.senderId(); + final var receiverAddress = (Address) call.get(HRC_RECEIVER); + final var serial = (long) call.get(HRC_SERIAL); + final var token = attempt.redirectTokenId(); + validateTrue(token != null, INVALID_TOKEN_ID); + final var receiverId = attempt.addressIdConverter().convert(receiverAddress); + + return TransactionBody.newBuilder() + .tokenCancelAirdrop(TokenCancelAirdropTransactionBody.newBuilder() + .pendingAirdrops(pendingNFTAirdrop(senderId, receiverId, token, serial))) + .build(); + } + + private PendingAirdropId pendingFTAirdrop( + @NonNull final AccountID senderId, @NonNull final AccountID receiverId, @NonNull final TokenID tokenId) { + return PendingAirdropId.newBuilder() + .senderId(senderId) + .receiverId(receiverId) + .fungibleTokenType(tokenId) + .build(); + } + + private PendingAirdropId pendingNFTAirdrop( + @NonNull final AccountID senderId, + @NonNull final AccountID receiverId, + @NonNull final TokenID tokenId, + final long serial) { + return PendingAirdropId.newBuilder() + .senderId(senderId) + .receiverId(receiverId) + .nonFungibleToken(NftID.newBuilder().tokenId(tokenId).serialNumber(serial)) + .build(); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/cancelairdrops/TokenCancelAirdropTranslator.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/cancelairdrops/TokenCancelAirdropTranslator.java new file mode 100644 index 000000000000..3515e354776e --- 
/dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/cancelairdrops/TokenCancelAirdropTranslator.java @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.cancelairdrops; + +import static java.util.Objects.requireNonNull; + +import com.esaulpaugh.headlong.abi.Function; +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.gas.DispatchType; +import com.hedera.node.app.service.contract.impl.exec.gas.SystemContractGasCalculator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.common.AbstractCallTranslator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.common.Call; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.DispatchForResponseCodeHtsCall; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.ReturnTypes; +import com.hedera.node.app.service.contract.impl.hevm.HederaWorldUpdater; +import com.hedera.node.config.data.ContractsConfig; +import edu.umd.cs.findbugs.annotations.NonNull; +import javax.inject.Inject; + +public class 
TokenCancelAirdropTranslator extends AbstractCallTranslator { + + // Actual signature definition with struct name before flattening + // cancelAirdrops(PendingAirdrop[]) + public static final Function CANCEL_AIRDROP = + new Function("cancelAirdrops((address,address,address,int64)[])", ReturnTypes.INT_64); + public static final Function HRC_CANCEL_AIRDROP_FT = new Function("cancelAirdropFT(address)", ReturnTypes.INT_64); + public static final Function HRC_CANCEL_AIRDROP_NFT = + new Function("cancelAirdropNFT(address,int64)", ReturnTypes.INT_64); + + private final TokenCancelAirdropDecoder decoder; + + @Inject + public TokenCancelAirdropTranslator(@NonNull final TokenCancelAirdropDecoder decoder) { + requireNonNull(decoder); + this.decoder = decoder; + } + + @Override + public boolean matches(@NonNull final HtsCallAttempt attempt) { + final var cancelAirdropEnabled = + attempt.configuration().getConfigData(ContractsConfig.class).systemContractCancelAirdropsEnabled(); + return attempt.isTokenRedirect() + ? 
attempt.isSelectorIfConfigEnabled(cancelAirdropEnabled, HRC_CANCEL_AIRDROP_FT, HRC_CANCEL_AIRDROP_NFT) + : attempt.isSelectorIfConfigEnabled(cancelAirdropEnabled, CANCEL_AIRDROP); + } + + @Override + public Call callFrom(@NonNull final HtsCallAttempt attempt) { + return new DispatchForResponseCodeHtsCall( + attempt, bodyFor(attempt), TokenCancelAirdropTranslator::gasRequirement); + } + + private TransactionBody bodyFor(@NonNull final HtsCallAttempt attempt) { + if (attempt.isSelector(CANCEL_AIRDROP)) { + return decoder.decodeCancelAirdrop(attempt); + } else if (attempt.isSelector(HRC_CANCEL_AIRDROP_FT)) { + return decoder.decodeCancelAirdropFT(attempt); + } else { + return decoder.decodeCancelAirdropNFT(attempt); + } + } + + public static long gasRequirement( + @NonNull final TransactionBody body, + @NonNull final SystemContractGasCalculator systemContractGasCalculator, + @NonNull final HederaWorldUpdater.Enhancement enhancement, + @NonNull final AccountID payerId) { + return systemContractGasCalculator.gasRequirement(body, DispatchType.TOKEN_CANCEL_AIRDROP, payerId); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/claimairdrops/TokenClaimAirdropDecoder.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/claimairdrops/TokenClaimAirdropDecoder.java new file mode 100644 index 000000000000..433dafee8d5c --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/claimairdrops/TokenClaimAirdropDecoder.java @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.claimairdrops; + +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_TOKEN_ID; +import static com.hedera.hapi.node.base.ResponseCodeEnum.PENDING_AIRDROP_ID_LIST_TOO_LONG; +import static com.hedera.node.app.service.contract.impl.utils.ConversionUtils.asTokenId; +import static com.hedera.node.app.spi.workflows.HandleException.validateFalse; +import static com.hedera.node.app.spi.workflows.HandleException.validateTrue; + +import com.esaulpaugh.headlong.abi.Address; +import com.esaulpaugh.headlong.abi.Tuple; +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.base.NftID; +import com.hedera.hapi.node.base.PendingAirdropId; +import com.hedera.hapi.node.base.TokenID; +import com.hedera.hapi.node.base.TokenType; +import com.hedera.hapi.node.token.TokenClaimAirdropTransactionBody; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.config.data.TokensConfig; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.ArrayList; +import java.util.Arrays; +import javax.inject.Inject; +import javax.inject.Singleton; + +@Singleton +public class TokenClaimAirdropDecoder { + + // Tuple indexes + private static final int TRANSFER_LIST = 0; + private static final int SENDER = 0; + private static final int RECEIVER = 1; + private static final int TOKEN = 2; + private static final int SERIAL = 3; + private static final int 
HRC_SENDER = 0; + private static final int HRC_SERIAL = 1; + + @Inject + public TokenClaimAirdropDecoder() { + // Dagger2 + } + + public TransactionBody decodeTokenClaimAirdrop(@NonNull final HtsCallAttempt attempt) { + final var call = TokenClaimAirdropTranslator.CLAIM_AIRDROP.decodeCall(attempt.inputBytes()); + final var maxPendingAirdropsToClaim = + attempt.configuration().getConfigData(TokensConfig.class).maxAllowedPendingAirdropsToClaim(); + validateFalse(((Tuple[]) call.get(0)).length > maxPendingAirdropsToClaim, PENDING_AIRDROP_ID_LIST_TOO_LONG); + + final var transferList = (Tuple[]) call.get(TRANSFER_LIST); + final var pendingAirdrops = new ArrayList(); + Arrays.stream(transferList).forEach(transfer -> { + final var senderAddress = (Address) transfer.get(SENDER); + final var receiverAddress = (Address) transfer.get(RECEIVER); + final var tokenAddress = (Address) transfer.get(TOKEN); + final var serial = (long) transfer.get(SERIAL); + + final var senderId = attempt.addressIdConverter().convert(senderAddress); + final var receiverId = attempt.addressIdConverter().convert(receiverAddress); + final var tokenId = asTokenId(tokenAddress); + + final var token = attempt.enhancement().nativeOperations().getToken(tokenId.tokenNum()); + validateTrue(token != null, INVALID_TOKEN_ID); + if (token.tokenType().equals(TokenType.FUNGIBLE_COMMON)) { + pendingAirdrops.add(pendingFTAirdrop(senderId, receiverId, tokenId)); + } else { + pendingAirdrops.add(pendingNFTAirdrop(senderId, receiverId, tokenId, serial)); + } + }); + + return TransactionBody.newBuilder() + .tokenClaimAirdrop(TokenClaimAirdropTransactionBody.newBuilder().pendingAirdrops(pendingAirdrops)) + .build(); + } + + public TransactionBody decodeHrcClaimAirdropFt(@NonNull final HtsCallAttempt attempt) { + final var call = TokenClaimAirdropTranslator.HRC_CLAIM_AIRDROP_FT.decodeCall(attempt.inputBytes()); + + // As the Token Claim is an operation for the receiver of an Airdrop, + // hence the `transaction sender` 
in the HRC scenario is in reality the `Airdrop receiver`. + final var receiverId = attempt.senderId(); + final var senderAddress = (Address) call.get(HRC_SENDER); + final var token = attempt.redirectTokenId(); + validateTrue(token != null, INVALID_TOKEN_ID); + final var senderId = attempt.addressIdConverter().convert(senderAddress); + + return TransactionBody.newBuilder() + .tokenClaimAirdrop(TokenClaimAirdropTransactionBody.newBuilder() + .pendingAirdrops(pendingFTAirdrop(senderId, receiverId, token))) + .build(); + } + + public TransactionBody decodeHrcClaimAirdropNft(@NonNull final HtsCallAttempt attempt) { + final var call = TokenClaimAirdropTranslator.HRC_CLAIM_AIRDROP_NFT.decodeCall(attempt.inputBytes()); + + // As the Token Claim is an operation for the receiver of an Airdrop, + // hence the `transaction sender` in the HRC scenario is in reality the `Airdrop receiver`. + final var receiverId = attempt.senderId(); + final var senderAddress = (Address) call.get(HRC_SENDER); + final var serial = (long) call.get(HRC_SERIAL); + final var token = attempt.redirectTokenId(); + validateTrue(token != null, INVALID_TOKEN_ID); + final var senderId = attempt.addressIdConverter().convert(senderAddress); + + return TransactionBody.newBuilder() + .tokenClaimAirdrop(TokenClaimAirdropTransactionBody.newBuilder() + .pendingAirdrops(pendingNFTAirdrop(senderId, receiverId, token, serial))) + .build(); + } + + private PendingAirdropId pendingFTAirdrop( + @NonNull final AccountID senderId, @NonNull final AccountID receiverId, @NonNull final TokenID tokenId) { + return PendingAirdropId.newBuilder() + .senderId(senderId) + .receiverId(receiverId) + .fungibleTokenType(tokenId) + .build(); + } + + private PendingAirdropId pendingNFTAirdrop( + @NonNull final AccountID senderId, + @NonNull final AccountID receiverId, + @NonNull final TokenID tokenId, + final long serial) { + return PendingAirdropId.newBuilder() + .senderId(senderId) + .receiverId(receiverId) + 
.nonFungibleToken(NftID.newBuilder().tokenId(tokenId).serialNumber(serial)) + .build(); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/claimairdrops/TokenClaimAirdropTranslator.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/claimairdrops/TokenClaimAirdropTranslator.java new file mode 100644 index 000000000000..325b7ec4a20d --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/claimairdrops/TokenClaimAirdropTranslator.java @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.claimairdrops; + +import com.esaulpaugh.headlong.abi.Function; +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.gas.DispatchType; +import com.hedera.node.app.service.contract.impl.exec.gas.SystemContractGasCalculator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.common.AbstractCallTranslator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.common.Call; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.DispatchForResponseCodeHtsCall; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.ReturnTypes; +import com.hedera.node.app.service.contract.impl.hevm.HederaWorldUpdater; +import com.hedera.node.config.data.ContractsConfig; +import edu.umd.cs.findbugs.annotations.NonNull; +import javax.inject.Inject; + +public class TokenClaimAirdropTranslator extends AbstractCallTranslator { + public static final Function CLAIM_AIRDROP = + new Function("claimAirdrops((address,address,address,int64)[])", ReturnTypes.INT_64); + public static final Function HRC_CLAIM_AIRDROP_FT = new Function("claimAirdropFT(address)", ReturnTypes.INT_64); + public static final Function HRC_CLAIM_AIRDROP_NFT = + new Function("claimAirdropNFT(address,int64)", ReturnTypes.INT_64); + + private final TokenClaimAirdropDecoder decoder; + + @Inject + public TokenClaimAirdropTranslator(@NonNull final TokenClaimAirdropDecoder decoder) { + this.decoder = decoder; + } + + @Override + public boolean matches(@NonNull final HtsCallAttempt attempt) { + final var claimAirdropEnabled = + attempt.configuration().getConfigData(ContractsConfig.class).systemContractClaimAirdropsEnabled(); + return attempt.isTokenRedirect() + ? 
attempt.isSelectorIfConfigEnabled(HRC_CLAIM_AIRDROP_FT, claimAirdropEnabled) + || attempt.isSelectorIfConfigEnabled(HRC_CLAIM_AIRDROP_NFT, claimAirdropEnabled) + : attempt.isSelectorIfConfigEnabled(CLAIM_AIRDROP, claimAirdropEnabled); + } + + @Override + public Call callFrom(@NonNull final HtsCallAttempt attempt) { + return new DispatchForResponseCodeHtsCall( + attempt, + attempt.isSelector(CLAIM_AIRDROP) ? bodyForClassic(attempt) : bodyForHRC(attempt), + TokenClaimAirdropTranslator::gasRequirement); + } + + public static long gasRequirement( + @NonNull final TransactionBody body, + @NonNull final SystemContractGasCalculator systemContractGasCalculator, + @NonNull final HederaWorldUpdater.Enhancement enhancement, + @NonNull final AccountID payerId) { + return systemContractGasCalculator.gasRequirement(body, DispatchType.TOKEN_CLAIM_AIRDROP, payerId); + } + + private TransactionBody bodyForClassic(@NonNull final HtsCallAttempt attempt) { + return decoder.decodeTokenClaimAirdrop(attempt); + } + + private TransactionBody bodyForHRC(@NonNull final HtsCallAttempt attempt) { + if (attempt.isSelector(HRC_CLAIM_AIRDROP_FT)) { + return decoder.decodeHrcClaimAirdropFt(attempt); + } else { + return decoder.decodeHrcClaimAirdropNft(attempt); + } + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/rejecttokens/RejectTokensDecoder.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/rejecttokens/RejectTokensDecoder.java new file mode 100644 index 000000000000..89893d03f56e --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/rejecttokens/RejectTokensDecoder.java @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may 
not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.rejecttokens; + +import static com.hedera.hapi.node.base.ResponseCodeEnum.TOKEN_REFERENCE_LIST_SIZE_LIMIT_EXCEEDED; +import static com.hedera.node.app.service.contract.impl.utils.ConversionUtils.asTokenId; +import static com.hedera.node.app.spi.workflows.HandleException.validateFalse; + +import com.esaulpaugh.headlong.abi.Address; +import com.esaulpaugh.headlong.abi.Tuple; +import com.hedera.hapi.node.base.NftID; +import com.hedera.hapi.node.token.TokenReference; +import com.hedera.hapi.node.token.TokenRejectTransactionBody; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.config.data.LedgerConfig; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.ArrayList; +import javax.inject.Inject; +import javax.inject.Singleton; + +@Singleton +public class RejectTokensDecoder { + + // Tuple indexes + // Indexes for TOKEN_REJECT + // rejectTokens(address,address[],(address,int64)[]) + private static final int OWNER_ADDRESS_INDEX = 0; + private static final int FUNGIBLE_ADDRESS_INDEX = 1; + private static final int NFT_IDS_INDEX = 2; + // Indexes for NftID struct (address,int64) + private static final int NFT_ID_ADDRESS_INDEX = 0; + private static final int NFT_ID_SERIAL_INDEX = 1; + + // Indexes for HRC_TOKEN_REJECT_NFT + // rejectTokenNFTs(int64[]) + private static final int 
HRC_NFT_SERIAL_INDEX = 0; + + @Inject + public RejectTokensDecoder() { + // Dagger2 + } + + public TransactionBody decodeTokenRejects(@NonNull final HtsCallAttempt attempt) { + final var call = RejectTokensTranslator.TOKEN_REJECT.decodeCall(attempt.inputBytes()); + final var maxRejections = + attempt.configuration().getConfigData(LedgerConfig.class).tokenRejectsMaxLen(); + final Address[] ftAddresses = call.get(FUNGIBLE_ADDRESS_INDEX); + final Tuple[] nftIds = call.get(NFT_IDS_INDEX); + validateFalse(ftAddresses.length + nftIds.length > maxRejections, TOKEN_REFERENCE_LIST_SIZE_LIMIT_EXCEEDED); + final var owner = (Address) call.get(OWNER_ADDRESS_INDEX); + final var ownerId = attempt.addressIdConverter().convert(owner); + var referenceList = new ArrayList(); + for (Address ftAddress : ftAddresses) { + final var tokenReference = TokenReference.newBuilder() + .fungibleToken(asTokenId(ftAddress)) + .build(); + referenceList.add(tokenReference); + } + for (Tuple nftId : nftIds) { + final var nftIdAddress = (Address) nftId.get(NFT_ID_ADDRESS_INDEX); + final var nftIdSerial = (long) nftId.get(NFT_ID_SERIAL_INDEX); + final var nftReference = TokenReference.newBuilder() + .nft(NftID.newBuilder() + .tokenId(asTokenId(nftIdAddress)) + .serialNumber(nftIdSerial) + .build()) + .build(); + referenceList.add(nftReference); + } + + return TransactionBody.newBuilder() + .tokenReject( + TokenRejectTransactionBody.newBuilder().owner(ownerId).rejections(referenceList)) + .build(); + } + + public TransactionBody decodeHrcTokenRejectFT(@NonNull final HtsCallAttempt attempt) { + final var token = attempt.redirectTokenId(); + final var sender = attempt.senderId(); + final var tokenReference = + TokenReference.newBuilder().fungibleToken(token).build(); + return TransactionBody.newBuilder() + .tokenReject( + TokenRejectTransactionBody.newBuilder().owner(sender).rejections(tokenReference)) + .build(); + } + + public TransactionBody decodeHrcTokenRejectNFT(@NonNull final HtsCallAttempt 
attempt) { + final var maxRejections = + attempt.configuration().getConfigData(LedgerConfig.class).tokenRejectsMaxLen(); + final var call = RejectTokensTranslator.HRC_TOKEN_REJECT_NFT.decodeCall(attempt.inputBytes()); + final var serials = (long[]) call.get(HRC_NFT_SERIAL_INDEX); + validateFalse(serials.length > maxRejections, TOKEN_REFERENCE_LIST_SIZE_LIMIT_EXCEEDED); + final var token = attempt.redirectTokenId(); + final var sender = attempt.senderId(); + var referenceList = new ArrayList(); + for (long serial : serials) { + final var tokenReference = TokenReference.newBuilder() + .nft(NftID.newBuilder().tokenId(token).serialNumber(serial).build()) + .build(); + referenceList.add(tokenReference); + } + return TransactionBody.newBuilder() + .tokenReject( + TokenRejectTransactionBody.newBuilder().owner(sender).rejections(referenceList)) + .build(); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/rejecttokens/RejectTokensTranslator.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/rejecttokens/RejectTokensTranslator.java new file mode 100644 index 000000000000..e90d627e9029 --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/rejecttokens/RejectTokensTranslator.java @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.rejecttokens; + +import com.esaulpaugh.headlong.abi.Function; +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.gas.DispatchGasCalculator; +import com.hedera.node.app.service.contract.impl.exec.gas.DispatchType; +import com.hedera.node.app.service.contract.impl.exec.gas.SystemContractGasCalculator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.common.AbstractCallTranslator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.common.Call; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.DispatchForResponseCodeHtsCall; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.ReturnTypes; +import com.hedera.node.app.service.contract.impl.hevm.HederaWorldUpdater; +import com.hedera.node.config.data.ContractsConfig; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import javax.inject.Inject; + +public class RejectTokensTranslator extends AbstractCallTranslator { + public static final Function TOKEN_REJECT = + new Function("rejectTokens(address,address[],(address,int64)[])", ReturnTypes.INT_64); + public static final Function HRC_TOKEN_REJECT_FT = new Function("rejectTokenFT()", ReturnTypes.INT_64); + public static final Function HRC_TOKEN_REJECT_NFT = new Function("rejectTokenNFTs(int64[])", ReturnTypes.INT_64); + + private final RejectTokensDecoder decoder; + private final Map gasCalculators = new HashMap<>(); + + @Inject + public RejectTokensTranslator(@NonNull final RejectTokensDecoder decoder) { + this.decoder = 
decoder; + gasCalculators.put(TOKEN_REJECT, RejectTokensTranslator::gasRequirement); + gasCalculators.put(HRC_TOKEN_REJECT_FT, RejectTokensTranslator::gasRequirementHRCFungible); + gasCalculators.put(HRC_TOKEN_REJECT_NFT, RejectTokensTranslator::gasRequirementHRCNft); + } + + @Override + public boolean matches(@NonNull final HtsCallAttempt attempt) { + final var rejectEnabled = + attempt.configuration().getConfigData(ContractsConfig.class).systemContractRejectTokensEnabled(); + return attempt.isTokenRedirect() + ? attempt.isSelectorIfConfigEnabled(rejectEnabled, HRC_TOKEN_REJECT_FT, HRC_TOKEN_REJECT_NFT) + : attempt.isSelectorIfConfigEnabled(TOKEN_REJECT, rejectEnabled); + } + + @Override + public Call callFrom(@NonNull final HtsCallAttempt attempt) { + final var gasRequirement = gasCalculators.entrySet().stream() + .filter(entry -> attempt.isSelector(entry.getKey())) + .map(Entry::getValue) + .findFirst(); + return new DispatchForResponseCodeHtsCall(attempt, bodyFor(attempt), gasRequirement.get()); + } + + public static long gasRequirementHRCFungible( + @NonNull final TransactionBody body, + @NonNull final SystemContractGasCalculator systemContractGasCalculator, + @NonNull final HederaWorldUpdater.Enhancement enhancement, + @NonNull final AccountID payerId) { + return systemContractGasCalculator.gasRequirement(body, DispatchType.TOKEN_REJECT_FT, payerId); + } + + public static long gasRequirementHRCNft( + @NonNull final TransactionBody body, + @NonNull final SystemContractGasCalculator systemContractGasCalculator, + @NonNull final HederaWorldUpdater.Enhancement enhancement, + @NonNull final AccountID payerId) { + return systemContractGasCalculator.gasRequirement(body, DispatchType.TOKEN_REJECT_NFT, payerId); + } + + public static long gasRequirement( + @NonNull final TransactionBody body, + @NonNull final SystemContractGasCalculator systemContractGasCalculator, + @NonNull final HederaWorldUpdater.Enhancement enhancement, + @NonNull final AccountID payerId) { + 
final var accumulatedCanonicalPricing = body.tokenReject().rejections().stream() + .map(rejection -> { + if (rejection.hasFungibleToken()) { + return systemContractGasCalculator.canonicalPriceInTinycents(DispatchType.TOKEN_REJECT_FT); + } else { + return systemContractGasCalculator.canonicalPriceInTinycents(DispatchType.TOKEN_REJECT_NFT); + } + }) + .reduce(0L, Long::sum); + return systemContractGasCalculator.gasRequirementWithTinycents(body, payerId, accumulatedCanonicalPricing); + } + + private TransactionBody bodyFor(@NonNull HtsCallAttempt attempt) { + return attempt.isSelector(TOKEN_REJECT) ? bodyForClassic(attempt) : bodyForHRC(attempt); + } + + private TransactionBody bodyForClassic(@NonNull final HtsCallAttempt attempt) { + return decoder.decodeTokenRejects(attempt); + } + + private TransactionBody bodyForHRC(@NonNull final HtsCallAttempt attempt) { + if (attempt.isSelector(HRC_TOKEN_REJECT_FT)) { + return decoder.decodeHrcTokenRejectFT(attempt); + } else { + return decoder.decodeHrcTokenRejectNFT(attempt); + } + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/transfer/ClassicTransfersCall.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/transfer/ClassicTransfersCall.java index 316ad50d23e0..3730ac488482 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/transfer/ClassicTransfersCall.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/exec/systemcontracts/hts/transfer/ClassicTransfersCall.java @@ -150,7 +150,7 @@ public ClassicTransfersCall( .cryptoTransfer(requireNonNull(approvalSwitchHelper) .switchToApprovalsAsNeededIn( syntheticTransfer.cryptoTransferOrThrow(), - 
systemContractOperations().activeSignatureTestWith(verificationStrategy), + systemContractOperations().signatureTestWith(verificationStrategy), nativeOperations(), senderId)) .build() diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/state/ProxyEvmAccount.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/state/ProxyEvmAccount.java index 043274a00355..ccc2df4bada8 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/state/ProxyEvmAccount.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/com/hedera/node/app/service/contract/impl/state/ProxyEvmAccount.java @@ -46,7 +46,9 @@ public class ProxyEvmAccount extends AbstractProxyEvmAccount { // hbarAllowance(address spender) 0xbbee989e, // hbarApprove(address spender, int256 amount) - 0x86aff07c); + 0x86aff07c, + // setUnlimitedAutomaticAssociations(bool enableAutoAssociations + 0xf5677e99); // Only pass in a non-null account address if the function selector is eligible for proxy redirection. // A null address will return the 0x bytecode. 
diff --git a/hedera-node/hedera-smart-contract-service-impl/src/main/java/module-info.java b/hedera-node/hedera-smart-contract-service-impl/src/main/java/module-info.java index bca30951fe7d..f049466e17fc 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/main/java/module-info.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/main/java/module-info.java @@ -79,4 +79,6 @@ opens com.hedera.node.app.service.contract.impl.exec.tracers to com.hedera.node.app.service.contract.impl.test; + + exports com.hedera.node.app.service.contract.impl.annotations; } diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/ActiveContractVerificationStrategyTest.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/ActiveContractVerificationStrategyTest.java index 907c07d6ae43..08943048e83c 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/ActiveContractVerificationStrategyTest.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/ActiveContractVerificationStrategyTest.java @@ -16,13 +16,9 @@ package com.hedera.node.app.service.contract.impl.test.exec.scope; -import static com.hedera.node.app.service.contract.impl.test.TestHelpers.ANOTHER_ED25519_KEY; -import static com.hedera.node.app.service.contract.impl.test.TestHelpers.AN_ED25519_KEY; import static com.hedera.node.app.service.contract.impl.test.TestHelpers.A_SECP256K1_KEY; import static com.hedera.node.app.service.contract.impl.test.TestHelpers.B_SECP256K1_KEY; -import static com.hedera.node.app.service.contract.impl.test.TestHelpers.YET_ANOTHER_ED25519_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; import static 
org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.BDDMockito.given; import static org.mockito.Mockito.doCallRealMethod; @@ -30,8 +26,6 @@ import com.hedera.hapi.node.base.ContractID; import com.hedera.hapi.node.base.Key; -import com.hedera.hapi.node.base.KeyList; -import com.hedera.hapi.node.base.ThresholdKey; import com.hedera.node.app.service.contract.impl.exec.scope.ActiveContractVerificationStrategy; import com.hedera.node.app.service.contract.impl.exec.scope.ActiveContractVerificationStrategy.UseTopLevelSigs; import com.hedera.node.app.service.contract.impl.exec.scope.VerificationStrategy; @@ -142,11 +136,11 @@ void validatesKeysAsExpectedWhenDelegatePermissionRequiredAndNotUsingTopLevelSig @Test void signatureTestApprovesEthSenderKeyWhenDelegating() { final var subject = mock(VerificationStrategy.class); - doCallRealMethod().when(subject).asSignatureTestIn(context, A_SECP256K1_KEY); + doCallRealMethod().when(subject).asPrimitiveSignatureTestIn(context, A_SECP256K1_KEY); given(subject.decideForPrimitive(A_SECP256K1_KEY)) .willReturn(VerificationStrategy.Decision.DELEGATE_TO_CRYPTOGRAPHIC_VERIFICATION); - final var test = subject.asSignatureTestIn(context, A_SECP256K1_KEY); + final var test = subject.asPrimitiveSignatureTestIn(context, A_SECP256K1_KEY); assertTrue(test.test(A_SECP256K1_KEY)); } @@ -155,90 +149,14 @@ void signatureTestUsesContextVerificationWhenNotEthSenderKey() { final var keyVerifier = mock(KeyVerifier.class); final var verification = mock(SignatureVerification.class); final var subject = mock(VerificationStrategy.class); - doCallRealMethod().when(subject).asSignatureTestIn(context, null); + doCallRealMethod().when(subject).asPrimitiveSignatureTestIn(context, null); given(verification.passed()).willReturn(true); given(context.keyVerifier()).willReturn(keyVerifier); given(keyVerifier.verificationFor(B_SECP256K1_KEY)).willReturn(verification); given(subject.decideForPrimitive(B_SECP256K1_KEY)) 
.willReturn(VerificationStrategy.Decision.DELEGATE_TO_CRYPTOGRAPHIC_VERIFICATION); - final var test = subject.asSignatureTestIn(context, null); + final var test = subject.asPrimitiveSignatureTestIn(context, null); assertTrue(test.test(B_SECP256K1_KEY)); } - - @Test - void signatureTestApprovesAllValidKeyLists() { - final var subject = mock(VerificationStrategy.class); - doCallRealMethod().when(subject).asSignatureTestIn(context, null); - given(subject.decideForPrimitive(AN_ED25519_KEY)).willReturn(VerificationStrategy.Decision.VALID); - given(subject.decideForPrimitive(ANOTHER_ED25519_KEY)).willReturn(VerificationStrategy.Decision.VALID); - given(subject.decideForPrimitive(YET_ANOTHER_ED25519_KEY)).willReturn(VerificationStrategy.Decision.VALID); - - final var test = subject.asSignatureTestIn(context, null); - final var key = Key.newBuilder() - .keyList(KeyList.newBuilder().keys(AN_ED25519_KEY, ANOTHER_ED25519_KEY, YET_ANOTHER_ED25519_KEY)) - .build(); - assertTrue(test.test(key)); - } - - @Test - void signatureTestRejectsIncompleteKeyLists() { - final var subject = mock(VerificationStrategy.class); - doCallRealMethod().when(subject).asSignatureTestIn(context, null); - given(subject.decideForPrimitive(AN_ED25519_KEY)).willReturn(VerificationStrategy.Decision.VALID); - given(subject.decideForPrimitive(ANOTHER_ED25519_KEY)).willReturn(VerificationStrategy.Decision.INVALID); - - final var test = subject.asSignatureTestIn(context, null); - final var key = Key.newBuilder() - .keyList(KeyList.newBuilder().keys(AN_ED25519_KEY, ANOTHER_ED25519_KEY, YET_ANOTHER_ED25519_KEY)) - .build(); - assertFalse(test.test(key)); - } - - @Test - void signatureTestApprovesSufficientThresholdKeys() { - final var subject = mock(VerificationStrategy.class); - doCallRealMethod().when(subject).asSignatureTestIn(context, null); - given(subject.decideForPrimitive(AN_ED25519_KEY)).willReturn(VerificationStrategy.Decision.VALID); - 
given(subject.decideForPrimitive(ANOTHER_ED25519_KEY)).willReturn(VerificationStrategy.Decision.INVALID); - given(subject.decideForPrimitive(YET_ANOTHER_ED25519_KEY)).willReturn(VerificationStrategy.Decision.VALID); - - final var test = subject.asSignatureTestIn(context, null); - final var key = Key.newBuilder() - .thresholdKey(ThresholdKey.newBuilder() - .threshold(2) - .keys(KeyList.newBuilder().keys(AN_ED25519_KEY, ANOTHER_ED25519_KEY, YET_ANOTHER_ED25519_KEY)) - .build()) - .build(); - assertTrue(test.test(key)); - } - - @Test - void signatureTestRejectsInsufficientThresholdKeys() { - final var subject = mock(VerificationStrategy.class); - doCallRealMethod().when(subject).asSignatureTestIn(context, null); - given(subject.decideForPrimitive(AN_ED25519_KEY)).willReturn(VerificationStrategy.Decision.VALID); - given(subject.decideForPrimitive(ANOTHER_ED25519_KEY)).willReturn(VerificationStrategy.Decision.INVALID); - given(subject.decideForPrimitive(YET_ANOTHER_ED25519_KEY)).willReturn(VerificationStrategy.Decision.INVALID); - - final var test = subject.asSignatureTestIn(context, null); - final var key = Key.newBuilder() - .thresholdKey(ThresholdKey.newBuilder() - .threshold(2) - .keys(KeyList.newBuilder().keys(AN_ED25519_KEY, ANOTHER_ED25519_KEY, YET_ANOTHER_ED25519_KEY)) - .build()) - .build(); - assertFalse(test.test(key)); - } - - @Test - void unsupportedKeyTypesAreNotPrimitive() { - final var subject = mock(VerificationStrategy.class); - doCallRealMethod().when(subject).asSignatureTestIn(context, null); - - final var aRsa3072Key = Key.newBuilder().rsa3072(Bytes.wrap("NONSENSE")).build(); - - final var test = subject.asSignatureTestIn(context, null); - assertFalse(test.test(aRsa3072Key)); - } } diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/HandleHederaOperationsTest.java 
b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/HandleHederaOperationsTest.java index 83196b6050ed..79014adbad35 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/HandleHederaOperationsTest.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/HandleHederaOperationsTest.java @@ -417,7 +417,9 @@ void createContractWithSelfAdminParentDispatchesAsExpectedThenMarksCreated() thr final var pendingId = ContractID.newBuilder().contractNum(666L).build(); final var synthContractCreation = synthContractCreationFromParent(pendingId, parent) .copyBuilder() - .adminKey((Key) null) + .adminKey(Key.newBuilder() + .contractID(ContractID.newBuilder().contractNum(666L)) + .build()) .build(); final var synthAccountCreation = synthAccountCreationFromHapi(pendingId, CANONICAL_ALIAS, synthContractCreation); diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/HandleSystemContractOperationsTest.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/HandleSystemContractOperationsTest.java index e3f3fcbc23a2..259b38d72a5d 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/HandleSystemContractOperationsTest.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/HandleSystemContractOperationsTest.java @@ -27,6 +27,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentCaptor.forClass; import static 
org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.BDDMockito.given; @@ -35,6 +36,7 @@ import com.hedera.hapi.node.base.AccountID; import com.hedera.hapi.node.base.ContractID; +import com.hedera.hapi.node.base.Key; import com.hedera.hapi.node.base.ResponseCodeEnum; import com.hedera.hapi.node.base.Transaction; import com.hedera.hapi.node.base.TransactionID; @@ -49,13 +51,13 @@ import com.hedera.node.app.spi.fees.ExchangeRateInfo; import com.hedera.node.app.spi.key.KeyVerifier; import com.hedera.node.app.spi.signatures.SignatureVerification; +import com.hedera.node.app.spi.signatures.VerificationAssistant; import com.hedera.node.app.spi.workflows.HandleContext; import java.util.function.Predicate; import org.apache.tuweni.bytes.Bytes; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; @@ -74,6 +76,9 @@ class HandleSystemContractOperationsTest { @Mock private VerificationStrategy strategy; + @Mock + private Predicate callback; + @Mock private SignatureVerification passed; @@ -93,10 +98,32 @@ void setUp() { subject = new HandleSystemContractOperations(context, A_SECP256K1_KEY); } + @Test + void returnsExpectedPrimitiveTest() { + given(strategy.asPrimitiveSignatureTestIn(context, A_SECP256K1_KEY)).willReturn(callback); + assertSame(callback, subject.primitiveSignatureTestWith(strategy)); + } + + @Test + void returnsExpectedTest() { + final var captor = forClass(VerificationAssistant.class); + doCallRealMethod().when(strategy).asSignatureTestIn(context, A_SECP256K1_KEY); + given(strategy.asPrimitiveSignatureTestIn(context, A_SECP256K1_KEY)).willReturn(callback); + given(context.keyVerifier()).willReturn(keyVerifier); + given(keyVerifier.verificationFor(eq(Key.DEFAULT), captor.capture())).willReturn(passed); + 
given(passed.passed()).willReturn(true); + + final var test = subject.signatureTestWith(strategy); + + assertTrue(test.test(Key.DEFAULT)); + captor.getValue().test(Key.DEFAULT, failed); + verify(callback).test(Key.DEFAULT); + } + @Test @SuppressWarnings("unchecked") void dispatchesRespectingGivenStrategy() { - final var captor = ArgumentCaptor.forClass(Predicate.class); + final var captor = forClass(Predicate.class); given(strategy.decideForPrimitive(TestHelpers.A_CONTRACT_KEY)).willReturn(Decision.VALID); given(strategy.decideForPrimitive(AN_ED25519_KEY)).willReturn(Decision.DELEGATE_TO_CRYPTOGRAPHIC_VERIFICATION); given(strategy.decideForPrimitive(TestHelpers.B_SECP256K1_KEY)) @@ -106,7 +133,7 @@ void dispatchesRespectingGivenStrategy() { given(context.keyVerifier()).willReturn(keyVerifier); given(keyVerifier.verificationFor(AN_ED25519_KEY)).willReturn(passed); given(keyVerifier.verificationFor(TestHelpers.B_SECP256K1_KEY)).willReturn(failed); - doCallRealMethod().when(strategy).asSignatureTestIn(context, A_SECP256K1_KEY); + doCallRealMethod().when(strategy).asPrimitiveSignatureTestIn(context, A_SECP256K1_KEY); subject.dispatch(TransactionBody.DEFAULT, strategy, A_NEW_ACCOUNT_ID, CryptoTransferStreamBuilder.class); diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/QuerySystemContractOperationsTest.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/QuerySystemContractOperationsTest.java index 41ddf79a80eb..9b481ddb2ef9 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/QuerySystemContractOperationsTest.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/scope/QuerySystemContractOperationsTest.java @@ -87,8 +87,10 @@ void dispatchingNotSupported() { } @Test - 
void sigTestNotSupported() { - assertThrows(UnsupportedOperationException.class, () -> subject.activeSignatureTestWith(verificationStrategy)); + void sigTestsNotSupported() { + assertThrows( + UnsupportedOperationException.class, () -> subject.primitiveSignatureTestWith(verificationStrategy)); + assertThrows(UnsupportedOperationException.class, () -> subject.signatureTestWith(verificationStrategy)); } @Test diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/CallAttemptHelpers.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/CallAttemptHelpers.java index d49c2a9c3b65..11d17b1e7a71 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/CallAttemptHelpers.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/CallAttemptHelpers.java @@ -81,6 +81,30 @@ public static HtsCallAttempt prepareHtsAttemptWithSelectorForRedirect( false); } + public static HtsCallAttempt prepareHtsAttemptWithSelectorForRedirectWithConfig( + final Function function, + final CallTranslator translator, + final HederaWorldUpdater.Enhancement enhancement, + final AddressIdConverter addressIdConverter, + final VerificationStrategies verificationStrategies, + final SystemContractGasCalculator gasCalculator, + final Configuration config) { + final var input = TestHelpers.bytesForRedirect(function.selector(), NON_SYSTEM_LONG_ZERO_ADDRESS); + + return new HtsCallAttempt( + input, + OWNER_BESU_ADDRESS, + OWNER_BESU_ADDRESS, + false, + enhancement, + config, + addressIdConverter, + verificationStrategies, + gasCalculator, + List.of(translator), + false); + } + public static HtsCallAttempt prepareHtsAttemptWithSelectorAndCustomConfig( final Function function, 
final CallTranslator translator, @@ -126,4 +150,27 @@ public static HasCallAttempt prepareHasAttemptWithSelector( List.of(translator), false); } + + public static HasCallAttempt prepareHasAttemptWithSelectorAndCustomConfig( + final Function function, + final CallTranslator translator, + final HederaWorldUpdater.Enhancement enhancement, + final AddressIdConverter addressIdConverter, + final VerificationStrategies verificationStrategies, + final SystemContractGasCalculator gasCalculator, + final Configuration config) { + final var input = TestHelpers.bytesForRedirectAccount(function.selector(), NON_SYSTEM_LONG_ZERO_ADDRESS); + return new HasCallAttempt( + input, + OWNER_BESU_ADDRESS, + OWNER_BESU_ADDRESS, + false, + enhancement, + config, + addressIdConverter, + verificationStrategies, + gasCalculator, + List.of(translator), + false); + } } diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/has/setunlimitedautoassociations/SetUnlimitedAutoAssociationsCallTest.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/has/setunlimitedautoassociations/SetUnlimitedAutoAssociationsCallTest.java new file mode 100644 index 000000000000..eecc6b8b1d38 --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/has/setunlimitedautoassociations/SetUnlimitedAutoAssociationsCallTest.java @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.has.setunlimitedautoassociations; + +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.REVERTED_SUCCESS; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SUCCESS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.given; + +import com.hedera.hapi.node.base.ResponseCodeEnum; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.gas.SystemContractGasCalculator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.HasCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.setunlimitedautoassociations.SetUnlimitedAutoAssociationsCall; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.setunlimitedautoassociations.SetUnlimitedAutoAssociationsTranslator; +import com.hedera.node.app.service.contract.impl.records.ContractCallStreamBuilder; +import com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.common.CallTestBase; +import org.apache.tuweni.bytes.Bytes; +import org.hyperledger.besu.evm.frame.MessageFrame.State; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class SetUnlimitedAutoAssociationsCallTest extends CallTestBase { + + @Mock + 
private HasCallAttempt attempt; + + @Mock + private TransactionBody transactionBody; + + @Mock + private ContractCallStreamBuilder recordBuilder; + + @Mock + private SystemContractGasCalculator gasCalculator; + + private SetUnlimitedAutoAssociationsCall subject; + + @Test + void successCall() { + given(attempt.systemContractGasCalculator()).willReturn(gasCalculator); + given(attempt.enhancement()).willReturn(mockEnhancement()); + given(systemContractOperations.dispatch(any(), any(), any(), any())).willReturn(recordBuilder); + given(recordBuilder.status()).willReturn(ResponseCodeEnum.SUCCESS); + + subject = new SetUnlimitedAutoAssociationsCall(attempt, transactionBody); + final var result = subject.execute(frame).fullResult().result(); + assertEquals(State.COMPLETED_SUCCESS, result.getState()); + assertEquals( + Bytes.wrap(SetUnlimitedAutoAssociationsTranslator.SET_UNLIMITED_AUTO_ASSOC + .getOutputs() + .encodeElements((long) SUCCESS.getNumber()) + .array()), + result.getOutput()); + } + + @Test + void revertCall() { + given(attempt.systemContractGasCalculator()).willReturn(gasCalculator); + given(attempt.enhancement()).willReturn(mockEnhancement()); + given(systemContractOperations.dispatch(any(), any(), any(), any())).willReturn(recordBuilder); + given(recordBuilder.status()).willReturn(ResponseCodeEnum.REVERTED_SUCCESS); + + subject = new SetUnlimitedAutoAssociationsCall(attempt, transactionBody); + final var result = subject.execute(frame).fullResult().result(); + assertEquals(State.COMPLETED_SUCCESS, result.getState()); + assertEquals( + Bytes.wrap(SetUnlimitedAutoAssociationsTranslator.SET_UNLIMITED_AUTO_ASSOC + .getOutputs() + .encodeElements((long) REVERTED_SUCCESS.getNumber()) + .array()), + result.getOutput()); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/has/setunlimitedautoassociations/SetUnlimitedAutoAssociationsTranslatorTest.java 
b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/has/setunlimitedautoassociations/SetUnlimitedAutoAssociationsTranslatorTest.java new file mode 100644 index 000000000000..be0cfd81444b --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/has/setunlimitedautoassociations/SetUnlimitedAutoAssociationsTranslatorTest.java @@ -0,0 +1,131 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.has.setunlimitedautoassociations; + +import static com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.setunlimitedautoassociations.SetUnlimitedAutoAssociationsTranslator.SET_UNLIMITED_AUTO_ASSOC; +import static com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.CallAttemptHelpers.prepareHasAttemptWithSelectorAndCustomConfig; +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.BDDMockito.given; + +import com.hedera.node.app.service.contract.impl.exec.gas.SystemContractGasCalculator; +import com.hedera.node.app.service.contract.impl.exec.scope.HederaNativeOperations; +import com.hedera.node.app.service.contract.impl.exec.scope.VerificationStrategies; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.HasCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.setunlimitedautoassociations.SetUnlimitedAutoAssociationsCall; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.has.setunlimitedautoassociations.SetUnlimitedAutoAssociationsTranslator; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.AddressIdConverter; +import com.hedera.node.app.service.contract.impl.hevm.HederaWorldUpdater; +import com.hedera.node.config.data.ContractsConfig; +import com.swirlds.config.api.Configuration; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class SetUnlimitedAutoAssociationsTranslatorTest { + + @Mock + private HasCallAttempt attempt; + + @Mock + private SystemContractGasCalculator gasCalculator; + + @Mock 
+ private AddressIdConverter addressIdConverter; + + @Mock + private HederaWorldUpdater.Enhancement enhancement; + + @Mock + private VerificationStrategies verificationStrategies; + + @Mock + private HederaNativeOperations nativeOperations; + + @Mock + private Configuration configuration; + + @Mock + private ContractsConfig contractsConfig; + + private SetUnlimitedAutoAssociationsTranslator subject; + + @BeforeEach + void setUp() { + subject = new SetUnlimitedAutoAssociationsTranslator(); + } + + @Test + void matchesWhenEnabled() { + given(enhancement.nativeOperations()).willReturn(nativeOperations); + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractSetUnlimitedAutoAssociationsEnabled()) + .willReturn(true); + attempt = prepareHasAttemptWithSelectorAndCustomConfig( + SET_UNLIMITED_AUTO_ASSOC, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + assertTrue(subject.matches(attempt)); + } + + @Test + void matchesWhenDisabled() { + given(enhancement.nativeOperations()).willReturn(nativeOperations); + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractSetUnlimitedAutoAssociationsEnabled()) + .willReturn(false); + attempt = prepareHasAttemptWithSelectorAndCustomConfig( + SET_UNLIMITED_AUTO_ASSOC, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + assertFalse(subject.matches(attempt)); + } + + @Test + void callFromWithTrueValue() { + final var inputBytes = SET_UNLIMITED_AUTO_ASSOC.encodeCallWithArgs(true); + given(attempt.inputBytes()).willReturn(inputBytes.array()); + given(attempt.enhancement()).willReturn(enhancement); + given(attempt.systemContractGasCalculator()).willReturn(gasCalculator); + + final var call = subject.callFrom(attempt); + 
assertThat(call).isInstanceOf(SetUnlimitedAutoAssociationsCall.class); + } + + @Test + void callFromWithFalseValue() { + final var inputBytes = SET_UNLIMITED_AUTO_ASSOC.encodeCallWithArgs(false); + given(attempt.inputBytes()).willReturn(inputBytes.array()); + given(attempt.enhancement()).willReturn(enhancement); + given(attempt.systemContractGasCalculator()).willReturn(gasCalculator); + + final var call = subject.callFrom(attempt); + assertThat(call).isInstanceOf(SetUnlimitedAutoAssociationsCall.class); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/airdrops/TokenAirdropDecoderTest.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/airdrops/TokenAirdropDecoderTest.java new file mode 100644 index 000000000000..20ccb28445b2 --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/airdrops/TokenAirdropDecoderTest.java @@ -0,0 +1,296 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.hts.airdrops; + +import static com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.airdrops.TokenAirdropTranslator.TOKEN_AIRDROP; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.FUNGIBLE_TOKEN_HEADLONG_ADDRESS; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.FUNGIBLE_TOKEN_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.NON_FUNGIBLE_TOKEN_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.OWNER_ACCOUNT_AS_ADDRESS; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.OWNER_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.SENDER_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.SYSTEM_ADDRESS; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.asHeadlongAddress; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.lenient; + +import com.esaulpaugh.headlong.abi.Tuple; +import com.hedera.hapi.node.base.AccountAmount; +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.base.NftTransfer; +import com.hedera.hapi.node.base.TokenTransferList; +import com.hedera.hapi.node.token.TokenAirdropTransactionBody; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.AddressIdConverter; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.airdrops.TokenAirdropDecoder; +import 
com.hedera.node.app.spi.workflows.HandleException; +import com.hedera.node.config.data.LedgerConfig; +import com.hedera.node.config.data.TokensConfig; +import com.swirlds.config.api.Configuration; +import org.apache.tuweni.bytes.Bytes; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +public class TokenAirdropDecoderTest { + + @Mock + private HtsCallAttempt attempt; + + @Mock + private AddressIdConverter addressIdConverter; + + @Mock + private Configuration configuration; + + @Mock + private TokensConfig tokensConfig; + + @Mock + private LedgerConfig ledgerConfig; + + private TokenAirdropDecoder subject; + + private final TokenAirdropTransactionBody tokenAirdrop = TokenAirdropTransactionBody.newBuilder() + .tokenTransfers(TokenTransferList.newBuilder() + .token(FUNGIBLE_TOKEN_ID) + .transfers( + AccountAmount.newBuilder() + .accountID(SENDER_ID) + .amount(-10) + .build(), + AccountAmount.newBuilder() + .accountID(OWNER_ID) + .amount(10) + .build()) + .build()) + .build(); + + private final TokenAirdropTransactionBody nftAirdrop = TokenAirdropTransactionBody.newBuilder() + .tokenTransfers(TokenTransferList.newBuilder() + .token(NON_FUNGIBLE_TOKEN_ID) + .nftTransfers(NftTransfer.newBuilder() + .senderAccountID(SENDER_ID) + .receiverAccountID(OWNER_ID) + .serialNumber(1) + .build()) + .build()) + .build(); + + @BeforeEach + void setUp() { + subject = new TokenAirdropDecoder(); + lenient() + .when(addressIdConverter.convert(asHeadlongAddress(SENDER_ID.accountNum()))) + .thenReturn(SENDER_ID); + lenient().when(addressIdConverter.convert(OWNER_ACCOUNT_AS_ADDRESS)).thenReturn(OWNER_ID); + } + + @Test + void tokenAirdropDecoderWorks() { + final var tuple = new Tuple[] { + Tuple.of( + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + new Tuple[] { + 
Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), -10L, false), + Tuple.of(OWNER_ACCOUNT_AS_ADDRESS, 10L, false) + }, + new Tuple[] {}) + }; + final var encoded = Bytes.wrapByteBuffer(TOKEN_AIRDROP.encodeCall(Tuple.singleton(tuple))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + given(attempt.configuration()).willReturn(configuration); + given(attempt.addressIdConverter()).willReturn(addressIdConverter); + given(configuration.getConfigData(LedgerConfig.class)).willReturn(ledgerConfig); + given(ledgerConfig.tokenTransfersMaxLen()).willReturn(10); + given(ledgerConfig.nftTransfersMaxLen()).willReturn(10); + final var body = subject.decodeAirdrop(attempt); + assertNotNull(body); + assertNotNull(body.tokenAirdrop()); + assertNotNull(body.tokenAirdrop().tokenTransfers()); + assertEquals(tokenAirdrop, body.tokenAirdrop()); + } + + @Test + void tokenAirdropDecoderFailsIfReceiverIsSystemAcc() { + final var tuple = new Tuple[] { + Tuple.of( + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + new Tuple[] { + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), -10L, false), + Tuple.of(asHeadlongAddress(SYSTEM_ADDRESS), 10L, false) + }, + new Tuple[] {}) + }; + final var encoded = Bytes.wrapByteBuffer(TOKEN_AIRDROP.encodeCall(Tuple.singleton(tuple))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + given(attempt.configuration()).willReturn(configuration); + given(attempt.addressIdConverter()).willReturn(addressIdConverter); + given(configuration.getConfigData(LedgerConfig.class)).willReturn(ledgerConfig); + given(ledgerConfig.tokenTransfersMaxLen()).willReturn(10); + given(ledgerConfig.nftTransfersMaxLen()).willReturn(10); + given(addressIdConverter.convert(asHeadlongAddress(SYSTEM_ADDRESS))) + .willReturn(AccountID.newBuilder().accountNum(750).build()); + assertThrows(HandleException.class, () -> subject.decodeAirdrop(attempt)); + } + + @Test + void tokenAirdropDecoderFailsIfAirdropExceedsLimits() { + final var tuple = new Tuple[] { + Tuple.of( 
+ FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + new Tuple[] { + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), -10L, false), + Tuple.of(OWNER_ACCOUNT_AS_ADDRESS, 10L, false) + }, + new Tuple[] {}), + Tuple.of( + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + new Tuple[] { + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), -10L, false), + Tuple.of(OWNER_ACCOUNT_AS_ADDRESS, 10L, false) + }, + new Tuple[] {}), + Tuple.of( + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + new Tuple[] { + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), -10L, false), + Tuple.of(OWNER_ACCOUNT_AS_ADDRESS, 10L, false) + }, + new Tuple[] {}), + Tuple.of( + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + new Tuple[] { + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), -10L, false), + Tuple.of(OWNER_ACCOUNT_AS_ADDRESS, 10L, false) + }, + new Tuple[] {}), + Tuple.of( + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + new Tuple[] { + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), -10L, false), + Tuple.of(OWNER_ACCOUNT_AS_ADDRESS, 10L, false) + }, + new Tuple[] {}), + Tuple.of( + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + new Tuple[] { + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), -10L, false), + Tuple.of(OWNER_ACCOUNT_AS_ADDRESS, 10L, false) + }, + new Tuple[] {}), + Tuple.of( + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + new Tuple[] { + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), -10L, false), + Tuple.of(OWNER_ACCOUNT_AS_ADDRESS, 10L, false) + }, + new Tuple[] {}), + Tuple.of( + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + new Tuple[] { + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), -10L, false), + Tuple.of(OWNER_ACCOUNT_AS_ADDRESS, 10L, false) + }, + new Tuple[] {}), + Tuple.of( + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + new Tuple[] { + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), -10L, false), + Tuple.of(OWNER_ACCOUNT_AS_ADDRESS, 10L, false) + }, + new Tuple[] {}), + Tuple.of( + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + new Tuple[] { + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), -10L, false), + Tuple.of(OWNER_ACCOUNT_AS_ADDRESS, 10L, 
false) + }, + new Tuple[] {}), + Tuple.of( + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + new Tuple[] { + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), -10L, false), + Tuple.of(OWNER_ACCOUNT_AS_ADDRESS, 10L, false) + }, + new Tuple[] {}), + }; + final var encoded = Bytes.wrapByteBuffer(TOKEN_AIRDROP.encodeCall(Tuple.singleton(tuple))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + given(attempt.configuration()).willReturn(configuration); + given(attempt.addressIdConverter()).willReturn(addressIdConverter); + given(configuration.getConfigData(LedgerConfig.class)).willReturn(ledgerConfig); + given(ledgerConfig.tokenTransfersMaxLen()).willReturn(10); + given(ledgerConfig.nftTransfersMaxLen()).willReturn(10); + assertThrows(HandleException.class, () -> subject.decodeAirdrop(attempt)); + } + + @Test + void tokenAirdropDecoderWorksForNFT() { + final var tuple = new Tuple[] { + Tuple.of(NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS, new Tuple[] {}, new Tuple[] { + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), OWNER_ACCOUNT_AS_ADDRESS, 1L, false) + }) + }; + final var encoded = Bytes.wrapByteBuffer(TOKEN_AIRDROP.encodeCall(Tuple.singleton(tuple))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + given(attempt.configuration()).willReturn(configuration); + given(attempt.addressIdConverter()).willReturn(addressIdConverter); + given(configuration.getConfigData(LedgerConfig.class)).willReturn(ledgerConfig); + given(ledgerConfig.tokenTransfersMaxLen()).willReturn(10); + given(ledgerConfig.nftTransfersMaxLen()).willReturn(10); + final var body = subject.decodeAirdrop(attempt); + assertNotNull(body); + assertNotNull(body.tokenAirdrop()); + assertNotNull(body.tokenAirdrop().tokenTransfers()); + assertEquals(nftAirdrop, body.tokenAirdrop()); + } + + @Test + void tokenAirdropDecoderForNFTFailsIfNftExceedLimits() { + final var tuple = new Tuple[] { + Tuple.of(NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS, new Tuple[] {}, new Tuple[] { + 
Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), OWNER_ACCOUNT_AS_ADDRESS, 1L, false), + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), OWNER_ACCOUNT_AS_ADDRESS, 2L, false), + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), OWNER_ACCOUNT_AS_ADDRESS, 3L, false), + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), OWNER_ACCOUNT_AS_ADDRESS, 4L, false), + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), OWNER_ACCOUNT_AS_ADDRESS, 5L, false), + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), OWNER_ACCOUNT_AS_ADDRESS, 6L, false), + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), OWNER_ACCOUNT_AS_ADDRESS, 7L, false), + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), OWNER_ACCOUNT_AS_ADDRESS, 8L, false), + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), OWNER_ACCOUNT_AS_ADDRESS, 9L, false), + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), OWNER_ACCOUNT_AS_ADDRESS, 10L, false), + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), OWNER_ACCOUNT_AS_ADDRESS, 11L, false) + }) + }; + final var encoded = Bytes.wrapByteBuffer(TOKEN_AIRDROP.encodeCall(Tuple.singleton(tuple))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + given(attempt.configuration()).willReturn(configuration); + given(attempt.addressIdConverter()).willReturn(addressIdConverter); + given(configuration.getConfigData(LedgerConfig.class)).willReturn(ledgerConfig); + given(ledgerConfig.tokenTransfersMaxLen()).willReturn(10); + given(ledgerConfig.nftTransfersMaxLen()).willReturn(10); + assertThrows(HandleException.class, () -> subject.decodeAirdrop(attempt)); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/airdrops/TokenAirdropTranslatorTest.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/airdrops/TokenAirdropTranslatorTest.java new file mode 100644 index 
000000000000..308abe6e207c --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/airdrops/TokenAirdropTranslatorTest.java @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.hts.airdrops; + +import static com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.airdrops.TokenAirdropTranslator.TOKEN_AIRDROP; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.SENDER_ID; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.when; + +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.gas.DispatchType; +import com.hedera.node.app.service.contract.impl.exec.gas.SystemContractGasCalculator; +import com.hedera.node.app.service.contract.impl.exec.scope.VerificationStrategy; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.AddressIdConverter; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.DispatchForResponseCodeHtsCall; +import 
com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.airdrops.TokenAirdropDecoder; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.airdrops.TokenAirdropTranslator; +import com.hedera.node.app.service.contract.impl.hevm.HederaWorldUpdater; +import com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.common.CallTestBase; +import com.hedera.node.config.data.ContractsConfig; +import com.swirlds.config.api.Configuration; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class TokenAirdropTranslatorTest extends CallTestBase { + + @Mock + private TokenAirdropDecoder decoder; + + @Mock + private HtsCallAttempt attempt; + + @Mock + private ContractsConfig contractsConfig; + + @Mock + private Configuration configuration; + + @Mock + private SystemContractGasCalculator gasCalculator; + + @Mock + private TransactionBody transactionBody; + + @Mock + private HederaWorldUpdater.Enhancement enhancement; + + @Mock + private AddressIdConverter addressIdConverter; + + @Mock + private VerificationStrategy verificationStrategy; + + @Mock + private AccountID payerId; + + private TokenAirdropTranslator translator; + + @BeforeEach + void setUp() { + translator = new TokenAirdropTranslator(decoder); + } + + @Test + void matchesWhenAirdropEnabled() { + when(attempt.configuration()).thenReturn(configuration); + when(configuration.getConfigData(ContractsConfig.class)).thenReturn(contractsConfig); + when(contractsConfig.systemContractAirdropTokensEnabled()).thenReturn(true); + when(attempt.isSelectorIfConfigEnabled(TOKEN_AIRDROP, true)).thenReturn(true); + + boolean result = translator.matches(attempt); + + assertEquals(true, result); + } + + @Test + void 
matchesWhenAirdropDisabled() { + when(attempt.configuration()).thenReturn(configuration); + when(configuration.getConfigData(ContractsConfig.class)).thenReturn(contractsConfig); + when(contractsConfig.systemContractAirdropTokensEnabled()).thenReturn(false); + when(attempt.isSelectorIfConfigEnabled(TOKEN_AIRDROP, false)).thenReturn(false); + + boolean result = translator.matches(attempt); + + assertEquals(false, result); + } + + @Test + void gasRequirementCalculatesCorrectly() { + long expectedGas = 1000L; + when(gasCalculator.gasRequirement(transactionBody, DispatchType.TOKEN_AIRDROP, payerId)) + .thenReturn(expectedGas); + + long result = TokenAirdropTranslator.gasRequirement(transactionBody, gasCalculator, enhancement, payerId); + + assertEquals(expectedGas, result); + } + + @Test + void callFromCreatesCorrectCall() { + given(attempt.enhancement()).willReturn(mockEnhancement()); + given(attempt.addressIdConverter()).willReturn(addressIdConverter); + given(addressIdConverter.convertSender(any())).willReturn(SENDER_ID); + given(attempt.defaultVerificationStrategy()).willReturn(verificationStrategy); + given(attempt.systemContractGasCalculator()).willReturn(gasCalculator); + + final var call = translator.callFrom(attempt); + assertEquals(DispatchForResponseCodeHtsCall.class, call.getClass()); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/cancelairdrops/TokenCancelAirdropDecoderTest.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/cancelairdrops/TokenCancelAirdropDecoderTest.java new file mode 100644 index 000000000000..a0a699bc70f6 --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/cancelairdrops/TokenCancelAirdropDecoderTest.java @@ -0,0 +1,246 @@ +/* + * Copyright (C) 
2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.hts.cancelairdrops; + +import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_TOKEN_ID; +import static com.hedera.hapi.node.base.ResponseCodeEnum.PENDING_AIRDROP_ID_LIST_TOO_LONG; +import static com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.cancelairdrops.TokenCancelAirdropTranslator.CANCEL_AIRDROP; +import static com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.cancelairdrops.TokenCancelAirdropTranslator.HRC_CANCEL_AIRDROP_FT; +import static com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.cancelairdrops.TokenCancelAirdropTranslator.HRC_CANCEL_AIRDROP_NFT; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.FUNGIBLE_TOKEN; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.FUNGIBLE_TOKEN_HEADLONG_ADDRESS; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.FUNGIBLE_TOKEN_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.NON_FUNGIBLE_TOKEN; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.NON_FUNGIBLE_TOKEN_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.OWNER_ACCOUNT_AS_ADDRESS; 
+import static com.hedera.node.app.service.contract.impl.test.TestHelpers.OWNER_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.SENDER_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.asHeadlongAddress; +import static org.assertj.core.api.AssertionsForClassTypes.assertThatExceptionOfType; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.lenient; + +import com.esaulpaugh.headlong.abi.Tuple; +import com.hedera.hapi.node.base.NftID; +import com.hedera.hapi.node.base.PendingAirdropId; +import com.hedera.node.app.service.contract.impl.exec.scope.HederaNativeOperations; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.AddressIdConverter; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.cancelairdrops.TokenCancelAirdropDecoder; +import com.hedera.node.app.service.contract.impl.hevm.HederaWorldUpdater.Enhancement; +import com.hedera.node.app.spi.workflows.HandleException; +import com.hedera.node.config.data.TokensConfig; +import com.swirlds.config.api.Configuration; +import java.util.ArrayList; +import org.apache.tuweni.bytes.Bytes; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class TokenCancelAirdropDecoderTest { + + @Mock + private HtsCallAttempt attempt; + + @Mock + private AddressIdConverter addressIdConverter; + + @Mock + private Enhancement enhancement; + + @Mock + private HederaNativeOperations nativeOperations; + + @Mock + private Configuration configuration; + + @Mock + private TokensConfig tokensConfig; + + 
private TokenCancelAirdropDecoder subject; + + @BeforeEach + void setup() { + subject = new TokenCancelAirdropDecoder(); + + lenient().when(attempt.addressIdConverter()).thenReturn(addressIdConverter); + lenient().when(attempt.configuration()).thenReturn(configuration); + lenient().when(attempt.enhancement()).thenReturn(enhancement); + lenient().when(enhancement.nativeOperations()).thenReturn(nativeOperations); + lenient().when(configuration.getConfigData(TokensConfig.class)).thenReturn(tokensConfig); + } + + @Test + void cancelAirdropDecoder1FTTest() { + // given: + given(tokensConfig.maxAllowedPendingAirdropsToCancel()).willReturn(10); + given(nativeOperations.getToken(FUNGIBLE_TOKEN_ID.tokenNum())).willReturn(FUNGIBLE_TOKEN); + given(addressIdConverter.convert(asHeadlongAddress(SENDER_ID.accountNum()))) + .willReturn(SENDER_ID); + given(addressIdConverter.convert(OWNER_ACCOUNT_AS_ADDRESS)).willReturn(OWNER_ID); + + final var encoded = Bytes.wrapByteBuffer(CANCEL_AIRDROP.encodeCall(Tuple.singleton(new Tuple[] { + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L) + }))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + + var expected = new ArrayList(); + expected.add(PendingAirdropId.newBuilder() + .senderId(SENDER_ID) + .receiverId(OWNER_ID) + .fungibleTokenType(FUNGIBLE_TOKEN_ID) + .build()); + // when: + final var decoded = subject.decodeCancelAirdrop(attempt); + + // then: + assertNotNull(decoded.tokenCancelAirdrop()); + assertEquals(expected, decoded.tokenCancelAirdrop().pendingAirdrops()); + } + + @Test + void failsIfPendingAirdropsAboveLimit() { + // given: + given(tokensConfig.maxAllowedPendingAirdropsToCancel()).willReturn(2); + + final var tuple = Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L); + final var encoded = + Bytes.wrapByteBuffer(CANCEL_AIRDROP.encodeCall(Tuple.singleton(new 
Tuple[] {tuple, tuple, tuple}))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + + assertThatExceptionOfType(HandleException.class) + .isThrownBy(() -> subject.decodeCancelAirdrop(attempt)) + .withMessage(PENDING_AIRDROP_ID_LIST_TOO_LONG.protoName()); + } + + @Test + void failsIfTokenIsNull() { + // given: + given(tokensConfig.maxAllowedPendingAirdropsToCancel()).willReturn(10); + given(nativeOperations.getToken(FUNGIBLE_TOKEN_ID.tokenNum())).willReturn(null); + given(addressIdConverter.convert(asHeadlongAddress(SENDER_ID.accountNum()))) + .willReturn(SENDER_ID); + given(addressIdConverter.convert(OWNER_ACCOUNT_AS_ADDRESS)).willReturn(OWNER_ID); + + final var encoded = Bytes.wrapByteBuffer(CANCEL_AIRDROP.encodeCall(Tuple.singleton(new Tuple[] { + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L) + }))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + + assertThatExceptionOfType(HandleException.class) + .isThrownBy(() -> subject.decodeCancelAirdrop(attempt)) + .withMessage(INVALID_TOKEN_ID.protoName()); + } + + @Test + void cancelAirdropDecoder1NFTTest() { + // given: + given(tokensConfig.maxAllowedPendingAirdropsToCancel()).willReturn(10); + given(nativeOperations.getToken(NON_FUNGIBLE_TOKEN_ID.tokenNum())).willReturn(NON_FUNGIBLE_TOKEN); + given(addressIdConverter.convert(asHeadlongAddress(SENDER_ID.accountNum()))) + .willReturn(SENDER_ID); + given(addressIdConverter.convert(OWNER_ACCOUNT_AS_ADDRESS)).willReturn(OWNER_ID); + + final var encoded = Bytes.wrapByteBuffer(CANCEL_AIRDROP.encodeCall(Tuple.singleton(new Tuple[] { + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 1L) + }))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + var expected = new ArrayList(); + expected.add(PendingAirdropId.newBuilder() + .senderId(SENDER_ID) + .receiverId(OWNER_ID) + 
.nonFungibleToken( + NftID.newBuilder().tokenId(NON_FUNGIBLE_TOKEN_ID).serialNumber(1L)) + .build()); + + // when: + final var decoded = subject.decodeCancelAirdrop(attempt); + + // then: + assertNotNull(decoded.tokenCancelAirdrop()); + assertEquals(expected, decoded.tokenCancelAirdrop().pendingAirdrops()); + } + + @Test + void cancelFTAirdropHRC() { + // given: + given(attempt.redirectTokenId()).willReturn(FUNGIBLE_TOKEN_ID); + given(attempt.senderId()).willReturn(SENDER_ID); + + final var encoded = Bytes.wrapByteBuffer(HRC_CANCEL_AIRDROP_FT.encodeCallWithArgs(OWNER_ACCOUNT_AS_ADDRESS)); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + given(addressIdConverter.convert(OWNER_ACCOUNT_AS_ADDRESS)).willReturn(OWNER_ID); + + final var decoded = subject.decodeCancelAirdropFT(attempt); + var expected = new ArrayList(); + expected.add(PendingAirdropId.newBuilder() + .senderId(SENDER_ID) + .receiverId(OWNER_ID) + .fungibleTokenType(FUNGIBLE_TOKEN_ID) + .build()); + + // then: + assertNotNull(decoded.tokenCancelAirdrop()); + assertEquals(expected, decoded.tokenCancelAirdrop().pendingAirdrops()); + } + + @Test + void cancelNFTAirdropHRC() { + // given: + given(attempt.redirectTokenId()).willReturn(NON_FUNGIBLE_TOKEN_ID); + given(attempt.senderId()).willReturn(SENDER_ID); + + final var encoded = + Bytes.wrapByteBuffer(HRC_CANCEL_AIRDROP_NFT.encodeCallWithArgs(OWNER_ACCOUNT_AS_ADDRESS, 1L)); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + given(addressIdConverter.convert(OWNER_ACCOUNT_AS_ADDRESS)).willReturn(OWNER_ID); + + final var decoded = subject.decodeCancelAirdropNFT(attempt); + var expected = new ArrayList(); + expected.add(PendingAirdropId.newBuilder() + .senderId(SENDER_ID) + .receiverId(OWNER_ID) + .nonFungibleToken( + NftID.newBuilder().tokenId(NON_FUNGIBLE_TOKEN_ID).serialNumber(1L)) + .build()); + // then: + assertNotNull(decoded.tokenCancelAirdrop()); + assertEquals(expected, 
decoded.tokenCancelAirdrop().pendingAirdrops()); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/cancelairdrops/TokenCancelAirdropTranslatorTest.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/cancelairdrops/TokenCancelAirdropTranslatorTest.java new file mode 100644 index 000000000000..26568e3d7d0a --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/cancelairdrops/TokenCancelAirdropTranslatorTest.java @@ -0,0 +1,327 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.hts.cancelairdrops; + +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.SENDER_ID; +import static com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.CallAttemptHelpers.prepareHtsAttemptWithSelectorAndCustomConfig; +import static com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.CallAttemptHelpers.prepareHtsAttemptWithSelectorForRedirectWithConfig; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.verify; + +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.gas.DispatchType; +import com.hedera.node.app.service.contract.impl.exec.gas.SystemContractGasCalculator; +import com.hedera.node.app.service.contract.impl.exec.scope.HederaNativeOperations; +import com.hedera.node.app.service.contract.impl.exec.scope.VerificationStrategies; +import com.hedera.node.app.service.contract.impl.exec.scope.VerificationStrategy; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.AddressIdConverter; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.DispatchForResponseCodeHtsCall; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.cancelairdrops.TokenCancelAirdropDecoder; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.cancelairdrops.TokenCancelAirdropTranslator; +import 
com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.mint.MintTranslator; +import com.hedera.node.app.service.contract.impl.hevm.HederaWorldUpdater.Enhancement; +import com.hedera.node.config.data.ContractsConfig; +import com.swirlds.config.api.Configuration; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class TokenCancelAirdropTranslatorTest { + + @Mock + private TokenCancelAirdropDecoder decoder; + + @Mock + private HtsCallAttempt attempt; + + @Mock + private ContractsConfig contractsConfig; + + @Mock + private Configuration configuration; + + @Mock + private Enhancement enhancement; + + @Mock + private AddressIdConverter addressIdConverter; + + @Mock + private VerificationStrategies verificationStrategies; + + @Mock + private VerificationStrategy verificationStrategy; + + @Mock + private SystemContractGasCalculator gasCalculator; + + @Mock + private HederaNativeOperations nativeOperations; + + @Mock + private TransactionBody transactionBody; + + @Mock + private AccountID payerId; + + private TokenCancelAirdropTranslator subject; + + @BeforeEach + void setUp() { + subject = new TokenCancelAirdropTranslator(decoder); + } + + @Test + void matchesHTSCancelAirdropEnabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractCancelAirdropsEnabled()).willReturn(true); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + TokenCancelAirdropTranslator.CANCEL_AIRDROP, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertTrue(matches); + } + + @Test + void matchesFailsOnWrongSelector() { + // given: + 
given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractCancelAirdropsEnabled()).willReturn(true); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + MintTranslator.MINT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertFalse(matches); + } + + @Test + void matchesHTSCancelAirdropDisabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractCancelAirdropsEnabled()).willReturn(false); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + TokenCancelAirdropTranslator.CANCEL_AIRDROP, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertFalse(matches); + } + + @Test + void matchesHRCCancelFTAirdropEnabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractCancelAirdropsEnabled()).willReturn(true); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + attempt = prepareHtsAttemptWithSelectorForRedirectWithConfig( + TokenCancelAirdropTranslator.HRC_CANCEL_AIRDROP_FT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertTrue(matches); + } + + @Test + void matchesHRCCancelAirdropDisabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractCancelAirdropsEnabled()).willReturn(false); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + attempt = 
prepareHtsAttemptWithSelectorForRedirectWithConfig( + TokenCancelAirdropTranslator.HRC_CANCEL_AIRDROP_FT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertFalse(matches); + } + + @Test + void matchesHRCCancelNFTAirdropEnabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractCancelAirdropsEnabled()).willReturn(true); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + attempt = prepareHtsAttemptWithSelectorForRedirectWithConfig( + TokenCancelAirdropTranslator.HRC_CANCEL_AIRDROP_NFT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertTrue(matches); + } + + @Test + void matchesHRCCancelNFTAirdropDisabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractCancelAirdropsEnabled()).willReturn(false); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + attempt = prepareHtsAttemptWithSelectorForRedirectWithConfig( + TokenCancelAirdropTranslator.HRC_CANCEL_AIRDROP_NFT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + // when: + boolean matches = subject.matches(attempt); + + // then: + assertFalse(matches); + } + + @Test + void gasRequirementCalculatesCorrectly() { + long expectedGas = 1000L; + given(gasCalculator.gasRequirement(transactionBody, DispatchType.TOKEN_CANCEL_AIRDROP, payerId)) + .willReturn(expectedGas); + + long result = TokenCancelAirdropTranslator.gasRequirement(transactionBody, gasCalculator, enhancement, payerId); + + assertEquals(expectedGas, result); + } + + @Test + void callFromHtsCancelAirdrop() { 
+ // given: + given(addressIdConverter.convertSender(any())).willReturn(SENDER_ID); + given(verificationStrategies.activatingOnlyContractKeysFor(any(), anyBoolean(), any())) + .willReturn(verificationStrategy); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + TokenCancelAirdropTranslator.CANCEL_AIRDROP, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + var call = subject.callFrom(attempt); + + // then: + assertEquals(DispatchForResponseCodeHtsCall.class, call.getClass()); + verify(decoder).decodeCancelAirdrop(attempt); + } + + @Test + void callFromHRCCancelFTAirdrop() { + // given: + given(addressIdConverter.convertSender(any())).willReturn(SENDER_ID); + given(verificationStrategies.activatingOnlyContractKeysFor(any(), anyBoolean(), any())) + .willReturn(verificationStrategy); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + TokenCancelAirdropTranslator.HRC_CANCEL_AIRDROP_FT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + var call = subject.callFrom(attempt); + + // then: + assertEquals(DispatchForResponseCodeHtsCall.class, call.getClass()); + verify(decoder).decodeCancelAirdropFT(attempt); + } + + @Test + void callFromHRCCancelNFTAirdrop() { + // given: + given(addressIdConverter.convertSender(any())).willReturn(SENDER_ID); + given(verificationStrategies.activatingOnlyContractKeysFor(any(), anyBoolean(), any())) + .willReturn(verificationStrategy); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + TokenCancelAirdropTranslator.HRC_CANCEL_AIRDROP_NFT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + var call = subject.callFrom(attempt); + + // then: + assertEquals(DispatchForResponseCodeHtsCall.class, call.getClass()); + verify(decoder).decodeCancelAirdropNFT(attempt); + } +} diff --git 
a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/claimairdrops/TokenClaimAirdropDecoderTest.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/claimairdrops/TokenClaimAirdropDecoderTest.java new file mode 100644 index 000000000000..5b08ccfa2231 --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/claimairdrops/TokenClaimAirdropDecoderTest.java @@ -0,0 +1,330 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.hts.claimairdrops; + +import static com.hedera.hapi.node.base.ResponseCodeEnum.PENDING_AIRDROP_ID_LIST_TOO_LONG; +import static com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.claimairdrops.TokenClaimAirdropTranslator.CLAIM_AIRDROP; +import static com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.claimairdrops.TokenClaimAirdropTranslator.HRC_CLAIM_AIRDROP_FT; +import static com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.claimairdrops.TokenClaimAirdropTranslator.HRC_CLAIM_AIRDROP_NFT; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.FUNGIBLE_TOKEN; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.FUNGIBLE_TOKEN_HEADLONG_ADDRESS; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.FUNGIBLE_TOKEN_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.NON_FUNGIBLE_TOKEN; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.NON_FUNGIBLE_TOKEN_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.OWNER_ACCOUNT_AS_ADDRESS; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.OWNER_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.SENDER_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.asHeadlongAddress; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.lenient; + +import com.esaulpaugh.headlong.abi.Tuple; +import com.hedera.hapi.node.base.NftID; +import com.hedera.hapi.node.base.PendingAirdropId; 
+import com.hedera.node.app.service.contract.impl.exec.scope.HederaNativeOperations; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.AddressIdConverter; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.claimairdrops.TokenClaimAirdropDecoder; +import com.hedera.node.app.service.contract.impl.hevm.HederaWorldUpdater.Enhancement; +import com.hedera.node.app.spi.workflows.HandleException; +import com.hedera.node.config.data.TokensConfig; +import com.swirlds.config.api.Configuration; +import java.util.ArrayList; +import org.apache.tuweni.bytes.Bytes; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +public class TokenClaimAirdropDecoderTest { + @Mock + private HtsCallAttempt attempt; + + @Mock + private AddressIdConverter addressIdConverter; + + @Mock + private Enhancement enhancement; + + @Mock + private HederaNativeOperations nativeOperations; + + @Mock + private Configuration configuration; + + @Mock + private TokensConfig tokensConfig; + + private TokenClaimAirdropDecoder subject; + + @BeforeEach + void setup() { + subject = new TokenClaimAirdropDecoder(); + + lenient().when(attempt.addressIdConverter()).thenReturn(addressIdConverter); + } + + @Test + void claimAirdropDecoder1FTTest() { + // given: + given(attempt.configuration()).willReturn(configuration); + given(attempt.enhancement()).willReturn(enhancement); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + given(configuration.getConfigData(TokensConfig.class)).willReturn(tokensConfig); + given(tokensConfig.maxAllowedPendingAirdropsToClaim()).willReturn(10); + given(nativeOperations.getToken(FUNGIBLE_TOKEN_ID.tokenNum())).willReturn(FUNGIBLE_TOKEN); + 
given(addressIdConverter.convert(asHeadlongAddress(OWNER_ID.accountNum()))) + .willReturn(OWNER_ID); + given(addressIdConverter.convert(asHeadlongAddress(SENDER_ID.accountNum()))) + .willReturn(SENDER_ID); + + final var encoded = Bytes.wrapByteBuffer(CLAIM_AIRDROP.encodeCall(Tuple.singleton(new Tuple[] { + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L) + }))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + + var expected = new ArrayList(); + expected.add(PendingAirdropId.newBuilder() + .senderId(SENDER_ID) + .receiverId(OWNER_ID) + .fungibleTokenType(FUNGIBLE_TOKEN_ID) + .build()); + // when: + final var decoded = subject.decodeTokenClaimAirdrop(attempt); + + // then: + assertNotNull(decoded.tokenClaimAirdrop()); + assertEquals(expected, decoded.tokenClaimAirdrop().pendingAirdrops()); + } + + @Test + void failsIfPendingAirdropsAboveLimit() { + // given: + given(attempt.configuration()).willReturn(configuration); + given(configuration.getConfigData(TokensConfig.class)).willReturn(tokensConfig); + given(tokensConfig.maxAllowedPendingAirdropsToClaim()).willReturn(10); + + final var encoded = Bytes.wrapByteBuffer(CLAIM_AIRDROP.encodeCall(Tuple.singleton(new Tuple[] { + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L), + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L), + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L), + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L), + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L), + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + 
OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L), + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L), + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L), + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L), + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L), + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L) + }))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + + assertThrows(HandleException.class, () -> subject.decodeTokenClaimAirdrop(attempt)); + } + + @Test + void failsIfTokenIsNull() { + // given: + given(attempt.configuration()).willReturn(configuration); + given(attempt.enhancement()).willReturn(enhancement); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + given(configuration.getConfigData(TokensConfig.class)).willReturn(tokensConfig); + given(tokensConfig.maxAllowedPendingAirdropsToClaim()).willReturn(10); + given(nativeOperations.getToken(FUNGIBLE_TOKEN_ID.tokenNum())).willReturn(null); + given(addressIdConverter.convert(asHeadlongAddress(SENDER_ID.accountNum()))) + .willReturn(SENDER_ID); + given(addressIdConverter.convert(OWNER_ACCOUNT_AS_ADDRESS)).willReturn(OWNER_ID); + + final var encoded = Bytes.wrapByteBuffer(CLAIM_AIRDROP.encodeCall(Tuple.singleton(new Tuple[] { + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 0L) + }))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + + assertThrows( + HandleException.class, + () -> subject.decodeTokenClaimAirdrop(attempt), + PENDING_AIRDROP_ID_LIST_TOO_LONG.protoName()); + } + + @Test + void 
failsIfTokenIsNullHRCFT() { + final var encoded = Bytes.wrapByteBuffer(HRC_CLAIM_AIRDROP_FT.encodeCallWithArgs(OWNER_ACCOUNT_AS_ADDRESS)); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + + assertThrows( + HandleException.class, + () -> subject.decodeHrcClaimAirdropFt(attempt), + PENDING_AIRDROP_ID_LIST_TOO_LONG.protoName()); + } + + @Test + void failsIfTokenIsNullHRCNFT() { + final var encoded = + Bytes.wrapByteBuffer(HRC_CLAIM_AIRDROP_NFT.encodeCallWithArgs(OWNER_ACCOUNT_AS_ADDRESS, 1L)); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + + assertThrows( + HandleException.class, + () -> subject.decodeHrcClaimAirdropNft(attempt), + PENDING_AIRDROP_ID_LIST_TOO_LONG.protoName()); + } + + @Test + void claimAirdropDecoder1NFTTest() { + // given: + given(attempt.configuration()).willReturn(configuration); + given(attempt.enhancement()).willReturn(enhancement); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + given(configuration.getConfigData(TokensConfig.class)).willReturn(tokensConfig); + given(tokensConfig.maxAllowedPendingAirdropsToClaim()).willReturn(10); + given(nativeOperations.getToken(NON_FUNGIBLE_TOKEN_ID.tokenNum())).willReturn(NON_FUNGIBLE_TOKEN); + given(addressIdConverter.convert(asHeadlongAddress(SENDER_ID.accountNum()))) + .willReturn(SENDER_ID); + given(addressIdConverter.convert(OWNER_ACCOUNT_AS_ADDRESS)).willReturn(OWNER_ID); + + final var encoded = Bytes.wrapByteBuffer(CLAIM_AIRDROP.encodeCall(Tuple.singleton(new Tuple[] { + Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + OWNER_ACCOUNT_AS_ADDRESS, + NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + 1L) + }))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + var expected = new ArrayList(); + expected.add(PendingAirdropId.newBuilder() + .senderId(SENDER_ID) + .receiverId(OWNER_ID) + .nonFungibleToken( + NftID.newBuilder().tokenId(NON_FUNGIBLE_TOKEN_ID).serialNumber(1L)) + .build()); + + // when: + final var decoded = 
subject.decodeTokenClaimAirdrop(attempt); + + // then: + assertNotNull(decoded.tokenClaimAirdrop()); + assertEquals(expected, decoded.tokenClaimAirdrop().pendingAirdrops()); + } + + @Test + void claimTAirdropHRC() { + // given: + given(attempt.redirectTokenId()).willReturn(FUNGIBLE_TOKEN_ID); + given(attempt.senderId()).willReturn(OWNER_ID); + + final var encoded = Bytes.wrapByteBuffer( + HRC_CLAIM_AIRDROP_FT.encodeCallWithArgs(asHeadlongAddress(SENDER_ID.accountNum()))); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + given(addressIdConverter.convert(asHeadlongAddress(SENDER_ID.accountNum()))) + .willReturn(SENDER_ID); + + final var decoded = subject.decodeHrcClaimAirdropFt(attempt); + var expected = new ArrayList(); + expected.add(PendingAirdropId.newBuilder() + .senderId(SENDER_ID) + .receiverId(OWNER_ID) + .fungibleTokenType(FUNGIBLE_TOKEN_ID) + .build()); + + // then: + assertNotNull(decoded.tokenClaimAirdrop()); + assertEquals(expected, decoded.tokenClaimAirdrop().pendingAirdrops()); + } + + @Test + void claimNFTAirdropHRC() { + // given: + given(attempt.redirectTokenId()).willReturn(NON_FUNGIBLE_TOKEN_ID); + given(attempt.senderId()).willReturn(OWNER_ID); + + final var encoded = Bytes.wrapByteBuffer( + HRC_CLAIM_AIRDROP_NFT.encodeCallWithArgs(asHeadlongAddress(SENDER_ID.accountNum()), 1L)); + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + given(addressIdConverter.convert(asHeadlongAddress(SENDER_ID.accountNum()))) + .willReturn(SENDER_ID); + + final var decoded = subject.decodeHrcClaimAirdropNft(attempt); + var expected = new ArrayList(); + expected.add(PendingAirdropId.newBuilder() + .senderId(SENDER_ID) + .receiverId(OWNER_ID) + .nonFungibleToken( + NftID.newBuilder().tokenId(NON_FUNGIBLE_TOKEN_ID).serialNumber(1L)) + .build()); + // then: + assertNotNull(decoded.tokenClaimAirdrop()); + assertEquals(expected, decoded.tokenClaimAirdrop().pendingAirdrops()); + } +} diff --git 
a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/claimairdrops/TokenClaimAirdropTranslatorTest.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/claimairdrops/TokenClaimAirdropTranslatorTest.java new file mode 100644 index 000000000000..909542db94c2 --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/claimairdrops/TokenClaimAirdropTranslatorTest.java @@ -0,0 +1,304 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.hts.claimairdrops; + +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.SENDER_ID; +import static com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.CallAttemptHelpers.prepareHtsAttemptWithSelectorAndCustomConfig; +import static com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.CallAttemptHelpers.prepareHtsAttemptWithSelectorForRedirectWithConfig; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.when; + +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.gas.DispatchType; +import com.hedera.node.app.service.contract.impl.exec.gas.SystemContractGasCalculator; +import com.hedera.node.app.service.contract.impl.exec.scope.HederaNativeOperations; +import com.hedera.node.app.service.contract.impl.exec.scope.VerificationStrategies; +import com.hedera.node.app.service.contract.impl.exec.scope.VerificationStrategy; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.AddressIdConverter; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.DispatchForResponseCodeHtsCall; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.claimairdrops.TokenClaimAirdropDecoder; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.claimairdrops.TokenClaimAirdropTranslator; +import com.hedera.node.app.service.contract.impl.hevm.HederaWorldUpdater; +import 
com.hedera.node.config.data.ContractsConfig; +import com.swirlds.config.api.Configuration; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class TokenClaimAirdropTranslatorTest { + + @Mock + private TokenClaimAirdropDecoder decoder; + + @Mock + private HtsCallAttempt attempt; + + @Mock + private Configuration configuration; + + @Mock + private ContractsConfig contractsConfig; + + @Mock + private HederaWorldUpdater.Enhancement enhancement; + + @Mock + private AddressIdConverter addressIdConverter; + + @Mock + private VerificationStrategies verificationStrategies; + + @Mock + private VerificationStrategy verificationStrategy; + + @Mock + private SystemContractGasCalculator gasCalculator; + + @Mock + private HederaNativeOperations nativeOperations; + + @Mock + private TransactionBody transactionBody; + + @Mock + private AccountID payerId; + + private TokenClaimAirdropTranslator subject; + + @BeforeEach + void setUp() { + subject = new TokenClaimAirdropTranslator(decoder); + } + + @Test + void testMatchesWhenClaimAirdropEnabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractClaimAirdropsEnabled()).willReturn(true); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + TokenClaimAirdropTranslator.CLAIM_AIRDROP, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertTrue(matches); + } + + @Test + void testMatchesWhenClaimAirdropDisabled() { + // given: + + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractClaimAirdropsEnabled()).willReturn(false); + attempt = 
prepareHtsAttemptWithSelectorAndCustomConfig( + TokenClaimAirdropTranslator.CLAIM_AIRDROP, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertFalse(matches); + } + + @Test + void testMatchesHRCClaimFT() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractClaimAirdropsEnabled()).willReturn(true); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + attempt = prepareHtsAttemptWithSelectorForRedirectWithConfig( + TokenClaimAirdropTranslator.HRC_CLAIM_AIRDROP_FT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertTrue(matches); + } + + @Test + void testMatchesHRCClaimNFT() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractClaimAirdropsEnabled()).willReturn(true); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + attempt = prepareHtsAttemptWithSelectorForRedirectWithConfig( + TokenClaimAirdropTranslator.HRC_CLAIM_AIRDROP_NFT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertTrue(matches); + } + + @Test + void testMatchesHRCClaimFTDisabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractClaimAirdropsEnabled()).willReturn(false); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + attempt = prepareHtsAttemptWithSelectorForRedirectWithConfig( + TokenClaimAirdropTranslator.HRC_CLAIM_AIRDROP_FT, + subject, + enhancement, + 
addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertFalse(matches); + } + + @Test + void testMatchesHRCClaimNFTDisabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractClaimAirdropsEnabled()).willReturn(false); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + attempt = prepareHtsAttemptWithSelectorForRedirectWithConfig( + TokenClaimAirdropTranslator.HRC_CLAIM_AIRDROP_NFT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertFalse(matches); + } + + @Test + void testCallFromForClassic() { + // given: + given(addressIdConverter.convertSender(any())).willReturn(SENDER_ID); + given(verificationStrategies.activatingOnlyContractKeysFor(any(), anyBoolean(), any())) + .willReturn(verificationStrategy); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + TokenClaimAirdropTranslator.CLAIM_AIRDROP, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + var call = subject.callFrom(attempt); + + // then: + assertEquals(DispatchForResponseCodeHtsCall.class, call.getClass()); + } + + @Test + void callFromHRCClaimFTAirdrop() { + // given: + given(addressIdConverter.convertSender(any())).willReturn(SENDER_ID); + given(verificationStrategies.activatingOnlyContractKeysFor(any(), anyBoolean(), any())) + .willReturn(verificationStrategy); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + TokenClaimAirdropTranslator.HRC_CLAIM_AIRDROP_FT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + var call = subject.callFrom(attempt); + + // then: + 
assertEquals(DispatchForResponseCodeHtsCall.class, call.getClass()); + } + + @Test + void callFromHRCCancelNFTAirdrop() { + // given: + given(addressIdConverter.convertSender(any())).willReturn(SENDER_ID); + given(verificationStrategies.activatingOnlyContractKeysFor(any(), anyBoolean(), any())) + .willReturn(verificationStrategy); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + TokenClaimAirdropTranslator.HRC_CLAIM_AIRDROP_NFT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + var call = subject.callFrom(attempt); + + // then: + assertEquals(DispatchForResponseCodeHtsCall.class, call.getClass()); + } + + @Test + void testGasRequirement() { + long expectedGas = 1000L; + when(gasCalculator.gasRequirement(transactionBody, DispatchType.TOKEN_CLAIM_AIRDROP, payerId)) + .thenReturn(expectedGas); + + long gas = TokenClaimAirdropTranslator.gasRequirement(transactionBody, gasCalculator, enhancement, payerId); + + assertEquals(expectedGas, gas); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/rejecttokens/RejectTokensDecoderTest.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/rejecttokens/RejectTokensDecoderTest.java new file mode 100644 index 000000000000..54dd9465083f --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/rejecttokens/RejectTokensDecoderTest.java @@ -0,0 +1,249 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.hts.rejecttokens; + +import static com.hedera.hapi.node.base.ResponseCodeEnum.TOKEN_REFERENCE_LIST_SIZE_LIMIT_EXCEEDED; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.FUNGIBLE_TOKEN_HEADLONG_ADDRESS; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.FUNGIBLE_TOKEN_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.NON_FUNGIBLE_TOKEN_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.OWNER_HEADLONG_ADDRESS; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.SENDER_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.asHeadlongAddress; +import static org.assertj.core.api.AssertionsForClassTypes.assertThatExceptionOfType; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.lenient; + +import com.esaulpaugh.headlong.abi.Address; +import com.esaulpaugh.headlong.abi.Tuple; +import com.hedera.hapi.node.base.NftID; +import com.hedera.hapi.node.token.TokenReference; +import com.hedera.hapi.node.token.TokenRejectTransactionBody; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.AddressIdConverter; +import 
com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.rejecttokens.RejectTokensDecoder; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.rejecttokens.RejectTokensTranslator; +import com.hedera.node.app.spi.workflows.HandleException; +import com.hedera.node.config.data.LedgerConfig; +import com.swirlds.config.api.Configuration; +import org.apache.tuweni.bytes.Bytes; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +public class RejectTokensDecoderTest { + + @Mock + private HtsCallAttempt attempt; + + @Mock + private AddressIdConverter addressIdConverter; + + @Mock + private Configuration configuration; + + @Mock + private LedgerConfig ledgerConfig; + + private RejectTokensDecoder subject; + + @BeforeEach + void setup() { + subject = new RejectTokensDecoder(); + + lenient().when(attempt.addressIdConverter()).thenReturn(addressIdConverter); + } + + @Test + void decodeHtsCall() { + // given: + given(attempt.configuration()).willReturn(configuration); + given(configuration.getConfigData(LedgerConfig.class)).willReturn(ledgerConfig); + given(ledgerConfig.tokenRejectsMaxLen()).willReturn(10); + given(addressIdConverter.convert(asHeadlongAddress(SENDER_ID.accountNum()))) + .willReturn(SENDER_ID); + final var encoded = Bytes.wrapByteBuffer(RejectTokensTranslator.TOKEN_REJECT.encodeCall(Tuple.of( + asHeadlongAddress(SENDER_ID.accountNum()), + new Address[] {FUNGIBLE_TOKEN_HEADLONG_ADDRESS}, + new Tuple[] {}))); + + // when + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + + final var expected = TokenRejectTransactionBody.newBuilder() + .owner(SENDER_ID) + .rejections(TokenReference.newBuilder() + .fungibleToken(FUNGIBLE_TOKEN_ID) + 
.build()) + .owner(SENDER_ID) + .build(); + + // then + final var decoded = subject.decodeTokenRejects(attempt); + + assertNotNull(decoded); + assertEquals(expected, decoded.tokenReject()); + } + + @Test + void decodeHtsCallNFT() { + // given: + given(attempt.configuration()).willReturn(configuration); + given(configuration.getConfigData(LedgerConfig.class)).willReturn(ledgerConfig); + given(ledgerConfig.tokenRejectsMaxLen()).willReturn(10); + given(addressIdConverter.convert(asHeadlongAddress(SENDER_ID.accountNum()))) + .willReturn(SENDER_ID); + + final var encoded = Bytes.wrapByteBuffer(RejectTokensTranslator.TOKEN_REJECT.encodeCall( + Tuple.of(asHeadlongAddress(SENDER_ID.accountNum()), new Address[] {}, new Tuple[] { + Tuple.of(NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS, 1L) + }))); + + // when + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + + final var expected = TokenRejectTransactionBody.newBuilder() + .owner(SENDER_ID) + .rejections(TokenReference.newBuilder() + .nft(NftID.newBuilder() + .tokenId(NON_FUNGIBLE_TOKEN_ID) + .serialNumber(1L) + .build()) + .build()) + .owner(SENDER_ID) + .build(); + + // then + final var decoded = subject.decodeTokenRejects(attempt); + + assertNotNull(decoded); + assertEquals(expected, decoded.tokenReject()); + } + + @Test + void decodeFailsIfReferencesExceedLimits() { + // given: + given(attempt.configuration()).willReturn(configuration); + given(configuration.getConfigData(LedgerConfig.class)).willReturn(ledgerConfig); + given(ledgerConfig.tokenRejectsMaxLen()).willReturn(10); + + final var encoded = Bytes.wrapByteBuffer(RejectTokensTranslator.TOKEN_REJECT.encodeCall(Tuple.of( + OWNER_HEADLONG_ADDRESS, + new Address[] { + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS, + FUNGIBLE_TOKEN_HEADLONG_ADDRESS + }, + new Tuple[] { + Tuple.of(NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS, 1L), + 
Tuple.of(NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS, 1L), + Tuple.of(NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS, 1L), + Tuple.of(NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS, 1L), + Tuple.of(NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS, 1L), + Tuple.of(NON_FUNGIBLE_TOKEN_HEADLONG_ADDRESS, 1L), + }))); + + // when + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + + // then + assertThatExceptionOfType(HandleException.class) + .isThrownBy(() -> subject.decodeTokenRejects(attempt)) + .withMessage(TOKEN_REFERENCE_LIST_SIZE_LIMIT_EXCEEDED.toString()); + } + + @Test + void decodeHRCFungible() { + // given: + given(attempt.senderId()).willReturn(SENDER_ID); + + // when + given(attempt.redirectTokenId()).willReturn(FUNGIBLE_TOKEN_ID); + + final var expected = TokenRejectTransactionBody.newBuilder() + .rejections(TokenReference.newBuilder() + .fungibleToken(FUNGIBLE_TOKEN_ID) + .build()) + .owner(SENDER_ID) + .build(); + + // then + final var decoded = subject.decodeHrcTokenRejectFT(attempt); + + assertNotNull(decoded); + assertEquals(expected, decoded.tokenReject()); + } + + @Test + void decodeHRCNft() { + // given: + given(attempt.configuration()).willReturn(configuration); + given(configuration.getConfigData(LedgerConfig.class)).willReturn(ledgerConfig); + given(ledgerConfig.tokenRejectsMaxLen()).willReturn(10); + + final var encoded = + Bytes.wrapByteBuffer(RejectTokensTranslator.HRC_TOKEN_REJECT_NFT.encodeCall(Tuple.of(new long[] {1L}))); + + // when + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + given(attempt.senderId()).willReturn(SENDER_ID); + given(attempt.redirectTokenId()).willReturn(NON_FUNGIBLE_TOKEN_ID); + + final var expected = TokenRejectTransactionBody.newBuilder() + .rejections(TokenReference.newBuilder() + .nft(NftID.newBuilder() + .tokenId(NON_FUNGIBLE_TOKEN_ID) + .serialNumber(1L) + .build()) + .build()) + .owner(SENDER_ID) + .build(); + + // then + final var decoded = subject.decodeHrcTokenRejectNFT(attempt); + + assertNotNull(decoded); + 
assertEquals(expected, decoded.tokenReject()); + } + + @Test + void decodeFailsWhenHRCNftExceedsLimits() { + // given: + given(attempt.configuration()).willReturn(configuration); + given(configuration.getConfigData(LedgerConfig.class)).willReturn(ledgerConfig); + given(ledgerConfig.tokenRejectsMaxLen()).willReturn(2); + + final var encoded = Bytes.wrapByteBuffer( + RejectTokensTranslator.HRC_TOKEN_REJECT_NFT.encodeCall(Tuple.of(new long[] {1L, 2L, 3L}))); + + // when + given(attempt.inputBytes()).willReturn(encoded.toArrayUnsafe()); + + // then + assertThatExceptionOfType(HandleException.class) + .isThrownBy(() -> subject.decodeHrcTokenRejectNFT(attempt)) + .withMessage(TOKEN_REFERENCE_LIST_SIZE_LIMIT_EXCEEDED.toString()); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/rejecttokens/RejectTokensTranslatorTest.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/rejecttokens/RejectTokensTranslatorTest.java new file mode 100644 index 000000000000..62036e369899 --- /dev/null +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/rejecttokens/RejectTokensTranslatorTest.java @@ -0,0 +1,360 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.hts.rejecttokens; + +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.FUNGIBLE_TOKEN_ID; +import static com.hedera.node.app.service.contract.impl.test.TestHelpers.SENDER_ID; +import static com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.CallAttemptHelpers.prepareHtsAttemptWithSelectorAndCustomConfig; +import static com.hedera.node.app.service.contract.impl.test.exec.systemcontracts.CallAttemptHelpers.prepareHtsAttemptWithSelectorForRedirectWithConfig; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.verify; + +import com.hedera.hapi.node.base.AccountID; +import com.hedera.hapi.node.token.TokenReference; +import com.hedera.hapi.node.token.TokenRejectTransactionBody; +import com.hedera.hapi.node.transaction.TransactionBody; +import com.hedera.node.app.service.contract.impl.exec.gas.DispatchType; +import com.hedera.node.app.service.contract.impl.exec.gas.SystemContractGasCalculator; +import com.hedera.node.app.service.contract.impl.exec.scope.HederaNativeOperations; +import com.hedera.node.app.service.contract.impl.exec.scope.VerificationStrategies; +import com.hedera.node.app.service.contract.impl.exec.scope.VerificationStrategy; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.AddressIdConverter; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.DispatchForResponseCodeHtsCall; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.HtsCallAttempt; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.burn.BurnTranslator; +import 
com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.rejecttokens.RejectTokensDecoder; +import com.hedera.node.app.service.contract.impl.exec.systemcontracts.hts.rejecttokens.RejectTokensTranslator; +import com.hedera.node.app.service.contract.impl.hevm.HederaWorldUpdater.Enhancement; +import com.hedera.node.config.data.ContractsConfig; +import com.swirlds.config.api.Configuration; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +public class RejectTokensTranslatorTest { + + @Mock + private RejectTokensDecoder decoder; + + @Mock + private HtsCallAttempt attempt; + + @Mock + private ContractsConfig contractsConfig; + + @Mock + private Configuration configuration; + + @Mock + private Enhancement enhancement; + + @Mock + private AddressIdConverter addressIdConverter; + + @Mock + private VerificationStrategies verificationStrategies; + + @Mock + private VerificationStrategy verificationStrategy; + + @Mock + private SystemContractGasCalculator gasCalculator; + + @Mock + private HederaNativeOperations nativeOperations; + + @Mock + private TransactionBody transactionBody; + + @Mock + private AccountID payerId; + + private RejectTokensTranslator subject; + + @BeforeEach + void setUp() { + subject = new RejectTokensTranslator(decoder); + } + + @Test + void matchesHTSWithInvalidSig() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractRejectTokensEnabled()).willReturn(true); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + BurnTranslator.BURN_TOKEN_V1, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertFalse(matches); + } + + @Test 
+ void matchesHTSWithConfigEnabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractRejectTokensEnabled()).willReturn(true); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + RejectTokensTranslator.TOKEN_REJECT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertTrue(matches); + } + + @Test + void matchesHTSWithConfigDisabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractRejectTokensEnabled()).willReturn(false); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + RejectTokensTranslator.TOKEN_REJECT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertFalse(matches); + } + + @Test + void matchesFungibleHRCWithConfigEnabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractRejectTokensEnabled()).willReturn(true); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + attempt = prepareHtsAttemptWithSelectorForRedirectWithConfig( + RejectTokensTranslator.HRC_TOKEN_REJECT_FT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertTrue(matches); + } + + @Test + void matchesFungibleHRCWithConfigDisabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractRejectTokensEnabled()).willReturn(false); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + 
attempt = prepareHtsAttemptWithSelectorForRedirectWithConfig( + RejectTokensTranslator.HRC_TOKEN_REJECT_FT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertFalse(matches); + } + + @Test + void matchesNftHRCWithConfigEnabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractRejectTokensEnabled()).willReturn(true); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + attempt = prepareHtsAttemptWithSelectorForRedirectWithConfig( + RejectTokensTranslator.HRC_TOKEN_REJECT_NFT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertTrue(matches); + } + + @Test + void matchesNftHRCWithConfigDisabled() { + // given: + given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig); + given(contractsConfig.systemContractRejectTokensEnabled()).willReturn(false); + given(enhancement.nativeOperations()).willReturn(nativeOperations); + attempt = prepareHtsAttemptWithSelectorForRedirectWithConfig( + RejectTokensTranslator.HRC_TOKEN_REJECT_NFT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + boolean matches = subject.matches(attempt); + + // then: + assertFalse(matches); + } + + @Test + void gasRequirementCalculatesCorrectly() { + long expectedGas = 1000L; + final var body = TokenRejectTransactionBody.newBuilder() + .rejections(TokenReference.newBuilder() + .fungibleToken(FUNGIBLE_TOKEN_ID) + .build()) + .owner(SENDER_ID) + .build(); + given(gasCalculator.canonicalPriceInTinycents(DispatchType.TOKEN_REJECT_FT)) + .willReturn(expectedGas); + given(transactionBody.tokenReject()).willReturn(body); + 
given(gasCalculator.gasRequirementWithTinycents(transactionBody, payerId, expectedGas)) + .willReturn(expectedGas); + long result = RejectTokensTranslator.gasRequirement(transactionBody, gasCalculator, enhancement, payerId); + + assertEquals(expectedGas, result); + } + + @Test + void gasRequirementHRCFungible() { + long expectedGas = 1000L; + given(gasCalculator.gasRequirement(transactionBody, DispatchType.TOKEN_REJECT_FT, payerId)) + .willReturn(expectedGas); + long result = + RejectTokensTranslator.gasRequirementHRCFungible(transactionBody, gasCalculator, enhancement, payerId); + + assertEquals(expectedGas, result); + } + + @Test + void gasRequirementHRCNft() { + long expectedGas = 1000L; + given(gasCalculator.gasRequirement(transactionBody, DispatchType.TOKEN_REJECT_NFT, payerId)) + .willReturn(expectedGas); + long result = RejectTokensTranslator.gasRequirementHRCNft(transactionBody, gasCalculator, enhancement, payerId); + + assertEquals(expectedGas, result); + } + + @Test + void callFromHtsTokenReject() { + // given: + given(addressIdConverter.convertSender(any())).willReturn(SENDER_ID); + given(verificationStrategies.activatingOnlyContractKeysFor(any(), anyBoolean(), any())) + .willReturn(verificationStrategy); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + RejectTokensTranslator.TOKEN_REJECT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + var call = subject.callFrom(attempt); + + // then: + assertEquals(DispatchForResponseCodeHtsCall.class, call.getClass()); + verify(decoder).decodeTokenRejects(attempt); + } + + @Test + void callFromHRCCancelFTAirdrop() { + // given: + given(addressIdConverter.convertSender(any())).willReturn(SENDER_ID); + given(verificationStrategies.activatingOnlyContractKeysFor(any(), anyBoolean(), any())) + .willReturn(verificationStrategy); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + RejectTokensTranslator.HRC_TOKEN_REJECT_FT, + 
subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + var call = subject.callFrom(attempt); + + // then: + assertEquals(DispatchForResponseCodeHtsCall.class, call.getClass()); + verify(decoder).decodeHrcTokenRejectFT(attempt); + } + + @Test + void callFromHRCCancelNFTAirdrop() { + // given: + given(addressIdConverter.convertSender(any())).willReturn(SENDER_ID); + given(verificationStrategies.activatingOnlyContractKeysFor(any(), anyBoolean(), any())) + .willReturn(verificationStrategy); + attempt = prepareHtsAttemptWithSelectorAndCustomConfig( + RejectTokensTranslator.HRC_TOKEN_REJECT_NFT, + subject, + enhancement, + addressIdConverter, + verificationStrategies, + gasCalculator, + configuration); + + // when: + var call = subject.callFrom(attempt); + + // then: + assertEquals(DispatchForResponseCodeHtsCall.class, call.getClass()); + verify(decoder).decodeHrcTokenRejectNFT(attempt); + } +} diff --git a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/transfer/ClassicTransfersCallTest.java b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/transfer/ClassicTransfersCallTest.java index f993c993418b..4cc0b4a81274 100644 --- a/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/transfer/ClassicTransfersCallTest.java +++ b/hedera-node/hedera-smart-contract-service-impl/src/test/java/com/hedera/node/app/service/contract/impl/test/exec/systemcontracts/hts/transfer/ClassicTransfersCallTest.java @@ -97,8 +97,7 @@ void transferHappyPathCompletesWithSuccessResponseCode() { eq(ContractCallStreamBuilder.class))) .willReturn(recordBuilder); given(recordBuilder.status()).willReturn(SUCCESS); - given(systemContractOperations.activeSignatureTestWith(verificationStrategy)) - 
.willReturn(signatureTest); + given(systemContractOperations.signatureTestWith(verificationStrategy)).willReturn(signatureTest); given(approvalSwitchHelper.switchToApprovalsAsNeededIn( CryptoTransferTransactionBody.DEFAULT, signatureTest, nativeOperations, A_NEW_ACCOUNT_ID)) .willReturn(CryptoTransferTransactionBody.DEFAULT); @@ -130,8 +129,7 @@ void retryingTransferHappyPathCompletesWithSuccessResponseCode() { eq(ContractCallStreamBuilder.class))) .willReturn(recordBuilder); given(recordBuilder.status()).willReturn(SUCCESS); - given(systemContractOperations.activeSignatureTestWith(verificationStrategy)) - .willReturn(signatureTest); + given(systemContractOperations.signatureTestWith(verificationStrategy)).willReturn(signatureTest); given(approvalSwitchHelper.switchToApprovalsAsNeededIn( CryptoTransferTransactionBody.DEFAULT, signatureTest, nativeOperations, A_NEW_ACCOUNT_ID)) .willReturn(CryptoTransferTransactionBody.DEFAULT); @@ -156,8 +154,7 @@ void retryingTransferInvalidSignatureCompletesWithStandardizedResponseCode() { given(recordBuilder.status()) .willReturn(INVALID_SIGNATURE) .willReturn(INVALID_FULL_PREFIX_SIGNATURE_FOR_PRECOMPILE); - given(systemContractOperations.activeSignatureTestWith(verificationStrategy)) - .willReturn(signatureTest); + given(systemContractOperations.signatureTestWith(verificationStrategy)).willReturn(signatureTest); given(approvalSwitchHelper.switchToApprovalsAsNeededIn( CryptoTransferTransactionBody.DEFAULT, signatureTest, nativeOperations, A_NEW_ACCOUNT_ID)) .willReturn(CryptoTransferTransactionBody.DEFAULT); diff --git a/hedera-node/hedera-token-service-impl/src/main/java/com/hedera/node/app/service/token/impl/api/TokenServiceApiImpl.java b/hedera-node/hedera-token-service-impl/src/main/java/com/hedera/node/app/service/token/impl/api/TokenServiceApiImpl.java index dd8841a8f524..7c2fd136c3c8 100644 --- a/hedera-node/hedera-token-service-impl/src/main/java/com/hedera/node/app/service/token/impl/api/TokenServiceApiImpl.java +++ 
b/hedera-node/hedera-token-service-impl/src/main/java/com/hedera/node/app/service/token/impl/api/TokenServiceApiImpl.java @@ -65,8 +65,6 @@ */ public class TokenServiceApiImpl implements TokenServiceApi { private static final Logger logger = LogManager.getLogger(TokenServiceApiImpl.class); - private static final Key STANDIN_CONTRACT_KEY = - Key.newBuilder().contractID(ContractID.newBuilder().contractNum(0)).build(); private final WritableAccountStore accountStore; private final AccountID fundingAccountID; @@ -178,7 +176,9 @@ public void finalizeHollowAccountAsContract(@NonNull final AccountID hollowAccou } final var accountAsContract = hollowAccount .copyBuilder() - .key(STANDIN_CONTRACT_KEY) + .key(Key.newBuilder() + .contractID(ContractID.newBuilder().contractNum(hollowAccountId.accountNumOrThrow())) + .build()) .smartContract(true) .maxAutoAssociations(hollowAccount.numberAssociations()) .build(); diff --git a/hedera-node/hedera-token-service-impl/src/test/java/com/hedera/node/app/service/token/impl/test/api/TokenServiceApiImplTest.java b/hedera-node/hedera-token-service-impl/src/test/java/com/hedera/node/app/service/token/impl/test/api/TokenServiceApiImplTest.java index bb70fc35608f..8380e5a64d64 100644 --- a/hedera-node/hedera-token-service-impl/src/test/java/com/hedera/node/app/service/token/impl/test/api/TokenServiceApiImplTest.java +++ b/hedera-node/hedera-token-service-impl/src/test/java/com/hedera/node/app/service/token/impl/test/api/TokenServiceApiImplTest.java @@ -193,8 +193,10 @@ void refusesToUpdateKvCountsForNonContract() { @Test void finalizesHollowAccountAsContractAsExpected() { + final var numAssociations = 3; accountStore.put(Account.newBuilder() .accountId(CONTRACT_ACCOUNT_ID) + .numberAssociations(numAssociations) .key(IMMUTABILITY_SENTINEL_KEY) .build()); @@ -203,8 +205,9 @@ void finalizesHollowAccountAsContractAsExpected() { assertEquals(1, accountStore.sizeOfAccountState()); final var finalizedAccount = 
accountStore.getContractById(CONTRACT_ID_BY_NUM); assertNotNull(finalizedAccount); - assertEquals(STANDIN_CONTRACT_KEY, finalizedAccount.key()); + assertEquals(Key.newBuilder().contractID(CONTRACT_ID_BY_NUM).build(), finalizedAccount.key()); assertTrue(finalizedAccount.smartContract()); + assertEquals(finalizedAccount.maxAutoAssociations(), numAssociations); } @Test diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/fakes/FakeTssBaseService.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/fakes/FakeTssBaseService.java index 04c29d6ac293..691a861ac0d4 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/fakes/FakeTssBaseService.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/fakes/FakeTssBaseService.java @@ -22,10 +22,11 @@ import com.hedera.hapi.node.state.roster.Roster; import com.hedera.node.app.spi.AppContext; import com.hedera.node.app.spi.workflows.HandleContext; +import com.hedera.node.app.tss.PlaceholderTssLibrary; import com.hedera.node.app.tss.TssBaseService; import com.hedera.node.app.tss.TssBaseServiceImpl; import com.hedera.node.app.tss.handlers.TssHandlers; -import com.hedera.node.app.tss.stores.ReadableTssBaseStore; +import com.hedera.node.app.tss.stores.ReadableTssStoreImpl; import com.hedera.pbj.runtime.io.buffer.Bytes; import com.hedera.services.bdd.junit.HapiTest; import com.swirlds.common.utility.CommonUtils; @@ -83,7 +84,12 @@ public enum Signing { private final Queue pendingTssSubmission = new ArrayDeque<>(); public FakeTssBaseService(@NonNull final AppContext appContext) { - delegate = new TssBaseServiceImpl(appContext, ForkJoinPool.commonPool(), pendingTssSubmission::offer); + delegate = new TssBaseServiceImpl( + appContext, + ForkJoinPool.commonPool(), + pendingTssSubmission::offer, + new PlaceholderTssLibrary(), + ForkJoinPool.commonPool()); } /** @@ -134,7 
+140,7 @@ public void useRealSignatures() { public Status getStatus( @NonNull final Roster roster, @NonNull final Bytes ledgerId, - @NonNull final ReadableTssBaseStore tssBaseStore) { + @NonNull final ReadableTssStoreImpl tssBaseStore) { requireNonNull(roster); requireNonNull(ledgerId); requireNonNull(tssBaseStore); diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/translators/impl/TokenClaimAirdropSystemContractTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/translators/impl/TokenClaimAirdropSystemContractTest.java new file mode 100644 index 000000000000..11a8a14abc56 --- /dev/null +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/translators/impl/TokenClaimAirdropSystemContractTest.java @@ -0,0 +1,403 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.services.bdd.junit.support.translators.impl; + +import static com.hedera.services.bdd.junit.TestTags.SMART_CONTRACT; +import static com.hedera.services.bdd.spec.HapiSpec.hapiTest; +import static com.hedera.services.bdd.spec.assertions.TransactionRecordAsserts.includingFungiblePendingAirdrop; +import static com.hedera.services.bdd.spec.assertions.TransactionRecordAsserts.includingNftPendingAirdrop; +import static com.hedera.services.bdd.spec.assertions.TransactionRecordAsserts.recordWith; +import static com.hedera.services.bdd.spec.queries.QueryVerbs.getTxnRecord; +import static com.hedera.services.bdd.spec.transactions.TxnVerbs.tokenAirdrop; +import static com.hedera.services.bdd.spec.transactions.token.TokenMovement.moving; +import static com.hedera.services.bdd.spec.transactions.token.TokenMovement.movingUnique; +import static com.hedera.services.bdd.spec.utilops.CustomSpecAssert.allRunFor; +import static com.hedera.services.bdd.spec.utilops.UtilVerbs.withOpContext; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.CONTRACT_REVERT_EXECUTED; + +import com.esaulpaugh.headlong.abi.Address; +import com.hedera.services.bdd.junit.HapiTest; +import com.hedera.services.bdd.junit.HapiTestLifecycle; +import com.hedera.services.bdd.junit.OrderedInIsolation; +import com.hedera.services.bdd.junit.support.TestLifecycle; +import com.hedera.services.bdd.spec.HapiSpec; +import com.hedera.services.bdd.spec.SpecOperation; +import com.hedera.services.bdd.spec.dsl.annotations.Account; +import com.hedera.services.bdd.spec.dsl.annotations.Contract; +import com.hedera.services.bdd.spec.dsl.annotations.FungibleToken; +import com.hedera.services.bdd.spec.dsl.annotations.NonFungibleToken; +import com.hedera.services.bdd.spec.dsl.entities.SpecAccount; +import com.hedera.services.bdd.spec.dsl.entities.SpecContract; +import com.hedera.services.bdd.spec.dsl.entities.SpecFungibleToken; +import 
com.hedera.services.bdd.spec.dsl.entities.SpecNonFungibleToken; +import com.hedera.services.bdd.spec.dsl.operations.queries.GetBalanceOperation; +import com.hedera.services.bdd.spec.transactions.token.TokenMovement; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.stream.Stream; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DynamicTest; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Tag; + +@Tag(SMART_CONTRACT) +@HapiTestLifecycle +@OrderedInIsolation +class TokenClaimAirdropSystemContractTest { + + @Contract(contract = "ClaimAirdrop", creationGas = 1_000_000L) + static SpecContract claimAirdrop; + + @Account(name = "sender", tinybarBalance = 100_000_000_000L) + static SpecAccount sender; + + @Account(name = "receiver", maxAutoAssociations = 0) + static SpecAccount receiver; + + @FungibleToken(name = "token", initialSupply = 1000) + static SpecFungibleToken token; + + @BeforeAll + public static void setUp(final @NonNull TestLifecycle lifecycle) { + lifecycle.doAdhoc( + sender.authorizeContract(claimAirdrop), + receiver.authorizeContract(claimAirdrop), + sender.associateTokens(token), + token.treasury().transferUnitsTo(sender, 1000, token)); + } + + @Order(0) + @HapiTest + @DisplayName("Can claim 1 fungible airdrop") + public Stream claimAirdrop() { + return hapiTest( + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 0)), + tokenAirdrop(moving(10, token.name()).between(sender.name(), receiver.name())) + .payingWith(sender.name()) + .via("tokenAirdrop"), + getTxnRecord("tokenAirdrop") + .hasPriority(recordWith() + .pendingAirdrops(includingFungiblePendingAirdrop( + moving(10, token.name()).between(sender.name(), receiver.name())))), + claimAirdrop + .call("claim", sender, receiver, token) + .payingWith(receiver) + 
.via("claimAirdrop"), + getTxnRecord("claimAirdrop").hasPriority(recordWith().pendingAirdropsCount(0)), + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 10))); + } + + @Order(1) + @HapiTest + @DisplayName("Can claim 1 nft airdrop") + public Stream claimNftAirdrop(@NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft) { + return hapiTest( + sender.associateTokens(nft), + nft.treasury().transferNFTsTo(sender, nft, 1L), + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 0)), + tokenAirdrop(movingUnique(nft.name(), 1L).between(sender.name(), receiver.name())) + .payingWith(sender.name()) + .via("tokenAirdrop"), + getTxnRecord("tokenAirdrop") + .hasPriority(recordWith() + .pendingAirdrops(includingNftPendingAirdrop( + movingUnique(nft.name(), 1L).between(sender.name(), receiver.name())))), + claimAirdrop + .call("claimNFTAirdrop", sender, receiver, nft, 1L) + .payingWith(receiver) + .via("claimNFTAirdrop"), + getTxnRecord("claimNFTAirdrop").hasPriority(recordWith().pendingAirdropsCount(0)), + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 1))); + } + + @Order(2) + @HapiTest + @DisplayName("Can claim 10 fungible airdrops") + public Stream claim10Airdrops( + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token1, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token2, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token3, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token4, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token5, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft1, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft2, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft3, + @NonNull @NonFungibleToken(numPreMints = 1) final 
SpecNonFungibleToken nft4, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft5) { + final var tokenList = List.of(token1, token2, token3, token4, token5); + final var nftList = List.of(nft1, nft2, nft3, nft4, nft5); + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor(spec, prepareTokensAndBalances(sender, receiver, tokenList, nftList)); + prepareAirdrops(tokenList, nftList, spec); + final var senders = prepareSenderAddresses( + spec, sender, sender, sender, sender, sender, sender, sender, sender, sender, sender); + final var receivers = prepareReceiverAddresses( + spec, receiver, receiver, receiver, receiver, receiver, receiver, receiver, receiver, receiver, + receiver); + final var tokens = prepareTokenAddresses(spec, token1, token2, token3, token4, token5); + final var nfts = prepareNftAddresses(spec, nft1, nft2, nft3, nft4, nft5); + final var combined = + Stream.concat(Arrays.stream(tokens), Arrays.stream(nfts)).toArray(Address[]::new); + final var serials = new long[] {0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L}; + allRunFor( + spec, + claimAirdrop + .call("claimAirdrops", senders, receivers, combined, serials) + .via("claimAirdrops"), + getTxnRecord("claimAirdrops").hasPriority(recordWith().pendingAirdropsCount(0)), + checkForBalances(receiver, tokenList, nftList)); + })); + } + + @Order(3) + @HapiTest + @DisplayName("Can claim 3 fungible airdrops") + public Stream claim3Airdrops( + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token1, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token2, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token3) { + final var tokenList = List.of(token1, token2, token3); + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor(spec, prepareTokensAndBalances(sender, receiver, tokenList, List.of())); + prepareAirdrops(tokenList, List.of(), spec); + final var senders = prepareSenderAddresses(spec, 
sender, sender, sender); + final var receivers = prepareReceiverAddresses(spec, receiver, receiver, receiver); + final var tokens = prepareTokenAddresses(spec, token1, token2, token3); + final var serials = new long[] {0L, 0L, 0L}; + allRunFor( + spec, + claimAirdrop + .call("claimAirdrops", senders, receivers, tokens, serials) + .via("claimAirdrops"), + getTxnRecord("claimAirdrops").hasPriority(recordWith().pendingAirdropsCount(0)), + checkForBalances(receiver, tokenList, List.of())); + })); + } + + @Order(4) + @HapiTest + @DisplayName("Fails to claim 11 pending airdrops") + public Stream failToClaim11Airdrops( + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token1, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token2, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token3, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token4, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token5, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token6, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft1, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft2, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft3, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft4, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft5) { + final var tokenList = List.of(token1, token2, token3, token4, token5, token6); + final var nftList = List.of(nft1, nft2, nft3, nft4, nft5); + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor(spec, prepareTokensAndBalances(sender, receiver, tokenList, nftList)); + // Spread transactions to avoid hitting the max airdrops limit + prepareAirdrops(List.of(token1, token2, token3), List.of(), spec); + prepareAirdrops(List.of(token4, token5, token6), 
List.of(), spec); + prepareAirdrops(List.of(), nftList, spec); + final var senders = prepareSenderAddresses( + spec, sender, sender, sender, sender, sender, sender, sender, sender, sender, sender, sender); + final var receivers = prepareReceiverAddresses( + spec, receiver, receiver, receiver, receiver, receiver, receiver, receiver, receiver, receiver, + receiver, receiver); + final var tokens = prepareTokenAddresses(spec, token1, token2, token3, token4, token5); + final var nfts = prepareNftAddresses(spec, nft1, nft2, nft3, nft4, nft5); + final var combined = + Stream.concat(Arrays.stream(tokens), Arrays.stream(nfts)).toArray(Address[]::new); + final var serials = new long[] {0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L}; + allRunFor( + spec, + claimAirdrop + .call("claimAirdrops", senders, receivers, combined, serials) + .via("claimAirdrops") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + })); + } + + @Order(5) + @HapiTest + @DisplayName("Fails to claim pending airdrop with invalid token") + public Stream failToClaim1AirdropWithInvalidToken() { + return hapiTest(claimAirdrop + .call("claim", sender, receiver, receiver) + .payingWith(sender) + .via("claimAirdrop") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + } + + @Order(6) + @HapiTest + @DisplayName("Fails to claim pending airdrop with invalid sender") + public Stream failToClaim1AirdropWithInvalidSender() { + return hapiTest(claimAirdrop + .call("claim", token, receiver, token) + .payingWith(sender) + .via("claimAirdrop") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + } + + @Order(7) + @HapiTest + @DisplayName("Fails to claim airdrop having no pending airdrops") + public Stream failToClaimAirdropWhenThereAreNoPending() { + return hapiTest(claimAirdrop + .call("claim", sender, receiver, token) + .payingWith(sender) + .via("claimAirdrop") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + } + + @Order(8) + @HapiTest + 
@DisplayName("Fails to claim pending airdrop with invalid receiver") + public Stream failToClaim1AirdropWithInvalidReceiver() { + return hapiTest(claimAirdrop + .call("claim", sender, token, token) + .payingWith(sender) + .via("claimAirdrop") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + } + + @Order(9) + @HapiTest + @DisplayName("Fails to claim nft airdrop with invalid nft") + public Stream failToClaim1AirdropWithInvalidNft() { + return hapiTest(claimAirdrop + .call("claimNFTAirdrop", sender, receiver, receiver, 1L) + .payingWith(sender) + .via("claimAirdrop") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + } + + @Order(10) + @HapiTest + @DisplayName("Fails to claim nft airdrop with invalid nft serial") + public Stream failToClaim1AirdropWithInvalidSerial(@NonFungibleToken final SpecNonFungibleToken nft) { + return hapiTest( + sender.associateTokens(nft), + claimAirdrop + .call("claimNFTAirdrop", sender, receiver, nft, 1L) + .payingWith(sender) + .via("claimAirdrop") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + } + + @Order(11) + @HapiTest + private void prepareAirdrops( + @NonNull List tokens, @NonNull List nfts, @NonNull HapiSpec spec) { + var tokenMovements = prepareFTAirdrops(sender, receiver, tokens); + var nftMovements = prepareNFTAirdrops(sender, receiver, nfts); + allRunFor( + spec, + tokenAirdrop(Stream.of(tokenMovements, nftMovements) + .flatMap(Collection::stream) + .toArray(TokenMovement[]::new)) + .payingWith(sender.name()) + .via("tokenAirdrop"), + getTxnRecord("tokenAirdrop") + .hasPriority(recordWith() + .pendingAirdrops( + includingFungiblePendingAirdrop(tokenMovements.toArray(TokenMovement[]::new))) + .pendingAirdrops( + includingNftPendingAirdrop(nftMovements.toArray(TokenMovement[]::new))))); + } + + private SpecOperation[] prepareTokensAndBalances( + final SpecAccount sender, + final SpecAccount receiver, + final List tokens, + final List nfts) { + ArrayList specOperations 
= new ArrayList<>(); + specOperations.addAll(List.of( + sender.associateTokens(tokens.toArray(SpecFungibleToken[]::new)), + sender.associateTokens(nfts.toArray(SpecNonFungibleToken[]::new)), + checkForEmptyBalance(receiver, tokens, nfts))); + specOperations.addAll(tokens.stream() + .map(token -> token.treasury().transferUnitsTo(sender, 1_000L, token)) + .toList()); + specOperations.addAll(nfts.stream() + .map(nft -> nft.treasury().transferNFTsTo(sender, nft, 1L)) + .toList()); + + return specOperations.toArray(SpecOperation[]::new); + } + + private GetBalanceOperation checkForEmptyBalance( + final SpecAccount receiver, final List tokens, final List nfts) { + return receiver.getBalance().andAssert(balance -> { + tokens.forEach(token -> balance.hasTokenBalance(token.name(), 0L)); + nfts.forEach(nft -> balance.hasTokenBalance(nft.name(), 0L)); + }); + } + + private GetBalanceOperation checkForBalances( + final SpecAccount receiver, final List tokens, final List nfts) { + return receiver.getBalance().andAssert(balance -> { + tokens.forEach(token -> balance.hasTokenBalance(token.name(), 10L)); + nfts.forEach(nft -> balance.hasTokenBalance(nft.name(), 1L)); + }); + } + + private Address[] prepareSenderAddresses(@NonNull HapiSpec spec, @NonNull SpecAccount... senders) { + return Arrays.stream(senders) + .map(sender -> sender.addressOn(spec.targetNetworkOrThrow())) + .toArray(Address[]::new); + } + + private Address[] prepareReceiverAddresses(@NonNull HapiSpec spec, @NonNull SpecAccount... receivers) { + return Arrays.stream(receivers) + .map(receiver -> receiver.addressOn(spec.targetNetworkOrThrow())) + .toArray(Address[]::new); + } + + private Address[] prepareTokenAddresses(@NonNull HapiSpec spec, @NonNull SpecFungibleToken... tokens) { + return Arrays.stream(tokens) + .map(token -> token.addressOn(spec.targetNetworkOrThrow())) + .toArray(Address[]::new); + } + + private Address[] prepareNftAddresses(@NonNull HapiSpec spec, @NonNull SpecNonFungibleToken... 
nfts) { + return Arrays.stream(nfts) + .map(nft -> nft.addressOn(spec.targetNetworkOrThrow())) + .toArray(Address[]::new); + } + + private List prepareFTAirdrops( + @NonNull final SpecAccount sender, + @NonNull final SpecAccount receiver, + @NonNull final List tokens) { + return tokens.stream() + .map(token -> moving(10, token.name()).between(sender.name(), receiver.name())) + .toList(); + } + + private List prepareNFTAirdrops( + @NonNull final SpecAccount sender, + @NonNull final SpecAccount receiver, + @NonNull final List nfts) { + return nfts.stream() + .map(nft -> movingUnique(nft.name(), 1L).between(sender.name(), receiver.name())) + .toList(); + } +} diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/validators/block/StateChangesValidator.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/validators/block/StateChangesValidator.java index c597803d96fc..a97026932e2f 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/validators/block/StateChangesValidator.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/validators/block/StateChangesValidator.java @@ -21,7 +21,6 @@ import static com.hedera.node.app.hapi.utils.CommonUtils.sha384DigestOrThrow; import static com.hedera.node.app.info.UnavailableNetworkInfo.UNAVAILABLE_NETWORK_INFO; import static com.hedera.node.app.spi.AppContext.Gossip.UNAVAILABLE_GOSSIP; -import static com.hedera.node.app.workflows.handle.metric.UnavailableMetrics.UNAVAILABLE_METRICS; import static com.hedera.services.bdd.junit.hedera.ExternalPath.APPLICATION_PROPERTIES; import static com.hedera.services.bdd.junit.hedera.ExternalPath.SAVED_STATES_DIR; import static com.hedera.services.bdd.junit.hedera.ExternalPath.SWIRLDS_LOG; @@ -64,7 +63,7 @@ import com.hedera.node.app.ids.EntityIdService; import com.hedera.node.app.info.GenesisNetworkInfo; import com.hedera.node.app.records.BlockRecordService; 
-import com.hedera.node.app.roster.RosterServiceImpl; +import com.hedera.node.app.roster.RosterService; import com.hedera.node.app.service.addressbook.impl.AddressBookServiceImpl; import com.hedera.node.app.service.consensus.impl.ConsensusServiceImpl; import com.hedera.node.app.service.contract.impl.ContractServiceImpl; @@ -81,6 +80,7 @@ import com.hedera.node.app.spi.signatures.SignatureVerifier; import com.hedera.node.app.state.recordcache.RecordCacheService; import com.hedera.node.app.throttle.CongestionThrottleService; +import com.hedera.node.app.tss.PlaceholderTssLibrary; import com.hedera.node.app.tss.TssBaseServiceImpl; import com.hedera.node.app.version.ServicesSoftwareVersion; import com.hedera.node.config.VersionedConfiguration; @@ -93,6 +93,7 @@ import com.hedera.services.bdd.junit.support.BlockStreamAccess; import com.hedera.services.bdd.junit.support.BlockStreamValidator; import com.hedera.services.bdd.spec.HapiSpec; +import com.swirlds.common.RosterStateId; import com.swirlds.common.constructable.ConstructableRegistry; import com.swirlds.common.context.PlatformContext; import com.swirlds.common.crypto.Hash; @@ -164,7 +165,7 @@ public static void main(String[] args) { .normalize(); final var validator = new StateChangesValidator( Bytes.fromHex( - "f31a2b563cbe3fef1242bd94bc610fc5134267faa7f3fefc5de176cc1f4032f28d5b27f084bbc388c5a766e4d057acdd"), + "bc49350852851a2c737ef6b5db24da8ba108401952ec207a1a5a4230de8d8a626da1f3663f0560bd6cf401c601b08896"), node0Dir.resolve("output/swirlds.log"), node0Dir.resolve("genesis-config.txt"), node0Dir.resolve("data/config/application.properties"), @@ -251,7 +252,9 @@ public StateChangesValidator( final var configVersion = bootstrapConfig.getConfigData(HederaConfig.class).configVersion(); final var currentVersion = new ServicesSoftwareVersion(servicesVersion, configVersion); - final var lifecycles = newPlatformInitLifecycle(bootstrapConfig, currentVersion, migrator, servicesRegistry); + final var metrics = new 
NoOpMetrics(); + final var lifecycles = + newPlatformInitLifecycle(bootstrapConfig, currentVersion, migrator, servicesRegistry, metrics); this.state = new MerkleStateRoot(lifecycles, version -> new ServicesSoftwareVersion(version, configVersion)); initGenesisPlatformState( new FakePlatformContext(NodeId.of(0), Executors.newSingleThreadScheduledExecutor()), @@ -270,7 +273,7 @@ public StateChangesValidator( new ServicesSoftwareVersion(servicesVersion, configVersion), new ConfigProviderImpl().getConfiguration(), networkInfo, - new NoOpMetrics()); + metrics); logger.info("Registered all Service and migrated state definitions to version {}", servicesVersion); } @@ -532,7 +535,12 @@ private void registerServices( new ConsensusServiceImpl(), new ContractServiceImpl(appContext), new FileServiceImpl(), - new TssBaseServiceImpl(appContext, ForkJoinPool.commonPool(), ForkJoinPool.commonPool()), + new TssBaseServiceImpl( + appContext, + ForkJoinPool.commonPool(), + ForkJoinPool.commonPool(), + new PlaceholderTssLibrary(), + ForkJoinPool.commonPool()), new FreezeServiceImpl(), new ScheduleServiceImpl(), new TokenServiceImpl(), @@ -544,7 +552,7 @@ private void registerServices( new CongestionThrottleService(), new NetworkServiceImpl(), new AddressBookServiceImpl(), - new RosterServiceImpl(), + new RosterService(), PLATFORM_STATE_SERVICE) .forEach(servicesRegistry::register); } @@ -648,19 +656,21 @@ private static MerkleStateLifecycles newPlatformInitLifecycle( @NonNull final Configuration bootstrapConfig, @NonNull final SoftwareVersion currentVersion, @NonNull final OrderedServiceMigrator serviceMigrator, - @NonNull final ServicesRegistryImpl servicesRegistry) { + @NonNull final ServicesRegistryImpl servicesRegistry, + @NonNull final NoOpMetrics metrics) { return new MerkleStateLifecycles() { @Override public List initPlatformState(@NonNull final State state) { final var deserializedVersion = serviceMigrator.creationVersionOf(state); return serviceMigrator.doMigrations( state, - 
servicesRegistry.subRegistryFor(EntityIdService.NAME, PlatformStateService.NAME), + servicesRegistry.subRegistryFor( + EntityIdService.NAME, PlatformStateService.NAME, RosterStateId.NAME), deserializedVersion == null ? null : new ServicesSoftwareVersion(deserializedVersion), currentVersion, bootstrapConfig, UNAVAILABLE_NETWORK_INFO, - UNAVAILABLE_METRICS); + metrics); } @Override diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/assertions/ContractInfoAsserts.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/assertions/ContractInfoAsserts.java index 82c1c7ecd92d..683fde28ef7e 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/assertions/ContractInfoAsserts.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/assertions/ContractInfoAsserts.java @@ -16,7 +16,6 @@ package com.hedera.services.bdd.spec.assertions; -import static com.hedera.services.bdd.suites.HapiSuite.STANDIN_CONTRACT_ID_KEY; import static com.hederahashgraph.api.proto.java.ContractGetInfoResponse.ContractInfo; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -214,12 +213,6 @@ public ContractInfoAsserts adminKey(String expectedKeyName) { return this; } - public ContractInfoAsserts hasStandinContractKey() { - registerProvider((spec, o) -> - assertEquals(STANDIN_CONTRACT_ID_KEY, object2ContractInfo(o).getAdminKey(), BAD_ADMIN_KEY)); - return this; - } - public ContractInfoAsserts defaultAdminKey() { registerProvider((spec, o) -> { final var contractId = object2ContractInfo(o).getContractID(); diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/dsl/contracts/TokenRedirectContract.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/dsl/contracts/TokenRedirectContract.java index 0f8a357dd509..fd047c5e1b23 100644 --- 
a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/dsl/contracts/TokenRedirectContract.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/dsl/contracts/TokenRedirectContract.java @@ -22,6 +22,10 @@ */ public enum TokenRedirectContract { HRC("HRC"), + // TODO: Update this to HRC904 once all tests are merged + HRC904CLAIM("HRC904TokenClaim"), + HRC904CANCEL("HRC904TokenCancel"), + HRC904REJECT("HRC904Reject"), ERC20("ERC20ABI"), ERC721("ERC721ABI"); diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/queries/schedule/HapiGetScheduleInfo.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/queries/schedule/HapiGetScheduleInfo.java index 202b2bdb9fb8..a5de85c47e3a 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/queries/schedule/HapiGetScheduleInfo.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/queries/schedule/HapiGetScheduleInfo.java @@ -28,6 +28,7 @@ import com.hedera.services.bdd.spec.queries.HapiQueryOp; import com.hedera.services.bdd.spec.transactions.TxnUtils; import com.hederahashgraph.api.proto.java.HederaFunctionality; +import com.hederahashgraph.api.proto.java.Key; import com.hederahashgraph.api.proto.java.KeyList; import com.hederahashgraph.api.proto.java.Query; import com.hederahashgraph.api.proto.java.ResponseType; @@ -192,11 +193,10 @@ protected void assertExpectationsGiven(HapiSpec spec) { var registry = spec.registry(); - expectedSignatories.ifPresent(s -> { - var expect = KeyList.newBuilder(); - for (String signatory : s) { - var key = registry.getKey(signatory); - expect.addKeys(key); + expectedSignatories.ifPresent(signatories -> { + final var expect = KeyList.newBuilder(); + for (final var signatory : signatories) { + accumulateSimple(registry.getKey(signatory), expect); } Assertions.assertArrayEquals( expect.build().getKeysList().toArray(), @@ -222,6 +222,16 @@ protected void 
assertExpectationsGiven(HapiSpec spec) { expectedLedgerId.ifPresent(id -> Assertions.assertEquals(id, actualInfo.getLedgerId())); } + private static void accumulateSimple(@NonNull final Key key, @NonNull final KeyList.Builder builder) { + if (key.hasEd25519() || key.hasECDSASecp256K1()) { + builder.addKeys(key); + } else if (key.hasKeyList()) { + key.getKeyList().getKeysList().forEach(k -> accumulateSimple(k, builder)); + } else if (key.hasThresholdKey()) { + key.getThresholdKey().getKeys().getKeysList().forEach(k -> accumulateSimple(k, builder)); + } + } + private void assertTimestampMatches(String txn, int nanoOffset, Timestamp actual, String errMsg, HapiSpec spec) { var subOp = getTxnRecord(txn); allRunFor(spec, subOp); diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/HapiSuite.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/HapiSuite.java index f882089461fd..a6e04ef39722 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/HapiSuite.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/HapiSuite.java @@ -25,7 +25,6 @@ import com.hedera.services.bdd.spec.SpecOperation; import com.hedera.services.bdd.spec.infrastructure.HapiClients; import com.hedera.services.bdd.spec.keys.KeyShape; -import com.hederahashgraph.api.proto.java.ContractID; import com.hederahashgraph.api.proto.java.Key; import com.hederahashgraph.api.proto.java.KeyList; import edu.umd.cs.findbugs.annotations.NonNull; @@ -90,9 +89,6 @@ private static HapiSpec specFrom(@NonNull final DynamicTest test) { public static final Key EMPTY_KEY = Key.newBuilder().setKeyList(KeyList.newBuilder().build()).build(); - public static final Key STANDIN_CONTRACT_ID_KEY = Key.newBuilder() - .setContractID(ContractID.newBuilder().setContractNum(0).build()) - .build(); private static final int BYTES_PER_KB = 1024; public static final int MAX_CALL_DATA_SIZE = 6 * BYTES_PER_KB; public static final 
BigInteger WEIBARS_IN_A_TINYBAR = BigInteger.valueOf(10_000_000_000L); diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/opcodes/Create2OperationSuite.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/opcodes/Create2OperationSuite.java index 0e4d4f5212b1..aa0734755e34 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/opcodes/Create2OperationSuite.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/opcodes/Create2OperationSuite.java @@ -1029,7 +1029,7 @@ contract, GET_BYTECODE, asHeadlongAddress(factoryEvmAddress.get()), salt) // check created contract sourcing(() -> getContractInfo(mergedAliasAddr.get()) .has(contractWith() - .hasStandinContractKey() + .defaultAdminKey() // fix here .maxAutoAssociations(0) .memo(LAZY_MEMO) @@ -1131,7 +1131,7 @@ contract, GET_BYTECODE, asHeadlongAddress(factoryEvmAddress.get()), salt) // check created contract sourcing(() -> getContractInfo(mergedAliasAddr.get()) .has(contractWith() - .hasStandinContractKey() + .defaultAdminKey() .maxAutoAssociations(1) .hasAlreadyUsedAutomaticAssociations(1) .memo(LAZY_MEMO) @@ -1235,7 +1235,7 @@ contract, GET_BYTECODE, asHeadlongAddress(factoryEvmAddress.get()), salt) // check created contract sourcing(() -> getContractInfo(mergedAliasAddr.get()) .has(contractWith() - .hasStandinContractKey() + .defaultAdminKey() .numKvPairs(2) .maxAutoAssociations(2) .hasAlreadyUsedAutomaticAssociations(2) @@ -1331,7 +1331,7 @@ contract, GET_BYTECODE, asHeadlongAddress(factoryEvmAddress.get()), salt) // check created contract sourcing(() -> getContractInfo(mergedAliasAddr.get()) .has(contractWith() - .hasStandinContractKey() + .defaultAdminKey() .maxAutoAssociations(1) .hasAlreadyUsedAutomaticAssociations(1) .memo(LAZY_MEMO) @@ -1446,7 +1446,7 @@ contract, GET_BYTECODE, asHeadlongAddress(factoryEvmAddress.get()), salt) // check created contract sourcing(() 
-> getContractInfo(mergedAliasAddr.get()) .has(contractWith() - .hasStandinContractKey() + .defaultAdminKey() .maxAutoAssociations(1) .hasAlreadyUsedAutomaticAssociations(1) .memo(LAZY_MEMO) diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/AirdropSystemContractTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/AirdropSystemContractTest.java new file mode 100644 index 000000000000..9c3a1fdc75e2 --- /dev/null +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/AirdropSystemContractTest.java @@ -0,0 +1,520 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.services.bdd.suites.contract.precompile; + +import static com.hedera.services.bdd.junit.TestTags.SMART_CONTRACT; +import static com.hedera.services.bdd.spec.HapiSpec.hapiTest; +import static com.hedera.services.bdd.spec.utilops.CustomSpecAssert.allRunFor; +import static com.hedera.services.bdd.spec.utilops.UtilVerbs.withOpContext; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.CONTRACT_REVERT_EXECUTED; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INSUFFICIENT_TOKEN_BALANCE; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INVALID_ACCOUNT_ID; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INVALID_NFT_ID; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INVALID_TOKEN_ID; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INVALID_TOKEN_NFT_SERIAL_NUMBER; + +import com.esaulpaugh.headlong.abi.Address; +import com.hedera.services.bdd.junit.HapiTest; +import com.hedera.services.bdd.junit.HapiTestLifecycle; +import com.hedera.services.bdd.junit.OrderedInIsolation; +import com.hedera.services.bdd.junit.support.TestLifecycle; +import com.hedera.services.bdd.spec.HapiSpec; +import com.hedera.services.bdd.spec.dsl.annotations.Account; +import com.hedera.services.bdd.spec.dsl.annotations.Contract; +import com.hedera.services.bdd.spec.dsl.annotations.FungibleToken; +import com.hedera.services.bdd.spec.dsl.annotations.NonFungibleToken; +import com.hedera.services.bdd.spec.dsl.entities.SpecAccount; +import com.hedera.services.bdd.spec.dsl.entities.SpecContract; +import com.hedera.services.bdd.spec.dsl.entities.SpecFungibleToken; +import com.hedera.services.bdd.spec.dsl.entities.SpecNonFungibleToken; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Arrays; +import java.util.stream.Stream; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DynamicTest; +import 
org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Tag; + +@Tag(SMART_CONTRACT) +@HapiTestLifecycle +@OrderedInIsolation +public class AirdropSystemContractTest { + + @Contract(contract = "Airdrop", creationGas = 20_000_000L) + static SpecContract airdropContract; + + @Account(name = "sender", tinybarBalance = 10_000_000_000L) + static SpecAccount sender; + + @BeforeAll + public static void beforeAll(final @NonNull TestLifecycle lifecycle) { + lifecycle.doAdhoc( + sender.authorizeContract(airdropContract), sender.transferHBarsTo(airdropContract, 5_000_000_000L)); + } + + @HapiTest + @Order(1) + @DisplayName("Airdrop token") + public Stream airdropToken( + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token) { + return hapiTest( + sender.associateTokens(token), + token.treasury().transferUnitsTo(sender, 500_000L, token), + airdropContract + .call("tokenAirdrop", token, sender, receiver, 10L) + .gas(1500000), + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 10L))); + } + + @HapiTest + @Order(2) + @DisplayName("Airdrop NFT") + public Stream airdropNft( + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft) { + return hapiTest( + sender.associateTokens(nft), + nft.treasury().transferNFTsTo(sender, nft, 1L), + airdropContract.call("nftAirdrop", nft, sender, receiver, 1L).gas(1500000), + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 1L)), + nft.serialNo(1L).assertOwnerIs(receiver)); + } + + @HapiTest + @Order(3) + @DisplayName("Multiple Airdrop token transactions") + public Stream airdropTokens( + @NonNull @FungibleToken(name = "token1", initialSupply = 1_000_000L) final SpecFungibleToken token1, + @NonNull @FungibleToken(name = "token2", initialSupply = 1_000_000L) final SpecFungibleToken token2, + 
@NonNull @FungibleToken(name = "token3", initialSupply = 1_000_000L) final SpecFungibleToken token3, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver1, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver2, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver3) { + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor( + spec, + sender.associateTokens(token1, token2, token3), + token1.treasury().transferUnitsTo(sender, 1_000L, token1), + token2.treasury().transferUnitsTo(sender, 1_000L, token2), + token3.treasury().transferUnitsTo(sender, 1_000L, token3)); + allRunFor( + spec, + receiver1.getBalance().andAssert(balance -> balance.hasTokenBalance(token1.name(), 0L)), + receiver2.getBalance().andAssert(balance -> balance.hasTokenBalance(token2.name(), 0L)), + receiver3.getBalance().andAssert(balance -> balance.hasTokenBalance(token3.name(), 0L))); + allRunFor( + spec, + airdropContract + .call( + "tokenNAmountAirdrops", + prepareTokenAddresses(spec, token1, token2, token3), + prepareSenderAddresses(spec, sender, sender, sender), + prepareReceiverAddresses(spec, receiver1, receiver2, receiver3), + 10L) + .gas(1500000)); + allRunFor( + spec, + receiver1.getBalance().andAssert(balance -> balance.hasTokenBalance(token1.name(), 10L)), + receiver2.getBalance().andAssert(balance -> balance.hasTokenBalance(token2.name(), 10L)), + receiver3.getBalance().andAssert(balance -> balance.hasTokenBalance(token3.name(), 10L))); + })); + } + + @HapiTest + @Order(4) + @DisplayName("Multiple Airdrop NFT transactions") + public Stream airdropNfts( + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft1, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft2, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft3, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver1, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount 
receiver2, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver3) { + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor( + spec, + sender.associateTokens(nft1, nft2, nft3), + nft1.treasury().transferNFTsTo(sender, nft1, 1L), + nft2.treasury().transferNFTsTo(sender, nft2, 1L), + nft3.treasury().transferNFTsTo(sender, nft3, 1L)); + allRunFor( + spec, + receiver1.getBalance().andAssert(balance -> balance.hasTokenBalance(nft1.name(), 0L)), + receiver2.getBalance().andAssert(balance -> balance.hasTokenBalance(nft2.name(), 0L)), + receiver3.getBalance().andAssert(balance -> balance.hasTokenBalance(nft3.name(), 0L))); + final var serials = new long[] {1L, 1L, 1L}; + allRunFor( + spec, + airdropContract + .call( + "nftNAmountAirdrops", + prepareNftAddresses(spec, nft1, nft2, nft3), + prepareSenderAddresses(spec, sender, sender, sender), + prepareReceiverAddresses(spec, receiver1, receiver2, receiver3), + serials) + .gas(1500000)); + allRunFor( + spec, + receiver1.getBalance().andAssert(balance -> balance.hasTokenBalance(nft1.name(), 1L)), + receiver2.getBalance().andAssert(balance -> balance.hasTokenBalance(nft2.name(), 1L)), + receiver3.getBalance().andAssert(balance -> balance.hasTokenBalance(nft3.name(), 1L)), + nft1.serialNo(1L).assertOwnerIs(receiver1), + nft2.serialNo(1L).assertOwnerIs(receiver2), + nft3.serialNo(1L).assertOwnerIs(receiver3)); + })); + } + + @HapiTest + @Order(5) + @DisplayName("Airdrop token and NFT") + public Stream airdropTokenAndNft( + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token1, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token2, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token3, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft1, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft2, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken 
nft3, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver1, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver2, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver3, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver4, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver5, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver6) { + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor( + spec, + sender.associateTokens(token1, token2, token3), + token1.treasury().transferUnitsTo(sender, 1_000L, token1), + token2.treasury().transferUnitsTo(sender, 1_000L, token2), + token3.treasury().transferUnitsTo(sender, 1_000L, token3)); + allRunFor( + spec, + sender.associateTokens(nft1, nft2, nft3), + nft1.treasury().transferNFTsTo(sender, nft1, 1L), + nft2.treasury().transferNFTsTo(sender, nft2, 1L), + nft3.treasury().transferNFTsTo(sender, nft3, 1L)); + allRunFor( + spec, + receiver1.getBalance().andAssert(balance -> balance.hasTokenBalance(token1.name(), 0L)), + receiver2.getBalance().andAssert(balance -> balance.hasTokenBalance(token2.name(), 0L)), + receiver3.getBalance().andAssert(balance -> balance.hasTokenBalance(token3.name(), 0L))); + allRunFor( + spec, + receiver4.getBalance().andAssert(balance -> balance.hasTokenBalance(nft1.name(), 0L)), + receiver5.getBalance().andAssert(balance -> balance.hasTokenBalance(nft2.name(), 0L)), + receiver6.getBalance().andAssert(balance -> balance.hasTokenBalance(nft3.name(), 0L))); + final var serials = new long[] {1L, 1L, 1L}; + allRunFor( + spec, + airdropContract + .call( + "mixedAirdrop", + prepareTokenAddresses(spec, token1, token2, token3), + prepareNftAddresses(spec, nft1, nft2, nft3), + prepareSenderAddresses(spec, sender, sender, sender), + prepareReceiverAddresses(spec, receiver1, receiver2, receiver3), + prepareSenderAddresses(spec, sender, sender, sender), + prepareReceiverAddresses(spec, 
receiver4, receiver5, receiver6), + 10L, + serials) + .gas(1750000)); + allRunFor( + spec, + receiver1.getBalance().andAssert(balance -> balance.hasTokenBalance(token1.name(), 10L)), + receiver2.getBalance().andAssert(balance -> balance.hasTokenBalance(token2.name(), 10L)), + receiver3.getBalance().andAssert(balance -> balance.hasTokenBalance(token3.name(), 10L)), + receiver4.getBalance().andAssert(balance -> balance.hasTokenBalance(nft1.name(), 1L)), + receiver5.getBalance().andAssert(balance -> balance.hasTokenBalance(nft2.name(), 1L)), + receiver6.getBalance().andAssert(balance -> balance.hasTokenBalance(nft3.name(), 1L)), + nft1.serialNo(1L).assertOwnerIs(receiver4), + nft2.serialNo(1L).assertOwnerIs(receiver5), + nft3.serialNo(1L).assertOwnerIs(receiver6)); + })); + } + + @HapiTest + @Order(6) + @DisplayName("Airdrop 10 token and NFT") + public Stream airdrop10TokenAndNft( + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token1, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token2, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token3, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token4, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token5, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft1, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft2, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft3, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft4, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft5, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver1, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver2, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver3, + @NonNull @Account(maxAutoAssociations = -1) final 
SpecAccount receiver4, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver5, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver6, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver7, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver8, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver9, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver10) { + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor( + spec, + sender.associateTokens(token1, token2, token3, token4, token5), + token1.treasury().transferUnitsTo(sender, 1_000L, token1), + token2.treasury().transferUnitsTo(sender, 1_000L, token2), + token3.treasury().transferUnitsTo(sender, 1_000L, token3), + token4.treasury().transferUnitsTo(sender, 1_000L, token4), + token5.treasury().transferUnitsTo(sender, 1_000L, token5)); + allRunFor( + spec, + sender.associateTokens(nft1, nft2, nft3, nft4, nft5), + nft1.treasury().transferNFTsTo(sender, nft1, 1L), + nft2.treasury().transferNFTsTo(sender, nft2, 1L), + nft3.treasury().transferNFTsTo(sender, nft3, 1L), + nft4.treasury().transferNFTsTo(sender, nft4, 1L), + nft5.treasury().transferNFTsTo(sender, nft5, 1L)); + allRunFor( + spec, + receiver1.getBalance().andAssert(balance -> balance.hasTokenBalance(token1.name(), 0L)), + receiver2.getBalance().andAssert(balance -> balance.hasTokenBalance(token2.name(), 0L)), + receiver3.getBalance().andAssert(balance -> balance.hasTokenBalance(token3.name(), 0L)), + receiver4.getBalance().andAssert(balance -> balance.hasTokenBalance(token4.name(), 0L)), + receiver5.getBalance().andAssert(balance -> balance.hasTokenBalance(token5.name(), 0L)), + receiver6.getBalance().andAssert(balance -> balance.hasTokenBalance(nft1.name(), 0L)), + receiver7.getBalance().andAssert(balance -> balance.hasTokenBalance(nft2.name(), 0L)), + receiver8.getBalance().andAssert(balance -> balance.hasTokenBalance(nft3.name(), 
0L)), + receiver9.getBalance().andAssert(balance -> balance.hasTokenBalance(nft4.name(), 0L)), + receiver10.getBalance().andAssert(balance -> balance.hasTokenBalance(nft5.name(), 0L))); + final var serials = new long[] {1L, 1L, 1L, 1L, 1L}; + allRunFor( + spec, + airdropContract + .call( + "mixedAirdrop", + prepareTokenAddresses(spec, token1, token2, token3, token4, token5), + prepareNftAddresses(spec, nft1, nft2, nft3, nft4, nft5), + prepareSenderAddresses(spec, sender, sender, sender, sender, sender), + prepareReceiverAddresses( + spec, receiver1, receiver2, receiver3, receiver4, receiver5), + prepareSenderAddresses(spec, sender, sender, sender, sender, sender), + prepareReceiverAddresses( + spec, receiver6, receiver7, receiver8, receiver9, receiver10), + 10L, + serials) + .gas(1550000)); + allRunFor( + spec, + receiver1.getBalance().andAssert(balance -> balance.hasTokenBalance(token1.name(), 10L)), + receiver2.getBalance().andAssert(balance -> balance.hasTokenBalance(token2.name(), 10L)), + receiver3.getBalance().andAssert(balance -> balance.hasTokenBalance(token3.name(), 10L)), + receiver4.getBalance().andAssert(balance -> balance.hasTokenBalance(token4.name(), 10L)), + receiver5.getBalance().andAssert(balance -> balance.hasTokenBalance(token5.name(), 10L)), + receiver6.getBalance().andAssert(balance -> balance.hasTokenBalance(nft1.name(), 1L)), + receiver7.getBalance().andAssert(balance -> balance.hasTokenBalance(nft2.name(), 1L)), + receiver8.getBalance().andAssert(balance -> balance.hasTokenBalance(nft3.name(), 1L)), + receiver9.getBalance().andAssert(balance -> balance.hasTokenBalance(nft4.name(), 1L)), + receiver10.getBalance().andAssert(balance -> balance.hasTokenBalance(nft5.name(), 1L)), + nft1.serialNo(1L).assertOwnerIs(receiver6), + nft2.serialNo(1L).assertOwnerIs(receiver7), + nft3.serialNo(1L).assertOwnerIs(receiver8), + nft4.serialNo(1L).assertOwnerIs(receiver9), + nft5.serialNo(1L).assertOwnerIs(receiver10)); + })); + } + + @HapiTest + @Order(7) + 
@DisplayName("Should fail Airdrop 11 token and NFT") + public Stream airdrop11TokenAndNft( + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token1, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token2, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token3, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token4, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token5, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft1, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft2, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft3, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft4, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft5, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft6, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver1, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver2, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver3, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver4, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver5, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver6, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver7, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver8, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver9, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver10, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver11) { + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor( + spec, + sender.associateTokens(token1, token2, token3, token4, token5), + 
token1.treasury().transferUnitsTo(sender, 1_000L, token1), + token2.treasury().transferUnitsTo(sender, 1_000L, token2), + token3.treasury().transferUnitsTo(sender, 1_000L, token3), + token4.treasury().transferUnitsTo(sender, 1_000L, token4), + token5.treasury().transferUnitsTo(sender, 1_000L, token5)); + allRunFor( + spec, + sender.associateTokens(nft1, nft2, nft3, nft4, nft5, nft6), + nft1.treasury().transferNFTsTo(sender, nft1, 1L), + nft2.treasury().transferNFTsTo(sender, nft2, 1L), + nft3.treasury().transferNFTsTo(sender, nft3, 1L), + nft4.treasury().transferNFTsTo(sender, nft4, 1L), + nft5.treasury().transferNFTsTo(sender, nft5, 1L), + nft6.treasury().transferNFTsTo(sender, nft6, 1L)); + allRunFor( + spec, + receiver1.getBalance().andAssert(balance -> balance.hasTokenBalance(token1.name(), 0L)), + receiver2.getBalance().andAssert(balance -> balance.hasTokenBalance(token2.name(), 0L)), + receiver3.getBalance().andAssert(balance -> balance.hasTokenBalance(token3.name(), 0L)), + receiver4.getBalance().andAssert(balance -> balance.hasTokenBalance(token4.name(), 0L)), + receiver5.getBalance().andAssert(balance -> balance.hasTokenBalance(token5.name(), 0L)), + receiver6.getBalance().andAssert(balance -> balance.hasTokenBalance(nft1.name(), 0L)), + receiver7.getBalance().andAssert(balance -> balance.hasTokenBalance(nft2.name(), 0L)), + receiver8.getBalance().andAssert(balance -> balance.hasTokenBalance(nft3.name(), 0L)), + receiver9.getBalance().andAssert(balance -> balance.hasTokenBalance(nft4.name(), 0L)), + receiver10.getBalance().andAssert(balance -> balance.hasTokenBalance(nft5.name(), 0L)), + receiver11.getBalance().andAssert(balance -> balance.hasTokenBalance(nft6.name(), 0L))); + final var serials = new long[] {1L, 1L, 1L, 1L, 1L, 1L}; + allRunFor( + spec, + airdropContract + .call( + "mixedAirdrop", + prepareTokenAddresses(spec, token1, token2, token3, token4, token5), + prepareNftAddresses(spec, nft1, nft2, nft3, nft4, nft5), + prepareSenderAddresses(spec, 
sender, sender, sender, sender, sender), + prepareReceiverAddresses( + spec, receiver1, receiver2, receiver3, receiver4, receiver5), + prepareSenderAddresses(spec, sender, sender, sender, sender, sender, sender), + prepareReceiverAddresses( + spec, receiver6, receiver7, receiver8, receiver9, receiver10, receiver11), + 10L, + serials) + .gas(1550000) + .andAssert(txn -> txn.hasKnownStatuses(CONTRACT_REVERT_EXECUTED))); + allRunFor( + spec, + receiver1.getBalance().andAssert(balance -> balance.hasTokenBalance(token1.name(), 0L)), + receiver2.getBalance().andAssert(balance -> balance.hasTokenBalance(token2.name(), 0L)), + receiver3.getBalance().andAssert(balance -> balance.hasTokenBalance(token3.name(), 0L)), + receiver4.getBalance().andAssert(balance -> balance.hasTokenBalance(token4.name(), 0L)), + receiver5.getBalance().andAssert(balance -> balance.hasTokenBalance(token5.name(), 0L)), + receiver6.getBalance().andAssert(balance -> balance.hasTokenBalance(nft1.name(), 0L)), + receiver7.getBalance().andAssert(balance -> balance.hasTokenBalance(nft2.name(), 0L)), + receiver8.getBalance().andAssert(balance -> balance.hasTokenBalance(nft3.name(), 0L)), + receiver9.getBalance().andAssert(balance -> balance.hasTokenBalance(nft4.name(), 0L)), + receiver10.getBalance().andAssert(balance -> balance.hasTokenBalance(nft5.name(), 0L)), + receiver11.getBalance().andAssert(balance -> balance.hasTokenBalance(nft6.name(), 0L))); + })); + } + + @HapiTest + @Order(8) + @DisplayName("Airdrop fails when the sender does not have enough balance") + public Stream airdropFailsWhenSenderDoesNotHaveEnoughBalance( + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token) { + return hapiTest( + sender.associateTokens(token), + airdropContract + .call("tokenAirdrop", token, sender, receiver, 10L) + .gas(1500000) + .andAssert(txn -> txn.hasKnownStatuses(CONTRACT_REVERT_EXECUTED, 
INSUFFICIENT_TOKEN_BALANCE))); + } + + @HapiTest + @Order(9) + @DisplayName("Airdrop fails when the receiver does not have a valid account") + public Stream airdropFailsWhenReceiverDoesNotHaveValidAccount( + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken tokenAsReceiver) { + return hapiTest( + sender.associateTokens(token), + token.treasury().transferUnitsTo(sender, 500_000L, token), + airdropContract + .call("tokenAirdrop", token, sender, tokenAsReceiver, 10L) + .gas(1500000) + .andAssert(txn -> txn.hasKnownStatuses(CONTRACT_REVERT_EXECUTED, INVALID_ACCOUNT_ID))); + } + + @HapiTest + @Order(10) + @DisplayName("Airdrop fails when the token does not exist") + public Stream airdropFailsWhenTokenDoesNotExist( + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver, + @NonNull @Account final SpecAccount accountAsToken) { + return hapiTest(airdropContract + .call("tokenAirdrop", accountAsToken, sender, receiver, 10L) + .gas(1500000) + .andAssert(txn -> txn.hasKnownStatuses(CONTRACT_REVERT_EXECUTED, INVALID_TOKEN_ID))); + } + + @HapiTest + @Order(11) + @DisplayName("Airdrop fails with nft serials out of bound") + public Stream failToUpdateNFTsMetadata( + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft, + @NonNull @Account(maxAutoAssociations = -1) final SpecAccount receiver) { + return hapiTest( + sender.associateTokens(nft), + airdropContract + .call("nftAirdrop", nft, sender, receiver, Long.MAX_VALUE) + .gas(1500000) + .andAssert(txn -> txn.hasKnownStatuses(CONTRACT_REVERT_EXECUTED, INVALID_NFT_ID)), + airdropContract + .call("nftAirdrop", nft, sender, receiver, 0L) + .gas(1500000) + .andAssert( + txn -> txn.hasKnownStatuses(CONTRACT_REVERT_EXECUTED, INVALID_TOKEN_NFT_SERIAL_NUMBER)), + airdropContract + .call("nftAirdrop", nft, sender, receiver, -1L) + .gas(1500000) + .andAssert(txn -> + 
txn.hasKnownStatuses(CONTRACT_REVERT_EXECUTED, INVALID_TOKEN_NFT_SERIAL_NUMBER))); + } + + private Address[] prepareReceiverAddresses(@NonNull HapiSpec spec, @NonNull SpecAccount... receivers) { + return Arrays.stream(receivers) + .map(receiver -> receiver.addressOn(spec.targetNetworkOrThrow())) + .toArray(Address[]::new); + } + + private Address[] prepareTokenAddresses(@NonNull HapiSpec spec, @NonNull SpecFungibleToken... tokens) { + return Arrays.stream(tokens) + .map(token -> token.addressOn(spec.targetNetworkOrThrow())) + .toArray(Address[]::new); + } + + private Address[] prepareNftAddresses(@NonNull HapiSpec spec, @NonNull SpecNonFungibleToken... nfts) { + return Arrays.stream(nfts) + .map(nft -> nft.addressOn(spec.targetNetworkOrThrow())) + .toArray(Address[]::new); + } + + private Address[] prepareSenderAddresses(@NonNull HapiSpec spec, @NonNull SpecAccount... senders) { + return Arrays.stream(senders) + .map(sender -> sender.addressOn(spec.targetNetworkOrThrow())) + .toArray(Address[]::new); + } +} diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/AtomicCryptoTransferHTSSuite.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/AtomicCryptoTransferHTSSuite.java index 558ea4e00fff..de9ee9b1e02b 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/AtomicCryptoTransferHTSSuite.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/AtomicCryptoTransferHTSSuite.java @@ -1437,4 +1437,42 @@ final Stream blockCryptoTransferForPermittedDelegates() { .contractCallResult( htsPrecompileResult().withStatus(SPENDER_DOES_NOT_HAVE_ALLOWANCE))))); } + + @HapiTest + final Stream nullContractAdminKeyTransfer() { + final var nullAdminKeyXferTxn = "nullAdminKeyXferTxn"; + return hapiTest( + cryptoCreate(RECEIVER), + uploadInitCode(CONTRACT), + contractCreate(CONTRACT).omitAdminKey(), + 
cryptoTransfer(tinyBarsFromTo(GENESIS, CONTRACT, ONE_HUNDRED_HBARS)), + withOpContext((spec, opLog) -> { + final var receiver = spec.registry().getAccountID(RECEIVER); + final var contract = spec.registry().getAccountID(CONTRACT); + final var amountToBeSent = 50 * ONE_HBAR; + allRunFor( + spec, + contractCall( + CONTRACT, + TRANSFER_MULTIPLE_TOKENS, + transferList() + .withAccountAmounts( + accountAmount(contract, -amountToBeSent, false), + accountAmount(receiver, amountToBeSent, false)) + .build(), + EMPTY_TUPLE_ARRAY) + .payingWith(GENESIS) + .via(nullAdminKeyXferTxn) + .gas(GAS_TO_OFFER)); + }), + childRecordsCheck( + nullAdminKeyXferTxn, + SUCCESS, + recordWith() + .status(SUCCESS) + .contractCallResult(resultWith() + .contractCallResult( + htsPrecompileResult().withStatus(SUCCESS))) + .transfers(including(tinyBarsFromTo(CONTRACT, RECEIVER, 50 * ONE_HBAR))))); + } } diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/HRCSetUnlimitedAutoAssociationsTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/HRCSetUnlimitedAutoAssociationsTest.java new file mode 100644 index 000000000000..188ed48b0338 --- /dev/null +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/HRCSetUnlimitedAutoAssociationsTest.java @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.services.bdd.suites.contract.precompile; + +import static com.hedera.services.bdd.junit.TestTags.SMART_CONTRACT; +import static com.hedera.services.bdd.spec.HapiPropertySource.idAsHeadlongAddress; +import static com.hedera.services.bdd.spec.HapiSpec.hapiTest; +import static com.hedera.services.bdd.spec.assertions.ContractFnResultAsserts.isLiteralResult; +import static com.hedera.services.bdd.spec.assertions.ContractFnResultAsserts.resultWith; +import static com.hedera.services.bdd.spec.assertions.TransactionRecordAsserts.recordWith; +import static com.hedera.services.bdd.spec.queries.QueryVerbs.getAccountInfo; +import static com.hedera.services.bdd.spec.queries.QueryVerbs.getTxnRecord; +import static com.hedera.services.bdd.spec.transactions.TxnVerbs.contractCallWithFunctionAbi; +import static com.hedera.services.bdd.spec.transactions.TxnVerbs.cryptoCreate; +import static com.hedera.services.bdd.spec.utilops.CustomSpecAssert.allRunFor; +import static com.hedera.services.bdd.spec.utilops.UtilVerbs.withOpContext; +import static com.hedera.services.bdd.suites.HapiSuite.ONE_HUNDRED_HBARS; +import static com.hedera.services.bdd.suites.contract.Utils.getABIFor; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SUCCESS; + +import com.esaulpaugh.headlong.abi.Address; +import com.hedera.services.bdd.junit.HapiTest; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Stream; +import org.junit.jupiter.api.DynamicTest; +import org.junit.jupiter.api.Tag; + +@Tag(SMART_CONTRACT) +public class HRCSetUnlimitedAutoAssociationsTest { + + @HapiTest + public Stream hrcSetUnlimitedAutoAssociations() { + final AtomicReference
    accountNum = new AtomicReference<>(); + return hapiTest( + cryptoCreate("account") + .balance(100 * ONE_HUNDRED_HBARS) + .maxAutomaticTokenAssociations(0) + .exposingCreatedIdTo(id -> accountNum.set(idAsHeadlongAddress(id))), + withOpContext((spec, opLog) -> allRunFor( + spec, + contractCallWithFunctionAbi( + "0.0." + accountNum.get().value(), + getABIFor( + com.hedera.services.bdd.suites.contract.Utils.FunctionType.FUNCTION, + "setUnlimitedAutomaticAssociations", + "IHRC904UnlimitedAutoAssociations"), + true) + .via("setUnlimitedAutoAssociations") + .payingWith("account") + .gas(1_000_000L), + getTxnRecord("setUnlimitedAutoAssociations") + .logged() + .hasPriority(recordWith() + .status(SUCCESS) + .contractCallResult(resultWith() + .resultThruAbi( + getABIFor( + com.hedera.services.bdd.suites.contract.Utils + .FunctionType.FUNCTION, + "setUnlimitedAutomaticAssociations", + "IHRC904UnlimitedAutoAssociations"), + isLiteralResult(new Object[] {Long.valueOf(22)})))), + getAccountInfo("account").hasMaxAutomaticAssociations(-1)))); + } + + @HapiTest + public Stream hrcSetDisabledAutoAssociations() { + final AtomicReference
    accountNum = new AtomicReference<>(); + return hapiTest( + cryptoCreate("account") + .balance(100 * ONE_HUNDRED_HBARS) + .maxAutomaticTokenAssociations(10) + .exposingCreatedIdTo(id -> accountNum.set(idAsHeadlongAddress(id))), + withOpContext((spec, opLog) -> allRunFor( + spec, + contractCallWithFunctionAbi( + "0.0." + accountNum.get().value(), + getABIFor( + com.hedera.services.bdd.suites.contract.Utils.FunctionType.FUNCTION, + "setUnlimitedAutomaticAssociations", + "IHRC904UnlimitedAutoAssociations"), + false) + .via("setUnlimitedAutoAssociations") + .payingWith("account") + .gas(1_000_000L), + getTxnRecord("setUnlimitedAutoAssociations") + .logged() + .hasPriority(recordWith() + .status(SUCCESS) + .contractCallResult(resultWith() + .resultThruAbi( + getABIFor( + com.hedera.services.bdd.suites.contract.Utils + .FunctionType.FUNCTION, + "setUnlimitedAutomaticAssociations", + "IHRC904UnlimitedAutoAssociations"), + isLiteralResult(new Object[] {Long.valueOf(22)})))), + getAccountInfo("account").hasMaxAutomaticAssociations(0)))); + } +} diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/HRCTokenCancelTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/HRCTokenCancelTest.java new file mode 100644 index 000000000000..462fb11d795f --- /dev/null +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/HRCTokenCancelTest.java @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.hedera.services.bdd.suites.contract.precompile; + +import static com.hedera.services.bdd.junit.TestTags.SMART_CONTRACT; +import static com.hedera.services.bdd.spec.HapiSpec.hapiTest; +import static com.hedera.services.bdd.spec.dsl.contracts.TokenRedirectContract.HRC904CANCEL; +import static com.hedera.services.bdd.spec.transactions.TxnVerbs.tokenAirdrop; +import static com.hedera.services.bdd.spec.transactions.token.TokenMovement.moving; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INVALID_PENDING_AIRDROP_ID; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SUCCESS; + +import com.hedera.services.bdd.junit.HapiTest; +import com.hedera.services.bdd.junit.HapiTestLifecycle; +import com.hedera.services.bdd.junit.OrderedInIsolation; +import com.hedera.services.bdd.junit.support.TestLifecycle; +import com.hedera.services.bdd.spec.dsl.annotations.Account; +import com.hedera.services.bdd.spec.dsl.annotations.FungibleToken; +import com.hedera.services.bdd.spec.dsl.annotations.NonFungibleToken; +import com.hedera.services.bdd.spec.dsl.entities.SpecAccount; +import com.hedera.services.bdd.spec.dsl.entities.SpecFungibleToken; +import com.hedera.services.bdd.spec.dsl.entities.SpecNonFungibleToken; +import com.hedera.services.bdd.spec.transactions.token.TokenMovement; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.stream.Stream; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DynamicTest; +import 
org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Tag; + +@Tag(SMART_CONTRACT) +@HapiTestLifecycle +@OrderedInIsolation +public class HRCTokenCancelTest { + + @Account(name = "sender", tinybarBalance = 100_000_000_000L) + static SpecAccount sender; + + @Account(name = "receiver", maxAutoAssociations = 0) + static SpecAccount receiver; + + @FungibleToken(name = "token", initialSupply = 1_000_000L) + static SpecFungibleToken token; + + @NonFungibleToken(name = "nft", numPreMints = 1) + static SpecNonFungibleToken nft; + + @BeforeAll + public static void setup(@NonNull final TestLifecycle lifecycle) { + lifecycle.doAdhoc( + sender.associateTokens(token, nft), + token.treasury().transferUnitsTo(sender, 10L, token), + nft.treasury().transferNFTsTo(sender, nft, 1L)); + } + + @HapiTest + @Order(1) + @DisplayName("Can cancel airdrop of fungible token") + public Stream canCancelAirdropOfFungibleToken() { + return hapiTest( + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 0L)), + tokenAirdrop(moving(10L, token.name()).between(sender.name(), receiver.name())) + .payingWith(sender.name()), + token.call(HRC904CANCEL, "cancelAirdropFT", receiver).with(call -> call.payingWith(sender.name()))); + } + + @HapiTest + @Order(2) + @DisplayName("Can cancel airdrop of nft token") + public Stream canCancelAirdropOfNftToken() { + return hapiTest( + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 0L)), + tokenAirdrop(TokenMovement.movingUnique(nft.name(), 1L).between(sender.name(), receiver.name())) + .payingWith(sender.name()), + nft.call(HRC904CANCEL, "cancelAirdropNFT", receiver, 1L).with(call -> call.payingWith(sender.name()))); + } + + @HapiTest + @Order(3) + @DisplayName("Cannot cancel airdrop if not existing") + public Stream cannotCancelAirdropWhenNotExisting() { + return hapiTest(token.call(HRC904CANCEL, "cancelAirdropFT", receiver) + .with(call -> call.payingWith(sender.name())) + .andAssert(txn -> 
txn.hasKnownStatuses(SUCCESS, INVALID_PENDING_AIRDROP_ID))); + } + + @HapiTest + @Order(4) + @DisplayName("Cannot cancel airdrop if receiver not existing") + public Stream cannotCancelAirdropWhenReceiverNotExisting() { + return hapiTest(token.call(HRC904CANCEL, "cancelAirdropFT", token) + .with(call -> call.payingWith(sender.name())) + .andAssert(txn -> txn.hasKnownStatuses(SUCCESS, INVALID_PENDING_AIRDROP_ID))); + } + + @HapiTest + @Order(5) + @DisplayName("Cannot cancel nft airdrop if not existing") + public Stream cannotCancelNftAirdropWhenNotExisting() { + return hapiTest(nft.call(HRC904CANCEL, "cancelAirdropNFT", receiver, 1L) + .with(call -> call.payingWith(sender.name())) + .andAssert(txn -> txn.hasKnownStatuses(SUCCESS, INVALID_PENDING_AIRDROP_ID))); + } + + @HapiTest + @Order(6) + @DisplayName("Cannot cancel nft airdrop if receiver not existing") + public Stream cannotCancelNftAirdropWhenReceiverNotExisting() { + return hapiTest(nft.call(HRC904CANCEL, "cancelAirdropNFT", nft, 1L) + .with(call -> call.payingWith(sender.name())) + .andAssert(txn -> txn.hasKnownStatuses(SUCCESS, INVALID_PENDING_AIRDROP_ID))); + } +} diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/HRCTokenClaimTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/HRCTokenClaimTest.java new file mode 100644 index 000000000000..07b2a4ac9277 --- /dev/null +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/HRCTokenClaimTest.java @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.hedera.services.bdd.suites.contract.precompile; + +import static com.hedera.services.bdd.junit.TestTags.SMART_CONTRACT; +import static com.hedera.services.bdd.spec.HapiSpec.hapiTest; +import static com.hedera.services.bdd.spec.dsl.contracts.TokenRedirectContract.HRC904CLAIM; +import static com.hedera.services.bdd.spec.transactions.TxnVerbs.tokenAirdrop; +import static com.hedera.services.bdd.spec.transactions.token.TokenMovement.moving; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INVALID_PENDING_AIRDROP_ID; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SUCCESS; + +import com.hedera.services.bdd.junit.HapiTest; +import com.hedera.services.bdd.junit.HapiTestLifecycle; +import com.hedera.services.bdd.junit.OrderedInIsolation; +import com.hedera.services.bdd.junit.support.TestLifecycle; +import com.hedera.services.bdd.spec.dsl.annotations.Account; +import com.hedera.services.bdd.spec.dsl.annotations.FungibleToken; +import com.hedera.services.bdd.spec.dsl.annotations.NonFungibleToken; +import com.hedera.services.bdd.spec.dsl.entities.SpecAccount; +import com.hedera.services.bdd.spec.dsl.entities.SpecFungibleToken; +import com.hedera.services.bdd.spec.dsl.entities.SpecNonFungibleToken; +import com.hedera.services.bdd.spec.transactions.token.TokenMovement; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.stream.Stream; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DynamicTest; +import 
org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Tag; + +@Tag(SMART_CONTRACT) +@HapiTestLifecycle +@OrderedInIsolation +public class HRCTokenClaimTest { + + @Account(name = "sender", tinybarBalance = 100_000_000_000L) + static SpecAccount sender; + + @Account(name = "receiver", tinybarBalance = 100_000_000_000L, maxAutoAssociations = 0) + static SpecAccount receiver; + + @FungibleToken(name = "token", initialSupply = 1_000_000L) + static SpecFungibleToken token; + + @NonFungibleToken(name = "nft", numPreMints = 1) + static SpecNonFungibleToken nft; + + @BeforeAll + public static void setup(@NonNull final TestLifecycle lifecycle) { + lifecycle.doAdhoc( + sender.associateTokens(token, nft), + token.treasury().transferUnitsTo(sender, 10L, token), + nft.treasury().transferNFTsTo(sender, nft, 1L)); + } + + @Order(0) + @HapiTest + @DisplayName("Can claim airdrop of fungible token") + public Stream canClaimAirdropOfFungibleToken() { + return hapiTest( + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 0L)), + tokenAirdrop(moving(10L, token.name()).between(sender.name(), receiver.name())) + .payingWith(sender.name()), + token.call(HRC904CLAIM, "claimAirdropFT", sender) + .payingWith(receiver) + .with(call -> call.signingWith(receiver.name())), + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 10L))); + } + + @Order(1) + @HapiTest + @DisplayName("Can claim airdrop of nft token") + public Stream canClaimAirdropOfNftToken() { + return hapiTest( + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 0L)), + tokenAirdrop(TokenMovement.movingUnique(nft.name(), 1L).between(sender.name(), receiver.name())) + .payingWith(sender.name()), + nft.call(HRC904CLAIM, "claimAirdropNFT", sender, 1L) + .payingWith(receiver) + .with(call -> call.signingWith(receiver.name())), + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 1L))); + } + + @Order(2) + @HapiTest + 
@DisplayName("Cannot claim airdrop if not existing") + public Stream cannotClaimAirdropWhenNotExisting() { + return hapiTest(token.call(HRC904CLAIM, "claimAirdropFT", sender) + .payingWith(receiver) + .with(call -> call.signingWith(receiver.name())) + .andAssert(txn -> txn.hasKnownStatuses(SUCCESS, INVALID_PENDING_AIRDROP_ID))); + } + + @Order(3) + @HapiTest + @DisplayName("Cannot claim airdrop if sender not existing") + public Stream cannotClaimAirdropWhenSenderNotExisting() { + return hapiTest(token.call(HRC904CLAIM, "claimAirdropFT", token) + .payingWith(receiver) + .with(call -> call.signingWith(receiver.name())) + .andAssert(txn -> txn.hasKnownStatuses(SUCCESS, INVALID_PENDING_AIRDROP_ID))); + } + + @Order(4) + @HapiTest + @DisplayName("Cannot claim nft airdrop if not existing") + public Stream cannotClaimNftAirdropWhenNotExisting() { + return hapiTest(nft.call(HRC904CLAIM, "claimAirdropNFT", sender, 1L) + .payingWith(receiver) + .with(call -> call.signingWith(receiver.name())) + .andAssert(txn -> txn.hasKnownStatuses(SUCCESS, INVALID_PENDING_AIRDROP_ID))); + } + + @Order(5) + @HapiTest + @DisplayName("Cannot claim nft airdrop if sender not existing") + public Stream cannotClaimNftAirdropWhenSenderNotExisting() { + return hapiTest(nft.call(HRC904CLAIM, "claimAirdropNFT", nft, 1L) + .payingWith(receiver) + .with(call -> call.signingWith(receiver.name())) + .andAssert(txn -> txn.hasKnownStatuses(SUCCESS, INVALID_PENDING_AIRDROP_ID))); + } +} diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/HRCTokenRejectTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/HRCTokenRejectTest.java new file mode 100644 index 000000000000..227402cb2f18 --- /dev/null +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/HRCTokenRejectTest.java @@ -0,0 +1,134 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the 
Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.hedera.services.bdd.suites.contract.precompile; + +import static com.hedera.services.bdd.junit.TestTags.SMART_CONTRACT; +import static com.hedera.services.bdd.spec.HapiSpec.hapiTest; +import static com.hedera.services.bdd.spec.dsl.contracts.TokenRedirectContract.HRC904REJECT; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.CONTRACT_REVERT_EXECUTED; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INSUFFICIENT_TOKEN_BALANCE; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INVALID_OWNER_ID; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SUCCESS; + +import com.hedera.services.bdd.junit.HapiTest; +import com.hedera.services.bdd.junit.HapiTestLifecycle; +import com.hedera.services.bdd.junit.support.TestLifecycle; +import com.hedera.services.bdd.spec.dsl.annotations.Account; +import com.hedera.services.bdd.spec.dsl.annotations.FungibleToken; +import com.hedera.services.bdd.spec.dsl.annotations.NonFungibleToken; +import com.hedera.services.bdd.spec.dsl.entities.SpecAccount; +import com.hedera.services.bdd.spec.dsl.entities.SpecFungibleToken; +import com.hedera.services.bdd.spec.dsl.entities.SpecNonFungibleToken; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.stream.Stream; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DynamicTest; +import 
org.junit.jupiter.api.Tag; + +@Tag(SMART_CONTRACT) +@HapiTestLifecycle +public class HRCTokenRejectTest { + + @Account(tinybarBalance = 100_000_000_000L) + static SpecAccount sender; + + @BeforeAll + public static void setUp(@NonNull TestLifecycle lifecycle) { + lifecycle.doAdhoc(sender.getInfo()); + } + + @HapiTest + @DisplayName("HRC rejectTokenFT works") + public Stream hrcFungibleWorks(@FungibleToken(initialSupply = 1000) SpecFungibleToken token) { + return hapiTest( + sender.associateTokens(token), + token.treasury().transferUnitsTo(sender, 10L, token), + token.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 990L)), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 10L)), + token.call(HRC904REJECT, "rejectTokenFT").with(call -> call.payingWith(sender.name())), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 0L)), + token.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 1000L))); + } + + @HapiTest + @DisplayName("HRC rejectTokenNFTs works") + public Stream hrcNftWorks(@NonFungibleToken(numPreMints = 1) SpecNonFungibleToken nft) { + return hapiTest( + sender.associateTokens(nft), + nft.treasury().transferNFTsTo(sender, nft, 1L), + nft.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 0L)), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 1L)), + nft.call(HRC904REJECT, "rejectTokenNFTs", new long[] {1L}).with(call -> call.payingWith(sender.name())), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 0L)), + nft.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 1L))); + } + + @HapiTest + @DisplayName("HRC rejectTokenNFTs works for max allowed serials") + public Stream hrcNftWorksForMultipleSerials( + @NonFungibleToken(numPreMints = 10) SpecNonFungibleToken nft) { + return hapiTest( + sender.associateTokens(nft), + 
nft.treasury().transferNFTsTo(sender, nft, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L), + nft.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 0L)), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 10L)), + nft.call(HRC904REJECT, "rejectTokenNFTs", new long[] {1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L}) + .with(call -> call.payingWith(sender.name())) + .gas(1_000_000L), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 0L)), + nft.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 10L))); + } + + @HapiTest + @DisplayName("HRC rejectTokenNFTs fails if account has no nft balance") + public Stream hrcNftFailsIfAccountHasNoBalance( + @NonFungibleToken(numPreMints = 1) SpecNonFungibleToken nft) { + return hapiTest( + sender.associateTokens(nft), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 0L)), + nft.call(HRC904REJECT, "rejectTokenNFTs", new long[] {1L}) + .with(call -> call.payingWith(sender.name())) + .andAssert(txn -> txn.hasKnownStatuses(SUCCESS, INVALID_OWNER_ID))); + } + + @HapiTest + @DisplayName("HRC rejectTokenFT fails if account has no token balance") + public Stream hrcFungibleFailsIfAccountHasNoBalance(@FungibleToken SpecFungibleToken token) { + return hapiTest( + sender.associateTokens(token), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 0L)), + token.call(HRC904REJECT, "rejectTokenFT") + .with(call -> call.payingWith(sender.name())) + .andAssert(txn -> txn.hasKnownStatuses(SUCCESS, INSUFFICIENT_TOKEN_BALANCE))); + } + + @HapiTest + @DisplayName("HRC rejectTokenNFTs fails if serials exceed limit") + public Stream hrcNftFailsForMultipleSerials( + @NonFungibleToken(numPreMints = 11) SpecNonFungibleToken nft) { + return hapiTest( + sender.associateTokens(nft), + nft.treasury().transferNFTsTo(sender, nft, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L), + 
nft.treasury().transferNFTsTo(sender, nft, 11L), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 11L)), + nft.call(HRC904REJECT, "rejectTokenNFTs", new long[] {1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L, 11L}) + .with(call -> call.payingWith(sender.name())) + .gas(1_000_000L) + .andAssert(txn -> txn.hasKnownStatuses(CONTRACT_REVERT_EXECUTED))); + } +} diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/TokenCancelAirdropSystemContractTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/TokenCancelAirdropSystemContractTest.java new file mode 100644 index 000000000000..f7e8ba9b3e32 --- /dev/null +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/TokenCancelAirdropSystemContractTest.java @@ -0,0 +1,377 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.services.bdd.suites.contract.precompile; + +import static com.hedera.services.bdd.junit.TestTags.SMART_CONTRACT; +import static com.hedera.services.bdd.spec.HapiSpec.hapiTest; +import static com.hedera.services.bdd.spec.assertions.TransactionRecordAsserts.includingFungiblePendingAirdrop; +import static com.hedera.services.bdd.spec.assertions.TransactionRecordAsserts.includingNftPendingAirdrop; +import static com.hedera.services.bdd.spec.assertions.TransactionRecordAsserts.recordWith; +import static com.hedera.services.bdd.spec.queries.QueryVerbs.getTxnRecord; +import static com.hedera.services.bdd.spec.transactions.TxnVerbs.tokenAirdrop; +import static com.hedera.services.bdd.spec.transactions.token.TokenMovement.moving; +import static com.hedera.services.bdd.spec.transactions.token.TokenMovement.movingUnique; +import static com.hedera.services.bdd.spec.utilops.CustomSpecAssert.allRunFor; +import static com.hedera.services.bdd.spec.utilops.UtilVerbs.withOpContext; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.CONTRACT_REVERT_EXECUTED; + +import com.esaulpaugh.headlong.abi.Address; +import com.hedera.services.bdd.junit.HapiTest; +import com.hedera.services.bdd.junit.HapiTestLifecycle; +import com.hedera.services.bdd.junit.support.TestLifecycle; +import com.hedera.services.bdd.spec.HapiSpec; +import com.hedera.services.bdd.spec.SpecOperation; +import com.hedera.services.bdd.spec.dsl.annotations.Account; +import com.hedera.services.bdd.spec.dsl.annotations.Contract; +import com.hedera.services.bdd.spec.dsl.annotations.FungibleToken; +import com.hedera.services.bdd.spec.dsl.annotations.NonFungibleToken; +import com.hedera.services.bdd.spec.dsl.entities.SpecAccount; +import com.hedera.services.bdd.spec.dsl.entities.SpecContract; +import com.hedera.services.bdd.spec.dsl.entities.SpecFungibleToken; +import com.hedera.services.bdd.spec.dsl.entities.SpecNonFungibleToken; +import 
com.hedera.services.bdd.spec.dsl.operations.queries.GetBalanceOperation; +import com.hedera.services.bdd.spec.transactions.token.TokenMovement; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.stream.Stream; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DynamicTest; +import org.junit.jupiter.api.Tag; + +@Tag(SMART_CONTRACT) +@HapiTestLifecycle +public class TokenCancelAirdropSystemContractTest { + + @Contract(contract = "CancelAirdrop", creationGas = 1_000_000L) + static SpecContract cancelAirdrop; + + @Account(name = "sender", tinybarBalance = 100_000_000_000L) + static SpecAccount sender; + + @Account(name = "receiver", maxAutoAssociations = 0) + static SpecAccount receiver; + + @FungibleToken(name = "token", initialSupply = 1000) + static SpecFungibleToken token; + + @BeforeAll + public static void setUp(final @NonNull TestLifecycle lifecycle) { + lifecycle.doAdhoc( + sender.authorizeContract(cancelAirdrop), + sender.associateTokens(token), + token.treasury().transferUnitsTo(sender, 1000, token)); + } + + @HapiTest + @DisplayName("Can cancel 1 fungible airdrop") + public Stream cancelAirdrop() { + return hapiTest( + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 0)), + tokenAirdrop(moving(10, token.name()).between(sender.name(), receiver.name())) + .payingWith(sender.name()) + .via("tokenAirdrop"), + getTxnRecord("tokenAirdrop") + .hasPriority(recordWith() + .pendingAirdrops(includingFungiblePendingAirdrop( + moving(10, token.name()).between(sender.name(), receiver.name())))), + cancelAirdrop + .call("cancelAirdrop", sender, receiver, token) + .payingWith(sender) + .via("cancelAirdrop"), + getTxnRecord("cancelAirdrop").hasPriority(recordWith().pendingAirdropsCount(0))); + } + + @HapiTest + @DisplayName("Can cancel 1 nft airdrop") + 
public Stream cancelNftAirdrop(@NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft) { + return hapiTest( + sender.associateTokens(nft), + nft.treasury().transferNFTsTo(sender, nft, 1L), + receiver.getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 0)), + tokenAirdrop(movingUnique(nft.name(), 1L).between(sender.name(), receiver.name())) + .payingWith(sender.name()) + .via("tokenAirdrop"), + getTxnRecord("tokenAirdrop") + .hasPriority(recordWith() + .pendingAirdrops(includingNftPendingAirdrop( + movingUnique(nft.name(), 1L).between(sender.name(), receiver.name())))), + cancelAirdrop + .call("cancelNFTAirdrop", sender, receiver, nft, 1L) + .payingWith(sender) + .via("cancelAirdrop"), + getTxnRecord("cancelAirdrop").hasPriority(recordWith().pendingAirdropsCount(0))); + } + + @HapiTest + @DisplayName("Can cancel 10 fungible airdrops") + public Stream cancel10Airdrops( + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token1, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token2, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token3, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token4, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token5, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft1, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft2, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft3, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft4, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft5) { + final var tokenList = List.of(token1, token2, token3, token4, token5); + final var nftList = List.of(nft1, nft2, nft3, nft4, nft5); + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor(spec, prepareTokensAndBalances(sender, receiver, tokenList, 
nftList)); + prepareAirdrops(tokenList, nftList, spec); + final var senders = prepareSenderAddresses( + spec, sender, sender, sender, sender, sender, sender, sender, sender, sender, sender); + final var receivers = prepareReceiverAddresses( + spec, receiver, receiver, receiver, receiver, receiver, receiver, receiver, receiver, receiver, + receiver); + final var tokens = prepareTokenAddresses(spec, token1, token2, token3, token4, token5); + final var nfts = prepareNftAddresses(spec, nft1, nft2, nft3, nft4, nft5); + final var combined = + Stream.concat(Arrays.stream(tokens), Arrays.stream(nfts)).toArray(Address[]::new); + final var serials = new long[] {0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L}; + allRunFor( + spec, + cancelAirdrop + .call("cancelAirdrops", senders, receivers, combined, serials) + .via("cancelAirdrops"), + getTxnRecord("cancelAirdrops").hasPriority(recordWith().pendingAirdropsCount(0)), + checkForEmptyBalance(receiver, tokenList, nftList)); + })); + } + + @HapiTest + @DisplayName("Can cancel 3 fungible airdrops") + public Stream cancel3Airdrops( + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token1, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token2, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token3) { + final var tokenList = List.of(token1, token2, token3); + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor(spec, prepareTokensAndBalances(sender, receiver, tokenList, List.of())); + prepareAirdrops(tokenList, List.of(), spec); + final var senders = prepareSenderAddresses(spec, sender, sender, sender); + final var receivers = prepareReceiverAddresses(spec, receiver, receiver, receiver); + final var tokens = prepareTokenAddresses(spec, token1, token2, token3); + final var serials = new long[] {0L, 0L, 0L}; + allRunFor( + spec, + cancelAirdrop + .call("cancelAirdrops", senders, receivers, tokens, serials) + .via("cancelAirdrops"), + 
getTxnRecord("cancelAirdrops").hasPriority(recordWith().pendingAirdropsCount(0)), + checkForEmptyBalance(receiver, tokenList, List.of())); + })); + } + + @HapiTest + @DisplayName("Fails to cancel 11 pending airdrops") + public Stream failToCancel11Airdrops( + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token1, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token2, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token3, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token4, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token5, + @NonNull @FungibleToken(initialSupply = 1_000_000L) final SpecFungibleToken token6, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft1, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft2, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft3, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft4, + @NonNull @NonFungibleToken(numPreMints = 1) final SpecNonFungibleToken nft5) { + final var tokenList = List.of(token1, token2, token3, token4, token5, token6); + final var nftList = List.of(nft1, nft2, nft3, nft4, nft5); + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor(spec, prepareTokensAndBalances(sender, receiver, tokenList, nftList)); + // Spread transactions to avoid hitting the max airdrops limit + prepareAirdrops(List.of(token1, token2, token3), List.of(), spec); + prepareAirdrops(List.of(token4, token5, token6), List.of(), spec); + prepareAirdrops(List.of(), nftList, spec); + final var senders = prepareSenderAddresses( + spec, sender, sender, sender, sender, sender, sender, sender, sender, sender, sender, sender); + final var receivers = prepareReceiverAddresses( + spec, receiver, receiver, receiver, receiver, receiver, receiver, receiver, receiver, receiver, + receiver, 
receiver); + final var tokens = prepareTokenAddresses(spec, token1, token2, token3, token4, token5); + final var nfts = prepareNftAddresses(spec, nft1, nft2, nft3, nft4, nft5); + final var combined = + Stream.concat(Arrays.stream(tokens), Arrays.stream(nfts)).toArray(Address[]::new); + final var serials = new long[] {0L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L}; + allRunFor( + spec, + cancelAirdrop + .call("cancelAirdrops", senders, receivers, combined, serials) + .via("cancelAirdrops") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + })); + } + + @HapiTest + @DisplayName("Fails to cancel pending airdrop with invalid token") + public Stream failToCancel1AirdropWithInvalidToken() { + return hapiTest(cancelAirdrop + .call("cancelAirdrop", sender, receiver, receiver) + .payingWith(sender) + .via("cancelAirdrop") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + } + + @HapiTest + @DisplayName("Fails to cancel pending airdrop with invalid sender") + public Stream failToCancel1AirdropWithInvalidSender() { + return hapiTest(cancelAirdrop + .call("cancelAirdrop", token, receiver, token) + .payingWith(sender) + .via("cancelAirdrop") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + } + + @HapiTest + @DisplayName("Fails to cancel airdrop without having any pending airdrops") + public Stream failToCancelAirdropWhenThereAreNoPending() { + return hapiTest(cancelAirdrop + .call("cancelAirdrop", sender, receiver, token) + .payingWith(sender) + .via("cancelAirdrop") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + } + + @HapiTest + @DisplayName("Fails to cancel pending airdrop with invalid receiver") + public Stream failToCancel1AirdropWithInvalidReceiver() { + return hapiTest(cancelAirdrop + .call("cancelAirdrop", sender, token, token) + .payingWith(sender) + .via("cancelAirdrop") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + } + + @HapiTest + @DisplayName("Fails to cancel nft 
airdrop with invalid nft") + public Stream failToCancel1AirdropWithInvalidNft() { + return hapiTest(cancelAirdrop + .call("cancelNFTAirdrop", sender, receiver, receiver, 1L) + .payingWith(sender) + .via("cancelAirdrop") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + } + + @HapiTest + @DisplayName("Fails to cancel nft airdrop with invalid nft serial") + public Stream failToCancel1AirdropWithInvalidSerial(@NonFungibleToken final SpecNonFungibleToken nft) { + return hapiTest( + sender.associateTokens(nft), + cancelAirdrop + .call("cancelNFTAirdrop", sender, receiver, nft, 1L) + .payingWith(sender) + .via("cancelAirdrop") + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + } + + @HapiTest + private void prepareAirdrops( + @NonNull List tokens, @NonNull List nfts, @NonNull HapiSpec spec) { + var tokenMovements = prepareFTAirdrops(sender, receiver, tokens); + var nftMovements = prepareNFTAirdrops(sender, receiver, nfts); + allRunFor( + spec, + tokenAirdrop(Stream.of(tokenMovements, nftMovements) + .flatMap(Collection::stream) + .toArray(TokenMovement[]::new)) + .payingWith(sender.name()) + .via("tokenAirdrop"), + getTxnRecord("tokenAirdrop") + .hasPriority(recordWith() + .pendingAirdrops( + includingFungiblePendingAirdrop(tokenMovements.toArray(TokenMovement[]::new))) + .pendingAirdrops( + includingNftPendingAirdrop(nftMovements.toArray(TokenMovement[]::new))))); + } + + private SpecOperation[] prepareTokensAndBalances( + final SpecAccount sender, + final SpecAccount receiver, + final List tokens, + final List nfts) { + ArrayList specOperations = new ArrayList<>(); + specOperations.addAll(List.of( + sender.associateTokens(tokens.toArray(SpecFungibleToken[]::new)), + sender.associateTokens(nfts.toArray(SpecNonFungibleToken[]::new)), + checkForEmptyBalance(receiver, tokens, nfts))); + specOperations.addAll(tokens.stream() + .map(token -> token.treasury().transferUnitsTo(sender, 1_000L, token)) + .toList()); + 
specOperations.addAll(nfts.stream() + .map(nft -> nft.treasury().transferNFTsTo(sender, nft, 1L)) + .toList()); + + return specOperations.toArray(SpecOperation[]::new); + } + + private GetBalanceOperation checkForEmptyBalance( + final SpecAccount receiver, final List tokens, final List nfts) { + return receiver.getBalance().andAssert(balance -> { + tokens.forEach(token -> balance.hasTokenBalance(token.name(), 0L)); + nfts.forEach(nft -> balance.hasTokenBalance(nft.name(), 0L)); + }); + } + + private Address[] prepareSenderAddresses(@NonNull HapiSpec spec, @NonNull SpecAccount... senders) { + return Arrays.stream(senders) + .map(sender -> sender.addressOn(spec.targetNetworkOrThrow())) + .toArray(Address[]::new); + } + + private Address[] prepareReceiverAddresses(@NonNull HapiSpec spec, @NonNull SpecAccount... receivers) { + return Arrays.stream(receivers) + .map(receiver -> receiver.addressOn(spec.targetNetworkOrThrow())) + .toArray(Address[]::new); + } + + private Address[] prepareTokenAddresses(@NonNull HapiSpec spec, @NonNull SpecFungibleToken... tokens) { + return Arrays.stream(tokens) + .map(token -> token.addressOn(spec.targetNetworkOrThrow())) + .toArray(Address[]::new); + } + + private Address[] prepareNftAddresses(@NonNull HapiSpec spec, @NonNull SpecNonFungibleToken... 
nfts) { + return Arrays.stream(nfts) + .map(nft -> nft.addressOn(spec.targetNetworkOrThrow())) + .toArray(Address[]::new); + } + + private List prepareFTAirdrops( + @NonNull final SpecAccount sender, + @NonNull final SpecAccount receiver, + @NonNull final List tokens) { + return tokens.stream() + .map(token -> moving(10, token.name()).between(sender.name(), receiver.name())) + .toList(); + } + + private List prepareNFTAirdrops( + @NonNull final SpecAccount sender, + @NonNull final SpecAccount receiver, + @NonNull final List nfts) { + return nfts.stream() + .map(nft -> movingUnique(nft.name(), 1L).between(sender.name(), receiver.name())) + .toList(); + } +} diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/TokenRejectSystemContractTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/TokenRejectSystemContractTest.java new file mode 100644 index 000000000000..f8d040c6d8c1 --- /dev/null +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/TokenRejectSystemContractTest.java @@ -0,0 +1,258 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.hedera.services.bdd.suites.contract.precompile; + +import static com.hedera.services.bdd.junit.TestTags.SMART_CONTRACT; +import static com.hedera.services.bdd.spec.HapiSpec.hapiTest; +import static com.hedera.services.bdd.spec.utilops.CustomSpecAssert.allRunFor; +import static com.hedera.services.bdd.spec.utilops.UtilVerbs.withOpContext; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.CONTRACT_REVERT_EXECUTED; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INSUFFICIENT_TOKEN_BALANCE; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INVALID_NFT_ID; +import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INVALID_TOKEN_ID; + +import com.esaulpaugh.headlong.abi.Address; +import com.hedera.services.bdd.junit.HapiTest; +import com.hedera.services.bdd.junit.HapiTestLifecycle; +import com.hedera.services.bdd.junit.support.TestLifecycle; +import com.hedera.services.bdd.spec.dsl.annotations.Account; +import com.hedera.services.bdd.spec.dsl.annotations.Contract; +import com.hedera.services.bdd.spec.dsl.annotations.FungibleToken; +import com.hedera.services.bdd.spec.dsl.annotations.NonFungibleToken; +import com.hedera.services.bdd.spec.dsl.entities.SpecAccount; +import com.hedera.services.bdd.spec.dsl.entities.SpecContract; +import com.hedera.services.bdd.spec.dsl.entities.SpecFungibleToken; +import com.hedera.services.bdd.spec.dsl.entities.SpecNonFungibleToken; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.stream.Stream; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DynamicTest; +import org.junit.jupiter.api.Tag; + +@Tag(SMART_CONTRACT) +@HapiTestLifecycle +public class TokenRejectSystemContractTest { + + @Contract(contract = "TokenReject", creationGas = 1_000_000L) + static SpecContract tokenReject; + + @Account(tinybarBalance = 1_000_000_000L) + static SpecAccount sender; + + @BeforeAll + public 
static void setup(final @NonNull TestLifecycle lifecycle) { + lifecycle.doAdhoc(sender.authorizeContract(tokenReject)); + } + + @HapiTest + @DisplayName("Reject fungible token") + public Stream tokenRejectSystemContractTest( + @FungibleToken(initialSupply = 1000) SpecFungibleToken token) { + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor( + spec, + sender.associateTokens(token), + token.treasury().transferUnitsTo(sender, 100, token), + token.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 900L))); + final var tokenAddress = token.addressOn(spec.targetNetworkOrThrow()); + allRunFor( + spec, + tokenReject.call("rejectTokens", sender, new Address[] {tokenAddress}, new Address[0]), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 0L)), + token.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(token.name(), 1000L))); + })); + } + + @HapiTest + @DisplayName("Reject non-fungible token") + public Stream tokenRejectSystemContractNftTest( + @NonFungibleToken(numPreMints = 1) SpecNonFungibleToken nft) { + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor( + spec, + sender.associateTokens(nft), + nft.treasury().transferNFTsTo(sender, nft, 1L), + nft.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 0L))); + final var tokenAddress = nft.addressOn(spec.targetNetworkOrThrow()); + allRunFor( + spec, + tokenReject.call("rejectTokens", sender, new Address[] {}, new Address[] {tokenAddress}), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 0L)), + nft.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(nft.name(), 1L))); + })); + } + + @HapiTest + @DisplayName("Reject multiple tokens") + public Stream tokenRejectForMultipleTokens( + @FungibleToken SpecFungibleToken token1, + @FungibleToken SpecFungibleToken token2, + @FungibleToken SpecFungibleToken token3, + @NonFungibleToken(numPreMints = 1) 
SpecNonFungibleToken nft1, + @NonFungibleToken(numPreMints = 1) SpecNonFungibleToken nft2, + @NonFungibleToken(numPreMints = 1) SpecNonFungibleToken nft3) { + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor( + spec, + sender.associateTokens(token1, token2, token3, nft1, nft2, nft3), + token1.treasury().transferUnitsTo(sender, 100, token1), + token2.treasury().transferUnitsTo(sender, 100, token2), + token3.treasury().transferUnitsTo(sender, 100, token3), + nft1.treasury().transferNFTsTo(sender, nft1, 1L), + nft2.treasury().transferNFTsTo(sender, nft2, 1L), + nft3.treasury().transferNFTsTo(sender, nft3, 1L)); + final var token1Address = token1.addressOn(spec.targetNetworkOrThrow()); + final var token2Address = token2.addressOn(spec.targetNetworkOrThrow()); + final var token3Address = token3.addressOn(spec.targetNetworkOrThrow()); + final var nft1Address = nft1.addressOn(spec.targetNetworkOrThrow()); + final var nft2Address = nft2.addressOn(spec.targetNetworkOrThrow()); + final var nft3Address = nft3.addressOn(spec.targetNetworkOrThrow()); + allRunFor( + spec, + tokenReject + .call( + "rejectTokens", + sender, + new Address[] {token1Address, token2Address, token3Address}, + new Address[] {nft1Address, nft2Address, nft3Address}) + .gas(1_000_000L), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(token1.name(), 0L)), + token1.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(token1.name(), 100L)), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(token2.name(), 0L)), + token2.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(token2.name(), 100L)), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(token3.name(), 0L)), + token3.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(token3.name(), 100L)), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(nft1.name(), 0L)), + nft1.treasury().getBalance().andAssert(balance -> 
balance.hasTokenBalance(nft1.name(), 1L)), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(nft2.name(), 0L)), + nft2.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(nft2.name(), 1L)), + sender.getBalance().andAssert(balance -> balance.hasTokenBalance(nft3.name(), 0L)), + nft3.treasury().getBalance().andAssert(balance -> balance.hasTokenBalance(nft3.name(), 1L))); + })); + } + + @HapiTest + @DisplayName("Fails to reject tokens if limits exceeded") + public Stream failsIfLimitsExceeded( + @FungibleToken SpecFungibleToken token1, + @FungibleToken SpecFungibleToken token2, + @FungibleToken SpecFungibleToken token3, + @FungibleToken SpecFungibleToken token4, + @FungibleToken SpecFungibleToken token5, + @FungibleToken SpecFungibleToken token6, + @FungibleToken SpecFungibleToken token7, + @FungibleToken SpecFungibleToken token8, + @FungibleToken SpecFungibleToken token9, + @FungibleToken SpecFungibleToken token10, + @FungibleToken SpecFungibleToken token11) { + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor( + spec, + sender.associateTokens( + token1, token2, token3, token4, token5, token6, token7, token8, token9, token10, token11), + token1.treasury().transferUnitsTo(sender, 100, token1), + token2.treasury().transferUnitsTo(sender, 100, token2), + token3.treasury().transferUnitsTo(sender, 100, token3), + token4.treasury().transferUnitsTo(sender, 100, token4), + token5.treasury().transferUnitsTo(sender, 100, token5), + token6.treasury().transferUnitsTo(sender, 100, token6), + token7.treasury().transferUnitsTo(sender, 100, token7), + token8.treasury().transferUnitsTo(sender, 100, token8), + token9.treasury().transferUnitsTo(sender, 100, token9), + token10.treasury().transferUnitsTo(sender, 100, token10), + token11.treasury().transferUnitsTo(sender, 100, token11)); + final var token1Address = token1.addressOn(spec.targetNetworkOrThrow()); + final var token2Address = token2.addressOn(spec.targetNetworkOrThrow()); + final 
var token3Address = token3.addressOn(spec.targetNetworkOrThrow()); + final var token4Address = token4.addressOn(spec.targetNetworkOrThrow()); + final var token5Address = token5.addressOn(spec.targetNetworkOrThrow()); + final var token6Address = token6.addressOn(spec.targetNetworkOrThrow()); + final var token7Address = token7.addressOn(spec.targetNetworkOrThrow()); + final var token8Address = token8.addressOn(spec.targetNetworkOrThrow()); + final var token9Address = token9.addressOn(spec.targetNetworkOrThrow()); + final var token10Address = token10.addressOn(spec.targetNetworkOrThrow()); + final var token11Address = token11.addressOn(spec.targetNetworkOrThrow()); + allRunFor( + spec, + tokenReject + .call( + "rejectTokens", + sender, + new Address[] { + token1Address, + token2Address, + token3Address, + token4Address, + token5Address, + token6Address, + token7Address, + token8Address, + token9Address, + token10Address, + token11Address + }, + new Address[0]) + .gas(1_000_000L) + .andAssert(txn -> txn.hasKnownStatus(CONTRACT_REVERT_EXECUTED))); + })); + } + + @HapiTest + @DisplayName("Fails to reject tokens if there are no associated tokens") + public Stream failsIfNoAssociatedTokens(@FungibleToken SpecFungibleToken token) { + return hapiTest(withOpContext((spec, opLog) -> { + allRunFor(spec, sender.associateTokens(token)); + final var tokenAddress = token.addressOn(spec.targetNetworkOrThrow()); + allRunFor( + spec, + tokenReject + .call("rejectTokens", sender, new Address[] {tokenAddress}, new Address[0]) + .gas(1_000_000L) + .andAssert( + txn -> txn.hasKnownStatuses(CONTRACT_REVERT_EXECUTED, INSUFFICIENT_TOKEN_BALANCE))); + })); + } + + @HapiTest + @DisplayName("Fails if token is invalid") + public Stream failsIfTokenIsInvalid() { + return hapiTest(withOpContext((spec, opLog) -> { + final var senderAddress = sender.addressOn(spec.targetNetworkOrThrow()); + allRunFor( + spec, + tokenReject + .call("rejectTokens", sender, new Address[] {senderAddress}, new 
Address[0]) + .gas(1_000_000L) + .andAssert(txn -> txn.hasKnownStatuses(CONTRACT_REVERT_EXECUTED, INVALID_TOKEN_ID))); + })); + } + + @HapiTest + @DisplayName("Fails if NFT is invalid") + public Stream failsIfNFTIsInvalid() { + return hapiTest(withOpContext((spec, opLog) -> { + final var senderAddress = sender.addressOn(spec.targetNetworkOrThrow()); + allRunFor( + spec, + tokenReject + .call("rejectTokens", sender, new Address[] {}, new Address[] {senderAddress}) + .gas(1_000_000L) + .andAssert(txn -> txn.hasKnownStatuses(CONTRACT_REVERT_EXECUTED, INVALID_NFT_ID))); + })); + } +} diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/crypto/HollowAccountFinalizationSuite.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/crypto/HollowAccountFinalizationSuite.java index 3af715993831..a8bd9210e8bb 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/crypto/HollowAccountFinalizationSuite.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/crypto/HollowAccountFinalizationSuite.java @@ -27,6 +27,7 @@ import static com.hedera.services.bdd.spec.assertions.TransferListAsserts.noCreditAboveNumber; import static com.hedera.services.bdd.spec.keys.TrieSigMapGenerator.uniqueWithFullPrefixesFor; import static com.hedera.services.bdd.spec.queries.QueryVerbs.getAccountBalance; +import static com.hedera.services.bdd.spec.queries.QueryVerbs.getAccountInfo; import static com.hedera.services.bdd.spec.queries.QueryVerbs.getAliasedAccountInfo; import static com.hedera.services.bdd.spec.queries.QueryVerbs.getAutoCreatedAccountBalance; import static com.hedera.services.bdd.spec.queries.QueryVerbs.getTxnRecord; @@ -82,6 +83,7 @@ import com.hedera.services.bdd.spec.queries.meta.HapiGetTxnRecord; import com.hedera.services.bdd.spec.transactions.crypto.HapiCryptoTransfer; import com.hederahashgraph.api.proto.java.AccountID; +import com.hederahashgraph.api.proto.java.Key; import 
com.hederahashgraph.api.proto.java.TokenID; import com.hederahashgraph.api.proto.java.TokenTransferList; import com.hederahashgraph.api.proto.java.TokenType; @@ -557,6 +559,12 @@ final Stream hollowAccountCompletionWithEthereumContractCreate() { getTxnRecord(TRANSFER_TXN_2).andAllChildRecords().logged(); allRunFor(spec, op2, op3, hapiGetSecondTxnRecord); + // ensure that the finalized contract has a self management key + final var contractIdKey = Key.newBuilder() + .setContractID(spec.registry().getContractId(CONTRACT)) + .build(); + final var op4 = getAccountInfo(CONTRACT).has(accountWith().key(contractIdKey)); + allRunFor(spec, op4); })); } diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip904/AirdropsDisabledTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip904/AirdropsDisabledTest.java index 1932b09818bd..919d272b9af6 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip904/AirdropsDisabledTest.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip904/AirdropsDisabledTest.java @@ -244,7 +244,7 @@ contract, GET_BYTECODE, asHeadlongAddress(factoryEvmAddress.get()), salt) sourcing(() -> getContractInfo(mergedAliasAddr.get()) .has(contractWith() .numKvPairs(4) - .hasStandinContractKey() + .defaultAdminKey() .maxAutoAssociations(2) .hasAlreadyUsedAutomaticAssociations(2) .memo(LAZY_MEMO) @@ -366,7 +366,7 @@ contract, GET_BYTECODE, asHeadlongAddress(factoryEvmAddress.get()), salt) // check created contract sourcing(() -> getContractInfo(mergedAliasAddr.get()) .has(contractWith() - .hasStandinContractKey() + .defaultAdminKey() .maxAutoAssociations(fungibleTransfersSize) .hasAlreadyUsedAutomaticAssociations(fungibleTransfersSize) .memo(LAZY_MEMO) @@ -565,7 +565,7 @@ contract, GET_BYTECODE, asHeadlongAddress(factoryEvmAddress.get()), salt) sourcing(() -> getContractInfo(mergedAliasAddr.get()) .has(contractWith() .numKvPairs(2) - 
.hasStandinContractKey() + .defaultAdminKey() .maxAutoAssociations(2) .hasAlreadyUsedAutomaticAssociations(2) .memo(LAZY_MEMO) diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleCreateSpecs.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleCreateTest.java similarity index 99% rename from hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleCreateSpecs.java rename to hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleCreateTest.java index 727b9bde29c1..06daf64a39b3 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleCreateSpecs.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleCreateTest.java @@ -92,7 +92,7 @@ import org.junit.jupiter.api.DynamicTest; import org.junit.jupiter.api.Tag; -public class ScheduleCreateSpecs { +public class ScheduleCreateTest { @HapiTest final Stream aliasNotAllowedAsPayer() { return defaultHapiSpec("BodyAndPayerCreation") @@ -164,7 +164,7 @@ final Stream validateSignersInInfo() { .then(getScheduleInfo(VALID_SCHEDULE) .hasScheduleId(VALID_SCHEDULE) .hasRecordedScheduledTxn() - .hasSignatories(SENDER)); + .hasSignatories(DEFAULT_PAYER, SENDER)); } @HapiTest diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleDeleteSpecs.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleDeleteTest.java similarity index 99% rename from hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleDeleteSpecs.java rename to hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleDeleteTest.java index 560bdaabbc4f..cb02f1f9510f 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleDeleteSpecs.java +++ 
b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleDeleteTest.java @@ -47,7 +47,7 @@ import java.util.stream.Stream; import org.junit.jupiter.api.DynamicTest; -public class ScheduleDeleteSpecs { +public class ScheduleDeleteTest { @HapiTest final Stream deleteWithNoAdminKeyFails() { return defaultHapiSpec("DeleteWithNoAdminKeyFails") diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleExecutionSpecs.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleExecutionTest.java similarity index 99% rename from hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleExecutionSpecs.java rename to hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleExecutionTest.java index e2c78a862e87..1b00b36a885e 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleExecutionSpecs.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleExecutionTest.java @@ -143,7 +143,7 @@ import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.DynamicTest; -public class ScheduleExecutionSpecs { +public class ScheduleExecutionTest { private final long normalTriggeredTxnTimestampOffset = 1; @SuppressWarnings("java:S2245") // using java.util.Random in tests is fine diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleRecordSpecs.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleRecordTest.java similarity index 99% rename from hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleRecordSpecs.java rename to hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleRecordTest.java index 4ba7a187c88d..c4428cf5ca04 100644 --- 
a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleRecordSpecs.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleRecordTest.java @@ -68,7 +68,7 @@ import java.util.stream.Stream; import org.junit.jupiter.api.DynamicTest; -public class ScheduleRecordSpecs { +public class ScheduleRecordTest { @HapiTest final Stream noFeesChargedIfTriggeredPayerIsUnwilling() { return defaultHapiSpec("NoFeesChargedIfTriggeredPayerIsUnwilling") diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleSignSpecs.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleSignTest.java similarity index 96% rename from hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleSignSpecs.java rename to hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleSignTest.java index d3c1b59747ba..b431f7f5ab4e 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleSignSpecs.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleSignTest.java @@ -82,7 +82,7 @@ import org.junit.jupiter.api.DynamicTest; import org.junit.jupiter.api.Tag; -public class ScheduleSignSpecs { +public class ScheduleSignTest { @HapiTest final Stream idVariantsTreatedAsExpected() { return defaultHapiSpec("idVariantsTreatedAsExpected") @@ -103,19 +103,17 @@ final Stream signingDeletedSchedulesHasNoEffect() { String schedule = "Z"; String adminKey = ADMIN; - return defaultHapiSpec("SigningDeletedSchedulesHasNoEffect") - .given( - newKeyNamed(adminKey), - cryptoCreate(sender), - cryptoCreate(receiver).balance(0L), - scheduleCreate(schedule, cryptoTransfer(tinyBarsFromTo(sender, receiver, 1))) - .adminKey(adminKey) - .payingWith(DEFAULT_PAYER), - getAccountBalance(receiver).hasTinyBars(0L)) - .when( - 
scheduleDelete(schedule).signedBy(DEFAULT_PAYER, adminKey), - scheduleSign(schedule).alsoSigningWith(sender).hasKnownStatus(SCHEDULE_ALREADY_DELETED)) - .then(getAccountBalance(receiver).hasTinyBars(0L)); + return hapiTest( + newKeyNamed(adminKey), + cryptoCreate(sender), + cryptoCreate(receiver).balance(0L), + scheduleCreate(schedule, cryptoTransfer(tinyBarsFromTo(sender, receiver, 1))) + .adminKey(adminKey) + .payingWith(DEFAULT_PAYER), + getAccountBalance(receiver).hasTinyBars(0L), + scheduleDelete(schedule).signedBy(DEFAULT_PAYER, adminKey), + scheduleSign(schedule).alsoSigningWith(sender).hasKnownStatus(SCHEDULE_ALREADY_DELETED), + getAccountBalance(receiver).hasTinyBars(0L)); } @HapiTest @@ -455,13 +453,12 @@ final Stream scheduleAlreadyExecutedDoesntRepeatTransaction() { final Stream basicSignatureCollectionWorks() { var txnBody = cryptoTransfer(tinyBarsFromTo(SENDER, RECEIVER, 1)); - return defaultHapiSpec("BasicSignatureCollectionWorks") - .given( - cryptoCreate(SENDER), - cryptoCreate(RECEIVER).receiverSigRequired(true), - scheduleCreate(BASIC_XFER, txnBody)) - .when(scheduleSign(BASIC_XFER).alsoSigningWith(RECEIVER)) - .then(getScheduleInfo(BASIC_XFER).hasSignatories(RECEIVER)); + return hapiTest( + cryptoCreate(SENDER), + cryptoCreate(RECEIVER).receiverSigRequired(true), + scheduleCreate(BASIC_XFER, txnBody), + scheduleSign(BASIC_XFER).alsoSigningWith(RECEIVER), + getScheduleInfo(BASIC_XFER).hasSignatories(DEFAULT_PAYER, RECEIVER)); } @HapiTest diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleExecutionSpecStateful.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/StatefulScheduleExecutionTest.java similarity index 99% rename from hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleExecutionSpecStateful.java rename to hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/StatefulScheduleExecutionTest.java 
index b7b46510ffa8..9fa4faf39f81 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleExecutionSpecStateful.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/StatefulScheduleExecutionTest.java @@ -67,7 +67,7 @@ import org.junit.jupiter.api.TestMethodOrder; @TestMethodOrder(MethodOrderer.OrderAnnotation.class) -public class ScheduleExecutionSpecStateful { +public class StatefulScheduleExecutionTest { @HapiTest @Order(4) final Stream scheduledBurnWithInvalidTokenThrowsUnresolvableSigners() { diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/tss/RepeatableTssTests.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/tss/RepeatableTssTests.java index 8171d81826a8..640f64e54f0d 100644 --- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/tss/RepeatableTssTests.java +++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/tss/RepeatableTssTests.java @@ -52,6 +52,7 @@ import java.util.function.IntConsumer; import java.util.stream.Stream; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.DynamicTest; /** @@ -116,6 +117,8 @@ Stream blockStreamManagerCatchesUpWithIndirectProofs() { @LeakyRepeatableHapiTest( value = {NEEDS_TSS_CONTROL, NEEDS_VIRTUAL_TIME_FOR_FAST_EXECUTION}, overrides = {"tss.keyCandidateRoster"}) + @Disabled + // Need to fix by adding Roster entries to the state before running this test. 
Will do in next PR Stream tssMessageSubmittedForRekeyingIsSuccessful() { return hapiTest( blockStreamMustIncludePassFrom(spec -> successfulTssMessageThenVote()), diff --git a/hedera-node/test-clients/src/main/java/module-info.java b/hedera-node/test-clients/src/main/java/module-info.java index 9867edbefc82..efb9d47a2896 100644 --- a/hedera-node/test-clients/src/main/java/module-info.java +++ b/hedera-node/test-clients/src/main/java/module-info.java @@ -61,7 +61,6 @@ exports com.hedera.services.bdd.junit.support.validators.utils; exports com.hedera.services.bdd.junit.support.validators.block; exports com.hedera.services.bdd.utils; - exports com.hedera.services.bdd.junit.hedera.embedded.fakes.tss; requires transitive com.hedera.node.app.hapi.fees; requires transitive com.hedera.node.app.hapi.utils; diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/Airdrop/Airdrop.bin b/hedera-node/test-clients/src/main/resources/contract/contracts/Airdrop/Airdrop.bin new file mode 100644 index 000000000000..7481e6f56cbb --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/Airdrop/Airdrop.bin @@ -0,0 +1 @@ 
+608060405234801561001057600080fd5b506127c5806100206000396000f3fe608060405234801561001057600080fd5b506004361061009e5760003560e01c8063913073f911610066578063913073f9146101945780639b23d3d9146101c45780639ef773bf146101f4578063ba7ee6e514610224578063e6c6ac4c146102545761009e565b806315dacbea146100a35780631b79e5f9146100d357806352c0d7d414610103578063618dc65e146101335780637da131bf14610164575b600080fd5b6100bd60048036038101906100b891906117c5565b610284565b6040516100ca9190611848565b60405180910390f35b6100ed60048036038101906100e8919061188f565b6103a2565b6040516100fa9190611848565b60405180910390f35b61011d6004803603810190610118919061188f565b610497565b60405161012a9190611848565b60405180910390f35b61014d60048036038101906101489190611a3c565b61060c565b60405161015b929190611b30565b60405180910390f35b61017e60048036038101906101799190611c28565b610768565b60405161018b9190611848565b60405180910390f35b6101ae60048036038101906101a99190611ce3565b6108cf565b6040516101bb9190611848565b60405180910390f35b6101de60048036038101906101d991906117c5565b610b81565b6040516101eb9190611848565b60405180910390f35b61020e60048036038101906102099190611e29565b610c9f565b60405161021b9190611848565b60405180910390f35b61023e60048036038101906102399190611fa3565b610fa0565b60405161024b9190611848565b60405180910390f35b61026e60048036038101906102699190612012565b611161565b60405161027b9190611848565b60405180910390f35b600080600061016773ffffffffffffffffffffffffffffffffffffffff166315dacbea60e01b888888886040516024016102c19493929190612107565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505060405161032b9190612188565b6000604051808303816000865af19150503d8060008114610368576040519150601f19603f3d011682016040523d82523d6000602084013e61036d565b606091505b50915091508161037e576015610393565b8080602001905181019061039291906121d8565b5b60030b92505050949350505050565b600080600167ffffffffffffffff8111156103c0576103bf611911565b5b6040519080825
280602002602001820160405280156103f957816020015b6103e6611651565b8152602001906001900390816103de5790505b509050610404611651565b86816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505061044786868661135c565b8160200181905250808260008151811061046457610463612205565b5b6020026020010181905250610478826114ab565b9250601660030b8360070b1461048d57600080fd5b5050949350505050565b600080600167ffffffffffffffff8111156104b5576104b4611911565b5b6040519080825280602002602001820160405280156104ee57816020015b6104db611651565b8152602001906001900390816104d35790505b5090506104f9611651565b86816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff1681525050600061053e8787876115c0565b90506000600167ffffffffffffffff81111561055d5761055c611911565b5b60405190808252806020026020018201604052801561059657816020015b610583611688565b81526020019060019003908161057b5790505b50905081816000815181106105ae576105ad612205565b5b602002602001018190525080836040018190525082846000815181106105d7576105d6612205565b5b60200260200101819052506105eb846114ab565b9450601660030b8560070b1461060057600080fd5b50505050949350505050565b6000606060008061016773ffffffffffffffffffffffffffffffffffffffff1663618dc65e60e01b8787604051602401610647929190612234565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050506040516106b19190612188565b6000604051808303816000865af19150503d80600081146106ee576040519150601f19603f3d011682016040523d82523d6000602084013e6106f3565b606091505b50915091507f4af4780e06fe8cb9df64b0794fa6f01399af979175bb988e35e0e57e594567bc828260405161072992919061227f565b60405180910390a18161074d57601560405180602001604052806000815250610751565b6016815b8160030b9150809450819550505050509250929050565b6000808451905060008167ffffffffffffffff81111561078b5761078a611911565b5b6040519080825280602002602001820160405280156107c457816020015b6107b161165
1565b8152602001906001900390816107a95790505b50905060005b828110156108a6576107da611651565b8882815181106107ed576107ec612205565b5b6020026020010151816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505061086b88838151811061084257610841612205565b5b602002602001015188848151811061085d5761085c612205565b5b60200260200101518861135c565b81602001819052508083838151811061088757610886612205565b5b602002602001018190525050808061089e906122de565b9150506107ca565b506108b0816114ab565b9250601660030b8360070b146108c557600080fd5b5050949350505050565b600080600184516108e09190612326565b90506000600167ffffffffffffffff8111156108ff576108fe611911565b5b60405190808252806020026020018201604052801561093857816020015b610925611651565b81526020019060019003908161091d5790505b509050610943611651565b87816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250506109836116e1565b87816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250506000805b88518110156109e95787826109d4919061235a565b915080806109e1906122de565b9150506109bf565b50806109f4906123ba565b826020019060070b908160070b8152505060008567ffffffffffffffff811115610a2157610a20611911565b5b604051908082528060200260200182016040528015610a5a57816020015b610a476116e1565b815260200190600190039081610a3f5790505b5090508281600081518110610a7257610a71612205565b5b60200260200101819052506000600190505b86811015610b2b57610a946116e1565b8a8281518110610aa757610aa6612205565b5b6020026020010151816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505089816020019060070b908160070b8152505080838381518110610b0c57610b0b612205565b5b6020026020010181905250508080610b23906122de565b915050610a84565b508084602001819052508385600081518110610b4a57610b49612205565b5b6020026020010181905250610b5e856114ab565b9650601660030b8760070b14610b7357600080fd5b505050505050949350505050565b600080600061016773fffffffffffffffffffffffff
fffffffffffffff16639b23d3d960e01b88888888604051602401610bbe9493929190612107565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050604051610c289190612188565b6000604051808303816000865af19150503d8060008114610c65576040519150601f19603f3d011682016040523d82523d6000602084013e610c6a565b606091505b509150915081610c7b576015610c90565b80806020019051810190610c8f91906121d8565b5b60030b92505050949350505050565b60008085518851610cb09190612326565b905060008167ffffffffffffffff811115610cce57610ccd611911565b5b604051908082528060200260200182016040528015610d0757816020015b610cf4611651565b815260200190600190039081610cec5790505b50905060005b8951811015610dea57610d1e611651565b8c8281518110610d3157610d30612205565b5b6020026020010151816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff1681525050610daf8b8381518110610d8657610d85612205565b5b60200260200101518b8481518110610da157610da0612205565b5b60200260200101518961135c565b816020018190525080838381518110610dcb57610dca612205565b5b6020026020010181905250508080610de2906122de565b915050610d0d565b5060008951905060005b83821015610f7257610e04611651565b8c8281518110610e1757610e16612205565b5b6020026020010151816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250506000600167ffffffffffffffff811115610e7357610e72611911565b5b604051908082528060200260200182016040528015610eac57816020015b610e99611688565b815260200190600190039081610e915790505b509050610f088b8481518110610ec557610ec4612205565b5b60200260200101518b8581518110610ee057610edf612205565b5b60200260200101518a8681518110610efb57610efa612205565b5b60200260200101516115c0565b81600081518110610f1c57610f1b612205565b5b602002602001018190525080826040018190525081858581518110610f4457610f43612205565b5b60200260200101819052508380610f5a906122de565b94505050508080610f6a906122de565b915050610df4565b50610f7c826114ab565b93506
01660030b8460070b14610f9157600080fd5b50505098975050505050505050565b600080825190506000600167ffffffffffffffff811115610fc457610fc3611911565b5b604051908082528060200260200182016040528015610ffd57816020015b610fea611651565b815260200190600190039081610fe25790505b509050611008611651565b86816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505060008367ffffffffffffffff81111561105c5761105b611911565b5b60405190808252806020026020018201604052801561109557816020015b611082611688565b81526020019060019003908161107a5790505b50905060005b8481101561110e576000600190506110ce898984815181106110c0576110bf612205565b5b6020026020010151836115c0565b8383815181106110e1576110e0612205565b5b602002602001018190525080806110f790612402565b915050508080611106906122de565b91505061109b565b50808260400181905250818360008151811061112d5761112c612205565b5b6020026020010181905250611141836114ab565b9450601660030b8560070b1461115657600080fd5b505050509392505050565b6000808551905060008167ffffffffffffffff81111561118457611183611911565b5b6040519080825280602002602001820160405280156111bd57816020015b6111aa611651565b8152602001906001900390816111a25790505b50905060005b82811015611333576111d3611651565b8882815181106111e6576111e5612205565b5b6020026020010151816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250506000600167ffffffffffffffff81111561124257611241611911565b5b60405190808252806020026020018201604052801561127b57816020015b611268611688565b8152602001906001900390816112605790505b5090506112d789848151811061129457611293612205565b5b60200260200101518985815181106112af576112ae612205565b5b60200260200101518986815181106112ca576112c9612205565b5b60200260200101516115c0565b816000815181106112eb576112ea612205565b5b60200260200101819052508082604001819052508184848151811061131357611312612205565b5b60200260200101819052505050808061132b906122de565b9150506111c3565b5061133d816114ab565b9250601660030b8360070b1461135257600080fd5b5050949350505050565b60606113666116e1565b8
4816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff1681525050826113a8906123ba565b816020019060070b908160070b815250506113c16116e1565b84816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505083816020019060070b908160070b81525050600267ffffffffffffffff81111561142657611425611911565b5b60405190808252806020026020018201604052801561145f57816020015b61144c6116e1565b8152602001906001900390816114445790505b509250818360008151811061147757611476612205565b5b6020026020010181905250808360018151811061149757611496612205565b5b602002602001018190525050509392505050565b600080600061016773ffffffffffffffffffffffffffffffffffffffff16632f34811960e01b856040516024016114e2919061276d565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505060405161154c9190612188565b6000604051808303816000865af19150503d8060008114611589576040519150601f19603f3d011682016040523d82523d6000602084013e61158e565b606091505b50915091508161159f5760156115b4565b808060200190518101906115b391906121d8565b5b60030b92505050919050565b6115c8611688565b83816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505082816020019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505081816040019060070b908160070b815250509392505050565b6040518060600160405280600073ffffffffffffffffffffffffffffffffffffffff16815260200160608152602001606081525090565b6040518060800160405280600073ffffffffffffffffffffffffffffffffffffffff168152602001600073ffffffffffffffffffffffffffffffffffffffff168152602001600060070b81526020016000151581525090565b6040518060600160405280600073ffffffffffffffffffffffffffffffffffffffff168152602001600060070b81526020016000151581525090565b6000604051905090565b600080fd5b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff8216905091905
0565b600061175c82611731565b9050919050565b61176c81611751565b811461177757600080fd5b50565b60008135905061178981611763565b92915050565b6000819050919050565b6117a28161178f565b81146117ad57600080fd5b50565b6000813590506117bf81611799565b92915050565b600080600080608085870312156117df576117de611727565b5b60006117ed8782880161177a565b94505060206117fe8782880161177a565b935050604061180f8782880161177a565b9250506060611820878288016117b0565b91505092959194509250565b60008160070b9050919050565b6118428161182c565b82525050565b600060208201905061185d6000830184611839565b92915050565b61186c8161182c565b811461187757600080fd5b50565b60008135905061188981611863565b92915050565b600080600080608085870312156118a9576118a8611727565b5b60006118b78782880161177a565b94505060206118c88782880161177a565b93505060406118d98782880161177a565b92505060606118ea8782880161187a565b91505092959194509250565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b61194982611900565b810181811067ffffffffffffffff8211171561196857611967611911565b5b80604052505050565b600061197b61171d565b90506119878282611940565b919050565b600067ffffffffffffffff8211156119a7576119a6611911565b5b6119b082611900565b9050602081019050919050565b82818337600083830152505050565b60006119df6119da8461198c565b611971565b9050828152602081018484840111156119fb576119fa6118fb565b5b611a068482856119bd565b509392505050565b600082601f830112611a2357611a226118f6565b5b8135611a338482602086016119cc565b91505092915050565b60008060408385031215611a5357611a52611727565b5b6000611a618582860161177a565b925050602083013567ffffffffffffffff811115611a8257611a8161172c565b5b611a8e85828601611a0e565b9150509250929050565b6000819050919050565b611aab81611a98565b82525050565b600081519050919050565b600082825260208201905092915050565b60005b83811015611aeb578082015181840152602081019050611ad0565b60008484015250505050565b6000611b0282611ab1565b611b0c8185611abc565b9350611b1c818560208601611acd565b611b2581611900565b840191505092915050565b6000604082019
050611b456000830185611aa2565b8181036020830152611b578184611af7565b90509392505050565b600067ffffffffffffffff821115611b7b57611b7a611911565b5b602082029050602081019050919050565b600080fd5b6000611ba4611b9f84611b60565b611971565b90508083825260208201905060208402830185811115611bc757611bc6611b8c565b5b835b81811015611bf05780611bdc888261177a565b845260208401935050602081019050611bc9565b5050509392505050565b600082601f830112611c0f57611c0e6118f6565b5b8135611c1f848260208601611b91565b91505092915050565b60008060008060808587031215611c4257611c41611727565b5b600085013567ffffffffffffffff811115611c6057611c5f61172c565b5b611c6c87828801611bfa565b945050602085013567ffffffffffffffff811115611c8d57611c8c61172c565b5b611c9987828801611bfa565b935050604085013567ffffffffffffffff811115611cba57611cb961172c565b5b611cc687828801611bfa565b9250506060611cd78782880161187a565b91505092959194509250565b60008060008060808587031215611cfd57611cfc611727565b5b6000611d0b8782880161177a565b9450506020611d1c8782880161177a565b935050604085013567ffffffffffffffff811115611d3d57611d3c61172c565b5b611d4987828801611bfa565b9250506060611d5a8782880161187a565b91505092959194509250565b600067ffffffffffffffff821115611d8157611d80611911565b5b602082029050602081019050919050565b6000611da5611da084611d66565b611971565b90508083825260208201905060208402830185811115611dc857611dc7611b8c565b5b835b81811015611df15780611ddd888261187a565b845260208401935050602081019050611dca565b5050509392505050565b600082601f830112611e1057611e0f6118f6565b5b8135611e20848260208601611d92565b91505092915050565b600080600080600080600080610100898b031215611e4a57611e49611727565b5b600089013567ffffffffffffffff811115611e6857611e6761172c565b5b611e748b828c01611bfa565b985050602089013567ffffffffffffffff811115611e9557611e9461172c565b5b611ea18b828c01611bfa565b975050604089013567ffffffffffffffff811115611ec257611ec161172c565b5b611ece8b828c01611bfa565b965050606089013567ffffffffffffffff811115611eef57611eee61172c565b5b611efb8b828c01611bfa565b955050608089013567ffffffffffffffff811115611f1c57611f1b61172c565b5b611f2
88b828c01611bfa565b94505060a089013567ffffffffffffffff811115611f4957611f4861172c565b5b611f558b828c01611bfa565b93505060c0611f668b828c0161187a565b92505060e089013567ffffffffffffffff811115611f8757611f8661172c565b5b611f938b828c01611dfb565b9150509295985092959890939650565b600080600060608486031215611fbc57611fbb611727565b5b6000611fca8682870161177a565b9350506020611fdb8682870161177a565b925050604084013567ffffffffffffffff811115611ffc57611ffb61172c565b5b61200886828701611bfa565b9150509250925092565b6000806000806080858703121561202c5761202b611727565b5b600085013567ffffffffffffffff81111561204a5761204961172c565b5b61205687828801611bfa565b945050602085013567ffffffffffffffff8111156120775761207661172c565b5b61208387828801611bfa565b935050604085013567ffffffffffffffff8111156120a4576120a361172c565b5b6120b087828801611bfa565b925050606085013567ffffffffffffffff8111156120d1576120d061172c565b5b6120dd87828801611dfb565b91505092959194509250565b6120f281611751565b82525050565b6121018161178f565b82525050565b600060808201905061211c60008301876120e9565b61212960208301866120e9565b61213660408301856120e9565b61214360608301846120f8565b95945050505050565b600081905092915050565b600061216282611ab1565b61216c818561214c565b935061217c818560208601611acd565b80840191505092915050565b60006121948284612157565b915081905092915050565b60008160030b9050919050565b6121b58161219f565b81146121c057600080fd5b50565b6000815190506121d2816121ac565b92915050565b6000602082840312156121ee576121ed611727565b5b60006121fc848285016121c3565b91505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600060408201905061224960008301856120e9565b818103602083015261225b8184611af7565b90509392505050565b60008115159050919050565b61227981612264565b82525050565b60006040820190506122946000830185612270565b81810360208301526122a68184611af7565b90509392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60006122e98261178f565b91507ffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffff820361231b5761231a6122af565b5b600182019050919050565b60006123318261178f565b915061233c8361178f565b9250828201905080821115612354576123536122af565b5b92915050565b60006123658261182c565b91506123708361182c565b925082820190507fffffffffffffffffffffffffffffffffffffffffffffffff80000000000000008112677fffffffffffffff821317156123b4576123b36122af565b5b92915050565b60006123c58261182c565b91507fffffffffffffffffffffffffffffffffffffffffffffffff800000000000000082036123f7576123f66122af565b5b816000039050919050565b600061240d8261182c565b9150677fffffffffffffff8203612427576124266122af565b5b600182019050919050565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b61246781611751565b82525050565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b6124a28161182c565b82525050565b6124b181612264565b82525050565b6060820160008201516124cd600085018261245e565b5060208201516124e06020850182612499565b5060408201516124f360408501826124a8565b50505050565b600061250583836124b7565b60608301905092915050565b6000602082019050919050565b60006125298261246d565b6125338185612478565b935061253e83612489565b8060005b8381101561256f57815161255688826124f9565b975061256183612511565b925050600181019050612542565b5085935050505092915050565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b6080820160008201516125be600085018261245e565b5060208201516125d1602085018261245e565b5060408201516125e46040850182612499565b5060608201516125f760608501826124a8565b50505050565b600061260983836125a8565b60808301905092915050565b6000602082019050919050565b600061262d8261257c565b6126378185612587565b935061264283612598565b8060005b8381101561267357815161265a88826125fd565b975061266583612615565b925050600181019050612646565b5085935050505092915050565b6000606083016000830151612698600086018261245e565b50602083015184820360208601526126b0828261251e565b915050604083015184820360408601526126ca8282612622565b9150508091505092915050565b60006126e38383612680565b905092915050565b6000602
082019050919050565b600061270382612432565b61270d818561243d565b93508360208202850161271f8561244e565b8060005b8581101561275b578484038952815161273c85826126d7565b9450612747836126eb565b925060208a01995050600181019050612723565b50829750879550505050505092915050565b6000602082019050818103600083015261278781846126f8565b90509291505056fea2646970667358221220f625020144994b84f26c8ed0edfb01250183ef2cb969750ea036dd2ef5fdf9cc64736f6c63430008120033 \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/Airdrop/Airdrop.json b/hedera-node/test-clients/src/main/resources/contract/contracts/Airdrop/Airdrop.json new file mode 100644 index 000000000000..e140bf4f0c5a --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/Airdrop/Airdrop.json @@ -0,0 +1,371 @@ +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "bool", + "name": "", + "type": "bool" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "CallResponseEvent", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "address[]", + "name": "token", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "nft", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "tokenSenders", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "tokenReceivers", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "nftSenders", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "nftReceivers", + "type": "address[]" + }, + { + "internalType": "int64", + "name": "tokenAmount", + "type": "int64" + }, + { + "internalType": "int64[]", + "name": "serials", + "type": "int64[]" + } + ], + "name": "mixedAirdrop", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + 
"internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "address", + "name": "receiver", + "type": "address" + }, + { + "internalType": "int64", + "name": "serial", + "type": "int64" + } + ], + "name": "nftAirdrop", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "address[]", + "name": "receivers", + "type": "address[]" + } + ], + "name": "nftAirdropDistribute", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address[]", + "name": "nft", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "senders", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "receivers", + "type": "address[]" + }, + { + "internalType": "int64[]", + "name": "serials", + "type": "int64[]" + } + ], + "name": "nftNAmountAirdrops", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "bytes", + "name": "encodedFunctionSelector", + "type": "bytes" + } + ], + "name": "redirectForToken", + "outputs": [ + { + "internalType": "int256", + "name": "responseCode", + "type": "int256" + }, + { + "internalType": "bytes", + "name": "response", + "type": "bytes" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + 
"name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "address", + "name": "receiver", + "type": "address" + }, + { + "internalType": "int64", + "name": "amount", + "type": "int64" + } + ], + "name": "tokenAirdrop", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "address[]", + "name": "receivers", + "type": "address[]" + }, + { + "internalType": "int64", + "name": "amount", + "type": "int64" + } + ], + "name": "tokenAirdropDistribute", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address[]", + "name": "tokens", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "senders", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "receivers", + "type": "address[]" + }, + { + "internalType": "int64", + "name": "amount", + "type": "int64" + } + ], + "name": "tokenNAmountAirdrops", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + 
"stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "serialNumber", + "type": "uint256" + } + ], + "name": "transferFromNFT", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/Airdrop/Airdrop.sol b/hedera-node/test-clients/src/main/resources/contract/contracts/Airdrop/Airdrop.sol new file mode 100644 index 000000000000..0dcda9d6db36 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/Airdrop/Airdrop.sol @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: Apache-2.0 +pragma solidity >=0.5.0 <0.9.0; +pragma experimental ABIEncoderV2; + +import "./HederaTokenService.sol"; + +contract Airdrop is HederaTokenService { + function tokenAirdrop(address token, address sender, address receiver, int64 amount) public returns (int64 responseCode) { + IHederaTokenService.TokenTransferList[] memory tokenTransfers = new IHederaTokenService.TokenTransferList[](1); + IHederaTokenService.TokenTransferList memory airdrop; + + airdrop.token = token; + airdrop.transfers = prepareAA(sender, receiver, amount); + tokenTransfers[0] = airdrop; + responseCode = airdropTokens(tokenTransfers); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return responseCode; + } + + function nftAirdrop(address token, address sender, address receiver, int64 serial)public returns (int64 responseCode) { + IHederaTokenService.TokenTransferList[] memory tokenTransfers = new IHederaTokenService.TokenTransferList[](1); + IHederaTokenService.TokenTransferList memory 
airdrop; + + airdrop.token = token; + IHederaTokenService.NftTransfer memory nftTransfer = prepareNftTransfer(sender, receiver, serial); + IHederaTokenService.NftTransfer[] memory nftTransfers = new IHederaTokenService.NftTransfer[](1); + nftTransfers[0] = nftTransfer; + airdrop.nftTransfers = nftTransfers; + tokenTransfers[0] = airdrop; + responseCode = airdropTokens(tokenTransfers); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return responseCode; + } + + function tokenNAmountAirdrops(address[] memory tokens, address[] memory senders, address[] memory receivers, int64 amount) public returns (int64 responseCode) { + uint256 length = senders.length; + IHederaTokenService.TokenTransferList[] memory tokenTransfers = new IHederaTokenService.TokenTransferList[](length); + for (uint256 i = 0; i < length; i++) + { + IHederaTokenService.TokenTransferList memory airdrop; + airdrop.token = tokens[i]; + airdrop.transfers = prepareAA(senders[i], receivers[i], amount); + tokenTransfers[i] = airdrop; + } + responseCode = airdropTokens(tokenTransfers); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return responseCode; + } + + function nftNAmountAirdrops(address[] memory nft, address[] memory senders, address[] memory receivers, int64[] memory serials) public returns (int64 responseCode) { + uint256 length = nft.length; + IHederaTokenService.TokenTransferList[] memory tokenTransfers = new IHederaTokenService.TokenTransferList[](length); + for (uint256 i = 0; i < length; i++) + { + IHederaTokenService.TokenTransferList memory airdrop; + airdrop.token = nft[i]; + IHederaTokenService.NftTransfer[] memory nftTransfers = new IHederaTokenService.NftTransfer[](1); + nftTransfers[0] = prepareNftTransfer(senders[i], receivers[i], serials[i]); + airdrop.nftTransfers = nftTransfers; + tokenTransfers[i] = airdrop; + } + responseCode = airdropTokens(tokenTransfers); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return 
responseCode; + } + + function tokenAirdropDistribute(address token, address sender, address[] memory receivers, int64 amount) public returns (int64 responseCode) { + uint256 length = receivers.length + 1; + IHederaTokenService.TokenTransferList[] memory tokenTransfers = new IHederaTokenService.TokenTransferList[](1); + IHederaTokenService.TokenTransferList memory airdrop; + airdrop.token = token; + IHederaTokenService.AccountAmount memory senderAA; + senderAA.accountID = sender; + int64 totalAmount = 0; + for (uint i = 0; i < receivers.length; i++) { + totalAmount += amount; + } + senderAA.amount = -totalAmount; + IHederaTokenService.AccountAmount[] memory transfers = new IHederaTokenService.AccountAmount[](length); + transfers[0] = senderAA; + for (uint i = 1; i < length; i++) + { + IHederaTokenService.AccountAmount memory receiverAA; + receiverAA.accountID = receivers[i]; + receiverAA.amount = amount; + transfers[i] = receiverAA; + } + airdrop.transfers = transfers; + tokenTransfers[0] = airdrop; + responseCode = airdropTokens(tokenTransfers); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return responseCode; + } + + function nftAirdropDistribute(address token, address sender, address[] memory receivers) public returns (int64 responseCode) { + uint256 length = receivers.length; + IHederaTokenService.TokenTransferList[] memory tokenTransfers = new IHederaTokenService.TokenTransferList[](1); + IHederaTokenService.TokenTransferList memory airdrop; + airdrop.token = token; + IHederaTokenService.NftTransfer[] memory nftTransfers = new IHederaTokenService.NftTransfer[](length); + for (uint i = 0; i < length; i++) { + int64 serial = 1; + nftTransfers[i] = prepareNftTransfer(sender, receivers[i], serial); + serial++; + } + airdrop.nftTransfers = nftTransfers; + tokenTransfers[0] = airdrop; + + responseCode = airdropTokens(tokenTransfers); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return responseCode; + } + + function 
mixedAirdrop(address[] memory token, address[] memory nft, address[] memory tokenSenders, address[] memory tokenReceivers, address[] memory nftSenders, address[] memory nftReceivers, int64 tokenAmount, int64[] memory serials) public returns (int64 responseCode) { + uint256 length = tokenSenders.length + nftSenders.length; + IHederaTokenService.TokenTransferList[] memory tokenTransfers = new IHederaTokenService.TokenTransferList[](length); + for (uint i = 0; i < tokenSenders.length; i++) + { + IHederaTokenService.TokenTransferList memory airdrop; + airdrop.token = token[i]; + airdrop.transfers = prepareAA(tokenSenders[i], tokenReceivers[i], tokenAmount); + tokenTransfers[i] = airdrop; + } + uint nftIndex = tokenSenders.length; + for (uint v = 0; nftIndex < length; v++) + { + IHederaTokenService.TokenTransferList memory airdrop; + airdrop.token = nft[v]; + IHederaTokenService.NftTransfer[] memory nftTransfers = new IHederaTokenService.NftTransfer[](1); + nftTransfers[0] = prepareNftTransfer(nftSenders[v], nftReceivers[v], serials[v]); + airdrop.nftTransfers = nftTransfers; + tokenTransfers[nftIndex] = airdrop; + nftIndex++; + } + responseCode = airdropTokens(tokenTransfers); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return responseCode; + } + + function prepareAA(address sender, address receiver, int64 amount) internal pure returns (IHederaTokenService.AccountAmount[] memory transfers) { + IHederaTokenService.AccountAmount memory aa1; + aa1.accountID = sender; + aa1.amount = -amount; + IHederaTokenService.AccountAmount memory aa2; + aa2.accountID = receiver; + aa2.amount = amount; + transfers = new IHederaTokenService.AccountAmount[](2); + transfers[0] = aa1; + transfers[1] = aa2; + return transfers; + } + + function prepareNftTransfer(address sender, address receiver, int64 serial) internal pure returns (IHederaTokenService.NftTransfer memory nftTransfer) { + nftTransfer.senderAccountID = sender; + nftTransfer.receiverAccountID = 
receiver; + nftTransfer.serialNumber = serial; + return nftTransfer; + } +} \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/CancelAirdrop/CancelAirdrop.bin b/hedera-node/test-clients/src/main/resources/contract/contracts/CancelAirdrop/CancelAirdrop.bin new file mode 100644 index 000000000000..6ba2a760b8d2 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/CancelAirdrop/CancelAirdrop.bin @@ -0,0 +1 @@ +608060405234801561001057600080fd5b506115f6806100206000396000f3fe608060405234801561001057600080fd5b50600436106100625760003560e01c806304e74b561461006757806315dacbea14610097578063618dc65e146100c7578063660297ab146100f85780638f68e13c146101285780639b23d3d914610158575b600080fd5b610081600480360381019061007c9190610bb9565b610188565b60405161008e9190610c28565b60405180910390f35b6100b160048036038101906100ac9190610c79565b6102d9565b6040516100be9190610c28565b60405180910390f35b6100e160048036038101906100dc9190610e26565b6103f7565b6040516100ef929190610f1a565b60405180910390f35b610112600480360381019061010d9190611101565b610553565b60405161011f9190610c28565b60405180910390f35b610142600480360381019061013d91906111d8565b610743565b60405161014f9190610c28565b60405180910390f35b610172600480360381019061016d9190610c79565b6108a7565b60405161017f9190610c28565b60405180910390f35b600080600167ffffffffffffffff8111156101a6576101a5610cfb565b5b6040519080825280602002602001820160405280156101df57816020015b6101cc610ada565b8152602001906001900390816101c45790505b5090506101ea610ada565b85816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505084816020019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505083816040019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505080826000815181106102a7576102a661123f565b5b60200260200101819052506102bb826109c5565b9250601660030b8360070b146102d057600080fd5b5
0509392505050565b600080600061016773ffffffffffffffffffffffffffffffffffffffff166315dacbea60e01b88888888604051602401610316949392919061128c565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050604051610380919061130d565b6000604051808303816000865af19150503d80600081146103bd576040519150601f19603f3d011682016040523d82523d6000602084013e6103c2565b606091505b5091509150816103d35760156103e8565b808060200190518101906103e7919061135d565b5b60030b92505050949350505050565b6000606060008061016773ffffffffffffffffffffffffffffffffffffffff1663618dc65e60e01b878760405160240161043292919061138a565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505060405161049c919061130d565b6000604051808303816000865af19150503d80600081146104d9576040519150601f19603f3d011682016040523d82523d6000602084013e6104de565b606091505b50915091507f4af4780e06fe8cb9df64b0794fa6f01399af979175bb988e35e0e57e594567bc82826040516105149291906113d5565b60405180910390a1816105385760156040518060200160405280600081525061053c565b6016815b8160030b9150809450819550505050509250929050565b6000808551905060008167ffffffffffffffff81111561057657610575610cfb565b5b6040519080825280602002602001820160405280156105af57816020015b61059c610ada565b8152602001906001900390816105945790505b50905060005b8281101561071a576105c5610ada565b8882815181106105d8576105d761123f565b5b6020026020010151816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505087828151811061062a5761062961123f565b5b6020026020010151816020019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505086828151811061067c5761067b61123f565b5b6020026020010151816040019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815
250508582815181106106ce576106cd61123f565b5b6020026020010151816060019060070b908160070b81525050808383815181106106fb576106fa61123f565b5b602002602001018190525050808061071290611434565b9150506105b5565b50610724816109c5565b9250601660030b8360070b1461073957600080fd5b5050949350505050565b600080600167ffffffffffffffff81111561076157610760610cfb565b5b60405190808252806020026020018201604052801561079a57816020015b610787610ada565b81526020019060019003908161077f5790505b5090506107a5610ada565b86816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505085816020019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505084816040019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505083816060019060070b908160070b8152505080826000815181106108745761087361123f565b5b6020026020010181905250610888826109c5565b9250601660030b8360070b1461089d57600080fd5b5050949350505050565b600080600061016773ffffffffffffffffffffffffffffffffffffffff16639b23d3d960e01b888888886040516024016108e4949392919061128c565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505060405161094e919061130d565b6000604051808303816000865af19150503d806000811461098b576040519150601f19603f3d011682016040523d82523d6000602084013e610990565b606091505b5091509150816109a15760156109b6565b808060200190518101906109b5919061135d565b5b60030b92505050949350505050565b600080600061016773ffffffffffffffffffffffffffffffffffffffff1663012ebcaf60e01b856040516024016109fc919061159e565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050604051610a66919061130d565b6000604051808303816000865af19150503d8060008114610aa3576040519150601f19603f3d011682016040523d82523d6000602084013e610aa
8565b606091505b509150915081610ab9576015610ace565b80806020019051810190610acd919061135d565b5b60030b92505050919050565b6040518060800160405280600073ffffffffffffffffffffffffffffffffffffffff168152602001600073ffffffffffffffffffffffffffffffffffffffff168152602001600073ffffffffffffffffffffffffffffffffffffffff168152602001600060070b81525090565b6000604051905090565b600080fd5b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000610b8682610b5b565b9050919050565b610b9681610b7b565b8114610ba157600080fd5b50565b600081359050610bb381610b8d565b92915050565b600080600060608486031215610bd257610bd1610b51565b5b6000610be086828701610ba4565b9350506020610bf186828701610ba4565b9250506040610c0286828701610ba4565b9150509250925092565b60008160070b9050919050565b610c2281610c0c565b82525050565b6000602082019050610c3d6000830184610c19565b92915050565b6000819050919050565b610c5681610c43565b8114610c6157600080fd5b50565b600081359050610c7381610c4d565b92915050565b60008060008060808587031215610c9357610c92610b51565b5b6000610ca187828801610ba4565b9450506020610cb287828801610ba4565b9350506040610cc387828801610ba4565b9250506060610cd487828801610c64565b91505092959194509250565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b610d3382610cea565b810181811067ffffffffffffffff82111715610d5257610d51610cfb565b5b80604052505050565b6000610d65610b47565b9050610d718282610d2a565b919050565b600067ffffffffffffffff821115610d9157610d90610cfb565b5b610d9a82610cea565b9050602081019050919050565b82818337600083830152505050565b6000610dc9610dc484610d76565b610d5b565b905082815260208101848484011115610de557610de4610ce5565b5b610df0848285610da7565b509392505050565b600082601f830112610e0d57610e0c610ce0565b5b8135610e1d848260208601610db6565b91505092915050565b60008060408385031215610e3d57610e3c610b51565b5b6000610e4b85828601610ba4565b925050602083013567ffffffffffffffff811115610e6c57610e6b610b56565b5b610e7885828601610df8565b9150509250929050565b6000819050919
050565b610e9581610e82565b82525050565b600081519050919050565b600082825260208201905092915050565b60005b83811015610ed5578082015181840152602081019050610eba565b60008484015250505050565b6000610eec82610e9b565b610ef68185610ea6565b9350610f06818560208601610eb7565b610f0f81610cea565b840191505092915050565b6000604082019050610f2f6000830185610e8c565b8181036020830152610f418184610ee1565b90509392505050565b600067ffffffffffffffff821115610f6557610f64610cfb565b5b602082029050602081019050919050565b600080fd5b6000610f8e610f8984610f4a565b610d5b565b90508083825260208201905060208402830185811115610fb157610fb0610f76565b5b835b81811015610fda5780610fc68882610ba4565b845260208401935050602081019050610fb3565b5050509392505050565b600082601f830112610ff957610ff8610ce0565b5b8135611009848260208601610f7b565b91505092915050565b600067ffffffffffffffff82111561102d5761102c610cfb565b5b602082029050602081019050919050565b61104781610c0c565b811461105257600080fd5b50565b6000813590506110648161103e565b92915050565b600061107d61107884611012565b610d5b565b905080838252602082019050602084028301858111156110a05761109f610f76565b5b835b818110156110c957806110b58882611055565b8452602084019350506020810190506110a2565b5050509392505050565b600082601f8301126110e8576110e7610ce0565b5b81356110f884826020860161106a565b91505092915050565b6000806000806080858703121561111b5761111a610b51565b5b600085013567ffffffffffffffff81111561113957611138610b56565b5b61114587828801610fe4565b945050602085013567ffffffffffffffff81111561116657611165610b56565b5b61117287828801610fe4565b935050604085013567ffffffffffffffff81111561119357611192610b56565b5b61119f87828801610fe4565b925050606085013567ffffffffffffffff8111156111c0576111bf610b56565b5b6111cc878288016110d3565b91505092959194509250565b600080600080608085870312156111f2576111f1610b51565b5b600061120087828801610ba4565b945050602061121187828801610ba4565b935050604061122287828801610ba4565b925050606061123387828801611055565b91505092959194509250565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b61127
781610b7b565b82525050565b61128681610c43565b82525050565b60006080820190506112a1600083018761126e565b6112ae602083018661126e565b6112bb604083018561126e565b6112c8606083018461127d565b95945050505050565b600081905092915050565b60006112e782610e9b565b6112f181856112d1565b9350611301818560208601610eb7565b80840191505092915050565b600061131982846112dc565b915081905092915050565b60008160030b9050919050565b61133a81611324565b811461134557600080fd5b50565b60008151905061135781611331565b92915050565b60006020828403121561137357611372610b51565b5b600061138184828501611348565b91505092915050565b600060408201905061139f600083018561126e565b81810360208301526113b18184610ee1565b90509392505050565b60008115159050919050565b6113cf816113ba565b82525050565b60006040820190506113ea60008301856113c6565b81810360208301526113fc8184610ee1565b90509392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600061143f82610c43565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361147157611470611405565b5b600182019050919050565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b6114b181610b7b565b82525050565b6114c081610c0c565b82525050565b6080820160008201516114dc60008501826114a8565b5060208201516114ef60208501826114a8565b50604082015161150260408501826114a8565b50606082015161151560608501826114b7565b50505050565b600061152783836114c6565b60808301905092915050565b6000602082019050919050565b600061154b8261147c565b6115558185611487565b935061156083611498565b8060005b83811015611591578151611578888261151b565b975061158383611533565b925050600181019050611564565b5085935050505092915050565b600060208201905081810360008301526115b88184611540565b90509291505056fea2646970667358221220c5d60d8181e22ee9d2bb3483d80e127d3f2e1fc8c2a3f77e6c76984092da074d64736f6c63430008120033 \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/CancelAirdrop/CancelAirdrop.json 
b/hedera-node/test-clients/src/main/resources/contract/contracts/CancelAirdrop/CancelAirdrop.json new file mode 100644 index 000000000000..083648cd3f13 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/CancelAirdrop/CancelAirdrop.json @@ -0,0 +1,215 @@ +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "bool", + "name": "", + "type": "bool" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "CallResponseEvent", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "address", + "name": "receiver", + "type": "address" + }, + { + "internalType": "address", + "name": "token", + "type": "address" + } + ], + "name": "cancelAirdrop", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address[]", + "name": "senders", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "receivers", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "tokens", + "type": "address[]" + }, + { + "internalType": "int64[]", + "name": "serials", + "type": "int64[]" + } + ], + "name": "cancelAirdrops", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "address", + "name": "receiver", + "type": "address" + }, + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "int64", + "name": "serial", + "type": "int64" + } + ], + "name": "cancelNFTAirdrop", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } 
+ ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "bytes", + "name": "encodedFunctionSelector", + "type": "bytes" + } + ], + "name": "redirectForToken", + "outputs": [ + { + "internalType": "int256", + "name": "responseCode", + "type": "int256" + }, + { + "internalType": "bytes", + "name": "response", + "type": "bytes" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "serialNumber", + "type": "uint256" + } + ], + "name": "transferFromNFT", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/CancelAirdrop/CancelAirdrop.sol b/hedera-node/test-clients/src/main/resources/contract/contracts/CancelAirdrop/CancelAirdrop.sol new file mode 100644 index 000000000000..11a4f8e0fb07 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/CancelAirdrop/CancelAirdrop.sol @@ -0,0 +1,63 @@ +// 
SPDX-License-Identifier: Apache-2.0 +pragma solidity >=0.5.0 <0.9.0; +pragma experimental ABIEncoderV2; + +import "./HederaTokenService.sol"; + +contract CancelAirdrop is HederaTokenService { + + function cancelAirdrop(address sender, address receiver, address token) public returns(int64 responseCode){ + IHederaTokenService.PendingAirdrop[] memory pendingAirdrops = new IHederaTokenService.PendingAirdrop[](1); + + IHederaTokenService.PendingAirdrop memory pendingAirdrop; + pendingAirdrop.sender = sender; + pendingAirdrop.receiver = receiver; + pendingAirdrop.token = token; + + pendingAirdrops[0] = pendingAirdrop; + + responseCode = cancelAirdrops(pendingAirdrops); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return responseCode; + } + + function cancelNFTAirdrop(address sender, address receiver, address token, int64 serial) public returns(int64 responseCode){ + IHederaTokenService.PendingAirdrop[] memory pendingAirdrops = new IHederaTokenService.PendingAirdrop[](1); + + IHederaTokenService.PendingAirdrop memory pendingAirdrop; + pendingAirdrop.sender = sender; + pendingAirdrop.receiver = receiver; + pendingAirdrop.token = token; + pendingAirdrop.serial = serial; + + pendingAirdrops[0] = pendingAirdrop; + + responseCode = cancelAirdrops(pendingAirdrops); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return responseCode; + } + + function cancelAirdrops(address[] memory senders, address[] memory receivers, address[] memory tokens, int64[] memory serials) public returns (int64 responseCode) { + uint length = senders.length; + IHederaTokenService.PendingAirdrop[] memory pendingAirdrops = new IHederaTokenService.PendingAirdrop[](length); + for (uint i = 0; i < length; i++) { + IHederaTokenService.PendingAirdrop memory pendingAirdrop; + pendingAirdrop.sender = senders[i]; + pendingAirdrop.receiver = receivers[i]; + pendingAirdrop.token = tokens[i]; + pendingAirdrop.serial = serials[i]; + + pendingAirdrops[i] = 
pendingAirdrop; + } + + responseCode = cancelAirdrops(pendingAirdrops); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return responseCode; + } +} \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/ClaimAirdrop/ClaimAirdrop.bin b/hedera-node/test-clients/src/main/resources/contract/contracts/ClaimAirdrop/ClaimAirdrop.bin new file mode 100644 index 000000000000..abdf3b1677a1 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/ClaimAirdrop/ClaimAirdrop.bin @@ -0,0 +1 @@ +608060405234801561001057600080fd5b506115f6806100206000396000f3fe608060405234801561001057600080fd5b50600436106100625760003560e01c806315dacbea14610067578063482bf6a4146100975780635e1c75e1146100c7578063618dc65e146100f757806370b25e5c146101285780639b23d3d914610158575b600080fd5b610081600480360381019061007c9190610bef565b610188565b60405161008e9190610c72565b60405180910390f35b6100b160048036038101906100ac9190610cb9565b6102a6565b6040516100be9190610c72565b60405180910390f35b6100e160048036038101906100dc9190610d20565b61040a565b6040516100ee9190610c72565b60405180910390f35b610111600480360381019061010c9190610eb9565b61055b565b60405161011f929190610fad565b60405180910390f35b610142600480360381019061013d9190611168565b6106b7565b60405161014f9190610c72565b60405180910390f35b610172600480360381019061016d9190610bef565b6108a7565b60405161017f9190610c72565b60405180910390f35b600080600061016773ffffffffffffffffffffffffffffffffffffffff166315dacbea60e01b888888886040516024016101c5949392919061125d565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505060405161022f91906112de565b6000604051808303816000865af19150503d806000811461026c576040519150601f19603f3d011682016040523d82523d6000602084013e610271565b606091505b509150915081610282576015610297565b80806020019051810190610296919061132e565b5b60030b92505050949350505050
565b600080600167ffffffffffffffff8111156102c4576102c3610d8e565b5b6040519080825280602002602001820160405280156102fd57816020015b6102ea610ada565b8152602001906001900390816102e25790505b509050610308610ada565b86816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505085816020019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505084816040019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505083816060019060070b908160070b8152505080826000815181106103d7576103d661135b565b5b60200260200101819052506103eb826109c5565b9250601660030b8360070b1461040057600080fd5b5050949350505050565b600080600167ffffffffffffffff81111561042857610427610d8e565b5b60405190808252806020026020018201604052801561046157816020015b61044e610ada565b8152602001906001900390816104465790505b50905061046c610ada565b85816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505084816020019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505083816040019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505080826000815181106105295761052861135b565b5b602002602001018190525061053d826109c5565b9250601660030b8360070b1461055257600080fd5b50509392505050565b6000606060008061016773ffffffffffffffffffffffffffffffffffffffff1663618dc65e60e01b878760405160240161059692919061138a565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505060405161060091906112de565b6000604051808303816000865af19150503d806000811461063d576040519150601f19603f3d011682016040523d82523d6000602084013e610642565b606091505b50915091507f4af4780e06fe8cb9df64b0794fa6f01399af979175bb988e35e0e57e594567bc82826040516106789291906113d5565b60405180910390a18161069c5760156040518060
20016040528060008152506106a0565b6016815b8160030b9150809450819550505050509250929050565b6000808551905060008167ffffffffffffffff8111156106da576106d9610d8e565b5b60405190808252806020026020018201604052801561071357816020015b610700610ada565b8152602001906001900390816106f85790505b50905060005b8281101561087e57610729610ada565b88828151811061073c5761073b61135b565b5b6020026020010151816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505087828151811061078e5761078d61135b565b5b6020026020010151816020019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250508682815181106107e0576107df61135b565b5b6020026020010151816040019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250508582815181106108325761083161135b565b5b6020026020010151816060019060070b908160070b815250508083838151811061085f5761085e61135b565b5b602002602001018190525050808061087690611434565b915050610719565b50610888816109c5565b9250601660030b8360070b1461089d57600080fd5b5050949350505050565b600080600061016773ffffffffffffffffffffffffffffffffffffffff16639b23d3d960e01b888888886040516024016108e4949392919061125d565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505060405161094e91906112de565b6000604051808303816000865af19150503d806000811461098b576040519150601f19603f3d011682016040523d82523d6000602084013e610990565b606091505b5091509150816109a15760156109b6565b808060200190518101906109b5919061132e565b5b60030b92505050949350505050565b600080600061016773ffffffffffffffffffffffffffffffffffffffff16630596164160e01b856040516024016109fc919061159e565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050604051610a6691906112de565b6000604051808303816000865af1
9150503d8060008114610aa3576040519150601f19603f3d011682016040523d82523d6000602084013e610aa8565b606091505b509150915081610ab9576015610ace565b80806020019051810190610acd919061132e565b5b60030b92505050919050565b6040518060800160405280600073ffffffffffffffffffffffffffffffffffffffff168152602001600073ffffffffffffffffffffffffffffffffffffffff168152602001600073ffffffffffffffffffffffffffffffffffffffff168152602001600060070b81525090565b6000604051905090565b600080fd5b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000610b8682610b5b565b9050919050565b610b9681610b7b565b8114610ba157600080fd5b50565b600081359050610bb381610b8d565b92915050565b6000819050919050565b610bcc81610bb9565b8114610bd757600080fd5b50565b600081359050610be981610bc3565b92915050565b60008060008060808587031215610c0957610c08610b51565b5b6000610c1787828801610ba4565b9450506020610c2887828801610ba4565b9350506040610c3987828801610ba4565b9250506060610c4a87828801610bda565b91505092959194509250565b60008160070b9050919050565b610c6c81610c56565b82525050565b6000602082019050610c876000830184610c63565b92915050565b610c9681610c56565b8114610ca157600080fd5b50565b600081359050610cb381610c8d565b92915050565b60008060008060808587031215610cd357610cd2610b51565b5b6000610ce187828801610ba4565b9450506020610cf287828801610ba4565b9350506040610d0387828801610ba4565b9250506060610d1487828801610ca4565b91505092959194509250565b600080600060608486031215610d3957610d38610b51565b5b6000610d4786828701610ba4565b9350506020610d5886828701610ba4565b9250506040610d6986828701610ba4565b9150509250925092565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b610dc682610d7d565b810181811067ffffffffffffffff82111715610de557610de4610d8e565b5b80604052505050565b6000610df8610b47565b9050610e048282610dbd565b919050565b600067ffffffffffffffff821115610e2457610e23610d8e565b5b610e2d82610d7d565b9050602081019050919050565b82818337600083830152505050565b6000610e5c610e5784610e09565b610dee565b
905082815260208101848484011115610e7857610e77610d78565b5b610e83848285610e3a565b509392505050565b600082601f830112610ea057610e9f610d73565b5b8135610eb0848260208601610e49565b91505092915050565b60008060408385031215610ed057610ecf610b51565b5b6000610ede85828601610ba4565b925050602083013567ffffffffffffffff811115610eff57610efe610b56565b5b610f0b85828601610e8b565b9150509250929050565b6000819050919050565b610f2881610f15565b82525050565b600081519050919050565b600082825260208201905092915050565b60005b83811015610f68578082015181840152602081019050610f4d565b60008484015250505050565b6000610f7f82610f2e565b610f898185610f39565b9350610f99818560208601610f4a565b610fa281610d7d565b840191505092915050565b6000604082019050610fc26000830185610f1f565b8181036020830152610fd48184610f74565b90509392505050565b600067ffffffffffffffff821115610ff857610ff7610d8e565b5b602082029050602081019050919050565b600080fd5b600061102161101c84610fdd565b610dee565b9050808382526020820190506020840283018581111561104457611043611009565b5b835b8181101561106d57806110598882610ba4565b845260208401935050602081019050611046565b5050509392505050565b600082601f83011261108c5761108b610d73565b5b813561109c84826020860161100e565b91505092915050565b600067ffffffffffffffff8211156110c0576110bf610d8e565b5b602082029050602081019050919050565b60006110e46110df846110a5565b610dee565b9050808382526020820190506020840283018581111561110757611106611009565b5b835b81811015611130578061111c8882610ca4565b845260208401935050602081019050611109565b5050509392505050565b600082601f83011261114f5761114e610d73565b5b813561115f8482602086016110d1565b91505092915050565b6000806000806080858703121561118257611181610b51565b5b600085013567ffffffffffffffff8111156111a05761119f610b56565b5b6111ac87828801611077565b945050602085013567ffffffffffffffff8111156111cd576111cc610b56565b5b6111d987828801611077565b935050604085013567ffffffffffffffff8111156111fa576111f9610b56565b5b61120687828801611077565b925050606085013567ffffffffffffffff81111561122757611226610b56565b5b6112338782880161113a565b91505092959194509250565b6112488161
0b7b565b82525050565b61125781610bb9565b82525050565b6000608082019050611272600083018761123f565b61127f602083018661123f565b61128c604083018561123f565b611299606083018461124e565b95945050505050565b600081905092915050565b60006112b882610f2e565b6112c281856112a2565b93506112d2818560208601610f4a565b80840191505092915050565b60006112ea82846112ad565b915081905092915050565b60008160030b9050919050565b61130b816112f5565b811461131657600080fd5b50565b60008151905061132881611302565b92915050565b60006020828403121561134457611343610b51565b5b600061135284828501611319565b91505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600060408201905061139f600083018561123f565b81810360208301526113b18184610f74565b90509392505050565b60008115159050919050565b6113cf816113ba565b82525050565b60006040820190506113ea60008301856113c6565b81810360208301526113fc8184610f74565b90509392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600061143f82610bb9565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361147157611470611405565b5b600182019050919050565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b6114b181610b7b565b82525050565b6114c081610c56565b82525050565b6080820160008201516114dc60008501826114a8565b5060208201516114ef60208501826114a8565b50604082015161150260408501826114a8565b50606082015161151560608501826114b7565b50505050565b600061152783836114c6565b60808301905092915050565b6000602082019050919050565b600061154b8261147c565b6115558185611487565b935061156083611498565b8060005b83811015611591578151611578888261151b565b975061158383611533565b925050600181019050611564565b5085935050505092915050565b600060208201905081810360008301526115b88184611540565b90509291505056fea2646970667358221220f12e32af50d51ffc5be75444f1e0048a484234296fc93277f8077bf3be18fcc364736f6c63430008120033 \ No newline at end of file diff --git 
a/hedera-node/test-clients/src/main/resources/contract/contracts/ClaimAirdrop/ClaimAirdrop.json b/hedera-node/test-clients/src/main/resources/contract/contracts/ClaimAirdrop/ClaimAirdrop.json new file mode 100644 index 000000000000..ae2ebb5c53b4 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/ClaimAirdrop/ClaimAirdrop.json @@ -0,0 +1,215 @@ +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "bool", + "name": "", + "type": "bool" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "CallResponseEvent", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "address", + "name": "receiver", + "type": "address" + }, + { + "internalType": "address", + "name": "token", + "type": "address" + } + ], + "name": "claim", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address[]", + "name": "senders", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "receivers", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "tokens", + "type": "address[]" + }, + { + "internalType": "int64[]", + "name": "serials", + "type": "int64[]" + } + ], + "name": "claimAirdrops", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "address", + "name": "receiver", + "type": "address" + }, + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "int64", + "name": "serial", + "type": "int64" + } + ], + "name": "claimNFTAirdrop", + "outputs": 
[ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "bytes", + "name": "encodedFunctionSelector", + "type": "bytes" + } + ], + "name": "redirectForToken", + "outputs": [ + { + "internalType": "int256", + "name": "responseCode", + "type": "int256" + }, + { + "internalType": "bytes", + "name": "response", + "type": "bytes" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "serialNumber", + "type": "uint256" + } + ], + "name": "transferFromNFT", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/ClaimAirdrop/ClaimAirdrop.sol b/hedera-node/test-clients/src/main/resources/contract/contracts/ClaimAirdrop/ClaimAirdrop.sol new file mode 100644 index 000000000000..779a30a9b37e --- /dev/null +++ 
b/hedera-node/test-clients/src/main/resources/contract/contracts/ClaimAirdrop/ClaimAirdrop.sol @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: Apache-2.0 +pragma solidity >=0.5.0 <0.9.0; +pragma experimental ABIEncoderV2; + +import "./HederaTokenService.sol"; + +contract ClaimAirdrop is HederaTokenService { + + function claim(address sender, address receiver, address token) public returns(int64 responseCode){ + IHederaTokenService.PendingAirdrop[] memory pendingAirdrops = new IHederaTokenService.PendingAirdrop[](1); + + IHederaTokenService.PendingAirdrop memory pendingAirdrop; + pendingAirdrop.sender = sender; + pendingAirdrop.receiver = receiver; + pendingAirdrop.token = token; + + pendingAirdrops[0] = pendingAirdrop; + + responseCode = claimAirdrops(pendingAirdrops); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return responseCode; + } + + function claimNFTAirdrop(address sender, address receiver, address token, int64 serial) public returns(int64 responseCode){ + IHederaTokenService.PendingAirdrop[] memory pendingAirdrops = new IHederaTokenService.PendingAirdrop[](1); + + IHederaTokenService.PendingAirdrop memory pendingAirdrop; + pendingAirdrop.sender = sender; + pendingAirdrop.receiver = receiver; + pendingAirdrop.token = token; + pendingAirdrop.serial = serial; + + pendingAirdrops[0] = pendingAirdrop; + + responseCode = claimAirdrops(pendingAirdrops); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return responseCode; + } + + function claimAirdrops(address[] memory senders, address[] memory receivers, address[] memory tokens, int64[] memory serials) public returns (int64 responseCode) { + uint length = senders.length; + IHederaTokenService.PendingAirdrop[] memory pendingAirdrops = new IHederaTokenService.PendingAirdrop[](length); + for (uint i = 0; i < length; i++) { + IHederaTokenService.PendingAirdrop memory pendingAirdrop; + pendingAirdrop.sender = senders[i]; + pendingAirdrop.receiver = receivers[i]; + 
pendingAirdrop.token = tokens[i]; + pendingAirdrop.serial = serials[i]; + + pendingAirdrops[i] = pendingAirdrop; + } + + responseCode = claimAirdrops(pendingAirdrops); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return responseCode; + } +} \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904Reject/HRC904Reject.bin b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904Reject/HRC904Reject.bin new file mode 100644 index 000000000000..e593fa57b65c --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904Reject/HRC904Reject.bin @@ -0,0 +1 @@ +608060405234801561001057600080fd5b50600436106100365760003560e01c806376c6b3911461003b578063a869c78a14610059575b600080fd5b610043610089565b60405161005091906101a2565b60405180910390f35b610073600480360381019061006e9190610356565b610101565b60405161008091906101a2565b60405180910390f35b60003073ffffffffffffffffffffffffffffffffffffffff166376c6b3916040518163ffffffff1660e01b81526004016020604051808303816000875af11580156100d8573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100fc91906103b4565b905090565b60003073ffffffffffffffffffffffffffffffffffffffff1663a869c78a836040518263ffffffff1660e01b815260040161013c919061049f565b6020604051808303816000875af115801561015b573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061017f91906103b4565b9050919050565b60008160070b9050919050565b61019c81610186565b82525050565b60006020820190506101b76000830184610193565b92915050565b6000604051905090565b600080fd5b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b61021f826101d6565b810181811067ffffffffffffffff8211171561023e5761023d6101e7565b5b80604052505050565b60006102516101bd565b905061025d8282610216565b919050565b600067ffffffffffffffff82111561027d5761027c6101e7565b5b602082029050602081019050919050565b600080fd5b61
029c81610186565b81146102a757600080fd5b50565b6000813590506102b981610293565b92915050565b60006102d26102cd84610262565b610247565b905080838252602082019050602084028301858111156102f5576102f461028e565b5b835b8181101561031e578061030a88826102aa565b8452602084019350506020810190506102f7565b5050509392505050565b600082601f83011261033d5761033c6101d1565b5b813561034d8482602086016102bf565b91505092915050565b60006020828403121561036c5761036b6101c7565b5b600082013567ffffffffffffffff81111561038a576103896101cc565b5b61039684828501610328565b91505092915050565b6000815190506103ae81610293565b92915050565b6000602082840312156103ca576103c96101c7565b5b60006103d88482850161039f565b91505092915050565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b61041681610186565b82525050565b6000610428838361040d565b60208301905092915050565b6000602082019050919050565b600061044c826103e1565b61045681856103ec565b9350610461836103fd565b8060005b83811015610492578151610479888261041c565b975061048483610434565b925050600181019050610465565b5085935050505092915050565b600060208201905081810360008301526104b98184610441565b90509291505056fea264697066735822122090079297d3821301f7590ff582a2d38263cf93cb98de3e7f4d3246266bd4951364736f6c63430008120033 \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904Reject/HRC904Reject.json b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904Reject/HRC904Reject.json new file mode 100644 index 000000000000..e3c00f29a926 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904Reject/HRC904Reject.json @@ -0,0 +1,34 @@ +[ + { + "inputs": [], + "name": "rejectTokenFT", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "int64[]", + "name": "serialNumbers", + "type": "int64[]" + } + ], + "name": "rejectTokenNFTs", + "outputs": [ + 
{ + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904Reject/HRC904Reject.sol b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904Reject/HRC904Reject.sol new file mode 100644 index 000000000000..823039d25ea0 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904Reject/HRC904Reject.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: Apache-2.0 +pragma solidity ^0.8.0; + +interface IHRC904TokenReject { + function rejectTokenFT() external returns (int64 responseCode); + function rejectTokenNFTs(int64[] memory serialNumbers) external returns (int64 responseCode); +} + +contract HRC904TokenReject is IHRC904TokenReject { + function rejectTokenFT() public returns (int64 responseCode) { + return HRC904TokenReject(this).rejectTokenFT(); + } + + function rejectTokenNFTs(int64[] memory serialNumbers) public returns (int64 responseCode) { + return HRC904TokenReject(this).rejectTokenNFTs(serialNumbers); + } +} \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenCancel/HRC904TokenCancel.bin b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenCancel/HRC904TokenCancel.bin new file mode 100644 index 000000000000..a4d90d5c9ba9 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenCancel/HRC904TokenCancel.bin @@ -0,0 +1 @@ 
+608060405234801561001057600080fd5b506103a6806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063ad4917cf1461003b578063cef5b7051461006b575b600080fd5b61005560048036038101906100509190610244565b61009b565b6040516100629190610293565b60405180910390f35b610085600480360381019061008091906102ae565b610123565b6040516100929190610293565b60405180910390f35b60003073ffffffffffffffffffffffffffffffffffffffff1663ad4917cf84846040518363ffffffff1660e01b81526004016100d89291906102ea565b6020604051808303816000875af11580156100f7573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061011b9190610328565b905092915050565b60003073ffffffffffffffffffffffffffffffffffffffff1663cef5b705836040518263ffffffff1660e01b815260040161015e9190610355565b6020604051808303816000875af115801561017d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906101a19190610328565b9050919050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006101d8826101ad565b9050919050565b6101e8816101cd565b81146101f357600080fd5b50565b600081359050610205816101df565b92915050565b60008160070b9050919050565b6102218161020b565b811461022c57600080fd5b50565b60008135905061023e81610218565b92915050565b6000806040838503121561025b5761025a6101a8565b5b6000610269858286016101f6565b925050602061027a8582860161022f565b9150509250929050565b61028d8161020b565b82525050565b60006020820190506102a86000830184610284565b92915050565b6000602082840312156102c4576102c36101a8565b5b60006102d2848285016101f6565b91505092915050565b6102e4816101cd565b82525050565b60006040820190506102ff60008301856102db565b61030c6020830184610284565b9392505050565b60008151905061032281610218565b92915050565b60006020828403121561033e5761033d6101a8565b5b600061034c84828501610313565b91505092915050565b600060208201905061036a60008301846102db565b9291505056fea2646970667358221220f5eee7da8ac63e14ba98114de96d5ac10438085180aca5b0e83165db622198b264736f6c63430008120033 \ No newline at end of file diff --git 
a/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenCancel/HRC904TokenCancel.json b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenCancel/HRC904TokenCancel.json new file mode 100644 index 000000000000..f234594520c8 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenCancel/HRC904TokenCancel.json @@ -0,0 +1,45 @@ +[ + { + "inputs": [ + { + "internalType": "address", + "name": "receiver", + "type": "address" + } + ], + "name": "cancelAirdropFT", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "receiver", + "type": "address" + }, + { + "internalType": "int64", + "name": "serial", + "type": "int64" + } + ], + "name": "cancelAirdropNFT", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenCancel/HRC904TokenCancel.sol b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenCancel/HRC904TokenCancel.sol new file mode 100644 index 000000000000..df94cd6ed3fc --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenCancel/HRC904TokenCancel.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: Apache-2.0 +pragma solidity ^0.8.0; + +interface IHRC904TokenCancel { + function cancelAirdropFT(address receiverAddress) external returns (int64 responseCode); + function cancelAirdropNFT(address receiverAddress, int64 serialNumber) external returns (int64 responseCode); +} + +contract HRC904TokenCancel is IHRC904TokenCancel{ + function cancelAirdropFT(address receiver) public returns (int64 responseCode) { + return 
IHRC904TokenCancel(this).cancelAirdropFT(receiver); + } + + function cancelAirdropNFT(address receiver, int64 serial) public returns (int64 responseCode) { + return IHRC904TokenCancel(this).cancelAirdropNFT(receiver, serial); + } +} \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenClaim/HRC904TokenClaim.bin b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenClaim/HRC904TokenClaim.bin new file mode 100644 index 000000000000..2b13ea934ede --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenClaim/HRC904TokenClaim.bin @@ -0,0 +1 @@ +608060405234801561001057600080fd5b506103a6806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806363ada5d71461003b578063a83bc5b21461006b575b600080fd5b61005560048036038101906100509190610244565b61009b565b6040516100629190610293565b60405180910390f35b610085600480360381019061008091906102ae565b610123565b6040516100929190610293565b60405180910390f35b60003073ffffffffffffffffffffffffffffffffffffffff166363ada5d784846040518363ffffffff1660e01b81526004016100d89291906102ea565b6020604051808303816000875af11580156100f7573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061011b9190610328565b905092915050565b60003073ffffffffffffffffffffffffffffffffffffffff1663a83bc5b2836040518263ffffffff1660e01b815260040161015e9190610355565b6020604051808303816000875af115801561017d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906101a19190610328565b9050919050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006101d8826101ad565b9050919050565b6101e8816101cd565b81146101f357600080fd5b50565b600081359050610205816101df565b92915050565b60008160070b9050919050565b6102218161020b565b811461022c57600080fd5b50565b60008135905061023e81610218565b92915050565b6000806040838503121561025b5761025a6101a8565b5b6000610269858286016101f6565b925050602061027a8582860161022f56
5b9150509250929050565b61028d8161020b565b82525050565b60006020820190506102a86000830184610284565b92915050565b6000602082840312156102c4576102c36101a8565b5b60006102d2848285016101f6565b91505092915050565b6102e4816101cd565b82525050565b60006040820190506102ff60008301856102db565b61030c6020830184610284565b9392505050565b60008151905061032281610218565b92915050565b60006020828403121561033e5761033d6101a8565b5b600061034c84828501610313565b91505092915050565b600060208201905061036a60008301846102db565b9291505056fea26469706673582212202abfda89e671ab879c88bdd36665614ef3e65b7dee575a5336c7c7152df56dda64736f6c63430008120033 \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenClaim/HRC904TokenClaim.json b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenClaim/HRC904TokenClaim.json new file mode 100644 index 000000000000..be014264239d --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenClaim/HRC904TokenClaim.json @@ -0,0 +1,45 @@ +[ + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + } + ], + "name": "claimAirdropFT", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "int64", + "name": "serial", + "type": "int64" + } + ], + "name": "claimAirdropNFT", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenClaim/HRC904TokenClaim.sol b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenClaim/HRC904TokenClaim.sol new file mode 100644 index 000000000000..1099bbbc4294 --- 
/dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/HRC904TokenClaim/HRC904TokenClaim.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: Apache-2.0 +pragma solidity ^0.8.0; + +interface IHRC904TokenClaim { + function claimAirdropFT(address senderAddress) external returns (int64 responseCode); + function claimAirdropNFT(address senderAddress, int64 serialNumber) external returns (int64 responseCode); +} + +contract HRC904TokenClaim is IHRC904TokenClaim{ + function claimAirdropFT(address sender) public returns (int64 responseCode) { + return IHRC904TokenClaim(this).claimAirdropFT(sender); + } + + function claimAirdropNFT(address sender, int64 serial) public returns (int64 responseCode) { + return IHRC904TokenClaim(this).claimAirdropNFT(sender, serial); + } +} \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/IHRC632/IHRC632.json b/hedera-node/test-clients/src/main/resources/contract/contracts/IHRC632/IHRC632.json index cfe9e8070875..e9ff2097d28e 100644 --- a/hedera-node/test-clients/src/main/resources/contract/contracts/IHRC632/IHRC632.json +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/IHRC632/IHRC632.json @@ -1 +1 @@ -[{"inputs":[{"internalType":"address","name":"spender","type":"address"}],"name":"hbarAllowance","outputs":[{"internalType":"int64","name":"responseCode","type":"int64"},{"internalType":"int256","name":"allowance","type":"int256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"int256","name":"amount","type":"int256"}],"name":"hbarApprove","outputs":[{"internalType":"int64","name":"responseCode","type":"int64"}],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file 
+[{"inputs":[{"internalType":"address","name":"spender","type":"address"}],"name":"hbarAllowance","outputs":[{"internalType":"int64","name":"responseCode","type":"int64"},{"internalType":"int256","name":"allowance","type":"int256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"int256","name":"amount","type":"int256"}],"name":"hbarApprove","outputs":[{"internalType":"int64","name":"responseCode","type":"int64"}],"stateMutability":"nonpayable", "type":"function"}] \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/IHRC904UnlimitedAutoAssociations/IHRC904UnlimitedAutoAssociations.bin b/hedera-node/test-clients/src/main/resources/contract/contracts/IHRC904UnlimitedAutoAssociations/IHRC904UnlimitedAutoAssociations.bin new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/IHRC904UnlimitedAutoAssociations/IHRC904UnlimitedAutoAssociations.json b/hedera-node/test-clients/src/main/resources/contract/contracts/IHRC904UnlimitedAutoAssociations/IHRC904UnlimitedAutoAssociations.json new file mode 100644 index 000000000000..d21d29619e29 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/IHRC904UnlimitedAutoAssociations/IHRC904UnlimitedAutoAssociations.json @@ -0,0 +1,21 @@ +[ + { + "inputs": [ + { + "internalType": "bool", + "name": "enableAutoAssociations", + "type": "bool" + } + ], + "name": "setUnlimitedAutomaticAssociations", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/IHRC904UnlimitedAutoAssociations/IHRC904UnlimitedAutoAssociations.sol 
b/hedera-node/test-clients/src/main/resources/contract/contracts/IHRC904UnlimitedAutoAssociations/IHRC904UnlimitedAutoAssociations.sol new file mode 100644 index 000000000000..336236f9d4f8 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/IHRC904UnlimitedAutoAssociations/IHRC904UnlimitedAutoAssociations.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +pragma solidity ^0.8.0; + +interface IHRC904UnlimitedAutoAssociations { + function setUnlimitedAutomaticAssociations(bool enableAutoAssociations) external returns (int64 responseCode); +} \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/TokenReject/TokenReject.bin b/hedera-node/test-clients/src/main/resources/contract/contracts/TokenReject/TokenReject.bin new file mode 100644 index 000000000000..5090e0452fc4 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/TokenReject/TokenReject.bin @@ -0,0 +1 @@ +608060405234801561001057600080fd5b5061108d806100206000396000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c806315dacbea146100515780632d03d39014610081578063618dc65e146100b15780639b23d3d9146100e2575b600080fd5b61006b600480360381019061006691906107cf565b610112565b6040516100789190610852565b60405180910390f35b61009b600480360381019061009691906109c6565b610230565b6040516100a89190610852565b60405180910390f35b6100cb60048036038101906100c69190610b06565b61035f565b6040516100d9929190610bfa565b60405180910390f35b6100fc60048036038101906100f791906107cf565b6104bb565b6040516101099190610852565b60405180910390f35b600080600061016773ffffffffffffffffffffffffffffffffffffffff166315dacbea60e01b8888888860405160240161014f9493929190610c48565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050506040516101b99190610cc9565b6000604051808303816000865af19150503d80600081146101f657604051
9150601f19603f3d011682016040523d82523d6000602084013e6101fb565b606091505b50915091508161020c576015610221565b808060200190518101906102209190610d19565b5b60030b92505050949350505050565b600080825167ffffffffffffffff81111561024e5761024d610883565b5b60405190808252806020026020018201604052801561028757816020015b6102746106f4565b81526020019060019003908161026c5790505b50905060005b83518110156103365761029e6106f4565b8482815181106102b1576102b0610d46565b5b6020026020010151816000019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250506001816020019060070b908160070b815250508083838151811061031757610316610d46565b5b602002602001018190525050808061032e90610da4565b91505061028d565b506103428585836105d9565b9150601660030b8260070b1461035757600080fd5b509392505050565b6000606060008061016773ffffffffffffffffffffffffffffffffffffffff1663618dc65e60e01b878760405160240161039a929190610dec565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050506040516104049190610cc9565b6000604051808303816000865af19150503d8060008114610441576040519150601f19603f3d011682016040523d82523d6000602084013e610446565b606091505b50915091507f4af4780e06fe8cb9df64b0794fa6f01399af979175bb988e35e0e57e594567bc828260405161047c929190610e37565b60405180910390a1816104a0576015604051806020016040528060008152506104a4565b6016815b8160030b9150809450819550505050509250929050565b600080600061016773ffffffffffffffffffffffffffffffffffffffff16639b23d3d960e01b888888886040516024016104f89493929190610c48565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050506040516105629190610cc9565b6000604051808303816000865af19150503d806000811461059f576040519150601f19603f3d011682016040523d82523d6000602084013e6105a4565b606091505b5091509150816105b55760156105ca565b80806020019051810190
6105c99190610d19565b5b60030b92505050949350505050565b600080600061016773ffffffffffffffffffffffffffffffffffffffff1663ebd595e060e01b87878760405160240161061493929190611012565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505060405161067e9190610cc9565b6000604051808303816000865af19150503d80600081146106bb576040519150601f19603f3d011682016040523d82523d6000602084013e6106c0565b606091505b5091509150816106d15760156106e6565b808060200190518101906106e59190610d19565b5b60030b925050509392505050565b6040518060400160405280600073ffffffffffffffffffffffffffffffffffffffff168152602001600060070b81525090565b6000604051905090565b600080fd5b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006107668261073b565b9050919050565b6107768161075b565b811461078157600080fd5b50565b6000813590506107938161076d565b92915050565b6000819050919050565b6107ac81610799565b81146107b757600080fd5b50565b6000813590506107c9816107a3565b92915050565b600080600080608085870312156107e9576107e8610731565b5b60006107f787828801610784565b945050602061080887828801610784565b935050604061081987828801610784565b925050606061082a878288016107ba565b91505092959194509250565b60008160070b9050919050565b61084c81610836565b82525050565b60006020820190506108676000830184610843565b92915050565b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6108bb82610872565b810181811067ffffffffffffffff821117156108da576108d9610883565b5b80604052505050565b60006108ed610727565b90506108f982826108b2565b919050565b600067ffffffffffffffff82111561091957610918610883565b5b602082029050602081019050919050565b600080fd5b600061094261093d846108fe565b6108e3565b905080838252602082019050602084028301858111156109655761096461092a565b5b835b8181101561098e578061097a8882610784565b845260208401935050602081019050610967565b5050509392505050565b600082601f8301126109ad576109ac
61086d565b5b81356109bd84826020860161092f565b91505092915050565b6000806000606084860312156109df576109de610731565b5b60006109ed86828701610784565b935050602084013567ffffffffffffffff811115610a0e57610a0d610736565b5b610a1a86828701610998565b925050604084013567ffffffffffffffff811115610a3b57610a3a610736565b5b610a4786828701610998565b9150509250925092565b600080fd5b600067ffffffffffffffff821115610a7157610a70610883565b5b610a7a82610872565b9050602081019050919050565b82818337600083830152505050565b6000610aa9610aa484610a56565b6108e3565b905082815260208101848484011115610ac557610ac4610a51565b5b610ad0848285610a87565b509392505050565b600082601f830112610aed57610aec61086d565b5b8135610afd848260208601610a96565b91505092915050565b60008060408385031215610b1d57610b1c610731565b5b6000610b2b85828601610784565b925050602083013567ffffffffffffffff811115610b4c57610b4b610736565b5b610b5885828601610ad8565b9150509250929050565b6000819050919050565b610b7581610b62565b82525050565b600081519050919050565b600082825260208201905092915050565b60005b83811015610bb5578082015181840152602081019050610b9a565b60008484015250505050565b6000610bcc82610b7b565b610bd68185610b86565b9350610be6818560208601610b97565b610bef81610872565b840191505092915050565b6000604082019050610c0f6000830185610b6c565b8181036020830152610c218184610bc1565b90509392505050565b610c338161075b565b82525050565b610c4281610799565b82525050565b6000608082019050610c5d6000830187610c2a565b610c6a6020830186610c2a565b610c776040830185610c2a565b610c846060830184610c39565b95945050505050565b600081905092915050565b6000610ca382610b7b565b610cad8185610c8d565b9350610cbd818560208601610b97565b80840191505092915050565b6000610cd58284610c98565b915081905092915050565b60008160030b9050919050565b610cf681610ce0565b8114610d0157600080fd5b50565b600081519050610d1381610ced565b92915050565b600060208284031215610d2f57610d2e610731565b5b6000610d3d84828501610d04565b91505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b71000000000000000000000000000000000000000000
00000000000000600052601160045260246000fd5b6000610daf82610799565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610de157610de0610d75565b5b600182019050919050565b6000604082019050610e016000830185610c2a565b8181036020830152610e138184610bc1565b90509392505050565b60008115159050919050565b610e3181610e1c565b82525050565b6000604082019050610e4c6000830185610e28565b8181036020830152610e5e8184610bc1565b90509392505050565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b610e9c8161075b565b82525050565b6000610eae8383610e93565b60208301905092915050565b6000602082019050919050565b6000610ed282610e67565b610edc8185610e72565b9350610ee783610e83565b8060005b83811015610f18578151610eff8882610ea2565b9750610f0a83610eba565b925050600181019050610eeb565b5085935050505092915050565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b610f5a81610836565b82525050565b604082016000820151610f766000850182610e93565b506020820151610f896020850182610f51565b50505050565b6000610f9b8383610f60565b60408301905092915050565b6000602082019050919050565b6000610fbf82610f25565b610fc98185610f30565b9350610fd483610f41565b8060005b83811015611005578151610fec8882610f8f565b9750610ff783610fa7565b925050600181019050610fd8565b5085935050505092915050565b60006060820190506110276000830186610c2a565b81810360208301526110398185610ec7565b9050818103604083015261104d8184610fb4565b905094935050505056fea2646970667358221220d250baf2969ae38cbdd5b139677c6d86b83ae55c63a9c887b987371f75cb30a464736f6c63430008120033 \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/TokenReject/TokenReject.json b/hedera-node/test-clients/src/main/resources/contract/contracts/TokenReject/TokenReject.json new file mode 100644 index 000000000000..943654a29e89 --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/TokenReject/TokenReject.json @@ -0,0 +1,147 @@ +[ + { + "anonymous": false, + "inputs": [ + { + 
"indexed": false, + "internalType": "bool", + "name": "", + "type": "bool" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "CallResponseEvent", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "bytes", + "name": "encodedFunctionSelector", + "type": "bytes" + } + ], + "name": "redirectForToken", + "outputs": [ + { + "internalType": "int256", + "name": "responseCode", + "type": "int256" + }, + { + "internalType": "bytes", + "name": "response", + "type": "bytes" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "rejectingAddress", + "type": "address" + }, + { + "internalType": "address[]", + "name": "ftAddresses", + "type": "address[]" + }, + { + "internalType": "address[]", + "name": "nftAddresses", + "type": "address[]" + } + ], + "name": "rejectTokens", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "serialNumber", + "type": "uint256" + } + 
], + "name": "transferFromNFT", + "outputs": [ + { + "internalType": "int64", + "name": "responseCode", + "type": "int64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/hedera-node/test-clients/src/main/resources/contract/contracts/TokenReject/TokenReject.sol b/hedera-node/test-clients/src/main/resources/contract/contracts/TokenReject/TokenReject.sol new file mode 100644 index 000000000000..8d8fb722afab --- /dev/null +++ b/hedera-node/test-clients/src/main/resources/contract/contracts/TokenReject/TokenReject.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: Apache-2.0 +pragma solidity >=0.5.0 <0.9.0; +pragma experimental ABIEncoderV2; + +import "./HederaTokenService.sol"; + +contract TokenReject is HederaTokenService { + + function rejectTokens(address rejectingAddress, address[] memory ftAddresses, address[] memory nftAddresses) public returns(int64 responseCode) { + IHederaTokenService.NftID[] memory nftIDs = new IHederaTokenService.NftID[](nftAddresses.length); + for (uint i; i < nftAddresses.length; i++) + { + IHederaTokenService.NftID memory nftId; + nftId.nft = nftAddresses[i]; + nftId.serial = 1; + nftIDs[i] = nftId; + } + responseCode = rejectTokens(rejectingAddress, ftAddresses, nftIDs); + if (responseCode != HederaResponseCodes.SUCCESS) { + revert(); + } + return responseCode; + } +} \ No newline at end of file diff --git a/platform-sdk/consensus-gossip-impl/build.gradle.kts b/platform-sdk/consensus-gossip-impl/build.gradle.kts new file mode 100644 index 000000000000..3310d8eeef45 --- /dev/null +++ b/platform-sdk/consensus-gossip-impl/build.gradle.kts @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +plugins { + id("com.hedera.gradle.services") + id("com.hedera.gradle.services-publish") +} + +description = "Default Consensus Gossip Implementation" diff --git a/platform-sdk/consensus-gossip-impl/src/main/java/module-info.java b/platform-sdk/consensus-gossip-impl/src/main/java/module-info.java new file mode 100644 index 000000000000..bcc50c095187 --- /dev/null +++ b/platform-sdk/consensus-gossip-impl/src/main/java/module-info.java @@ -0,0 +1,6 @@ +module org.hiero.consensus.gossip.impl { + requires transitive org.hiero.consensus.gossip; + + provides org.hiero.consensus.gossip.Gossip with + org.hiero.consensus.gossip.impl.GossipImpl; +} diff --git a/platform-sdk/consensus-gossip-impl/src/main/java/org/hiero/consensus/gossip/impl/GossipImpl.java b/platform-sdk/consensus-gossip-impl/src/main/java/org/hiero/consensus/gossip/impl/GossipImpl.java new file mode 100644 index 000000000000..5b01daab8875 --- /dev/null +++ b/platform-sdk/consensus-gossip-impl/src/main/java/org/hiero/consensus/gossip/impl/GossipImpl.java @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.hiero.consensus.gossip.impl; + +import org.hiero.consensus.gossip.Gossip; + +/** + * Implementation for the mock gossip service. + */ +public final class GossipImpl implements Gossip {} diff --git a/platform-sdk/consensus-gossip/build.gradle.kts b/platform-sdk/consensus-gossip/build.gradle.kts new file mode 100644 index 000000000000..d00919743eeb --- /dev/null +++ b/platform-sdk/consensus-gossip/build.gradle.kts @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +plugins { + id("com.hedera.gradle.services") + id("com.hedera.gradle.services-publish") +} + +description = "Consensus Gossip API" diff --git a/platform-sdk/consensus-gossip/src/main/java/module-info.java b/platform-sdk/consensus-gossip/src/main/java/module-info.java new file mode 100644 index 000000000000..5e503ba7fae2 --- /dev/null +++ b/platform-sdk/consensus-gossip/src/main/java/module-info.java @@ -0,0 +1,6 @@ +module org.hiero.consensus.gossip { + exports org.hiero.consensus.gossip; + + requires transitive com.swirlds.common; + requires static com.github.spotbugs.annotations; +} diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/FallenBehindManager.java b/platform-sdk/consensus-gossip/src/main/java/org/hiero/consensus/gossip/FallenBehindManager.java similarity index 98% rename from platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/FallenBehindManager.java rename to platform-sdk/consensus-gossip/src/main/java/org/hiero/consensus/gossip/FallenBehindManager.java index 2b1b65e4c879..8da9a6a71348 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/FallenBehindManager.java +++ b/platform-sdk/consensus-gossip/src/main/java/org/hiero/consensus/gossip/FallenBehindManager.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package com.swirlds.platform.gossip; +package org.hiero.consensus.gossip; import com.swirlds.common.platform.NodeId; import edu.umd.cs.findbugs.annotations.NonNull; diff --git a/platform-sdk/consensus-gossip/src/main/java/org/hiero/consensus/gossip/Gossip.java b/platform-sdk/consensus-gossip/src/main/java/org/hiero/consensus/gossip/Gossip.java new file mode 100644 index 000000000000..d8f1643aa10a --- /dev/null +++ b/platform-sdk/consensus-gossip/src/main/java/org/hiero/consensus/gossip/Gossip.java @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.hiero.consensus.gossip; + +/** + * Mock gossip. + */ +public interface Gossip {} diff --git a/platform-sdk/docs/proposals/TSS-Ledger-Id/TSS-Ledger-Id.md b/platform-sdk/docs/proposals/TSS-Ledger-Id/TSS-Ledger-Id.md index a5df2d8c1c5c..28dbc5a07080 100644 --- a/platform-sdk/docs/proposals/TSS-Ledger-Id/TSS-Ledger-Id.md +++ b/platform-sdk/docs/proposals/TSS-Ledger-Id/TSS-Ledger-Id.md @@ -945,7 +945,7 @@ Outputs: 1. If voting is closed for the target roster or the vote is a second vote from the originating node, do nothing. 2. Add the `TssVoteTransaction` to the list for the target roster. -3. If the voting threshold is met by at least 1/2 consensus weight voting yes: +3. If the voting threshold is met by at least 1/3 consensus weight voting yes: 1. add the target roster hash to the` `votingClosed` set. 2. 
Non-Dynamic Address Book Semantics 1. if `keyActiveRoster` is false, do nothing here, rely on the startup logic to rotate the candidate roster to diff --git a/platform-sdk/docs/proposals/consensus-layer/Consensus-Layer.md b/platform-sdk/docs/proposals/consensus-layer/Consensus-Layer.md new file mode 100644 index 000000000000..2581921deb18 --- /dev/null +++ b/platform-sdk/docs/proposals/consensus-layer/Consensus-Layer.md @@ -0,0 +1,684 @@ +# Consensus Layer of the Consensus Node + +--- + +## Summary + +Update the architecture for the consensus node to reduce complexity, improve performance, and improve stability. + +| Metadata | Entities | +|--------------------|----------------------------------------------------------| +| Designers | Richard Bair, Jasper Potts, Oleg Mazurov, Austin Littley | +| Functional Impacts | Consensus Node | + +--- + +## Assumptions + +1. The proposed design assumes that the work to + [use a birth-round based definition of ancient](https://github.com/hashgraph/hedera-services/issues/13817) has been + completed + +## Purpose and Context + +Much of the motivation for this design can come down to paying down technical debt and simplifying the overall design. +While the current design is full of amazing high quality solutions to various problems, it is also more complex than +necessary, leading to hard-to-find or predict bugs, performance problems, or liveness (stability) issues while under +load. This work is also necessary to prepare for autonomous node operation, and community nodes. + +1. This design defines several high-level modules providing strong encapsulation and isolation with strict contracts + between modules, leading to an overall simpler and more correct system. +2. Assumptions and requirements that led to tight coupling between modules have been revisited, and where possible, + eliminated. +3. The two fundamental modules are "consensus" and "execution". The Consensus module takes transactions and produces + rounds. 
Everything required to make that happen (gossip, event validation, hashgraph, event creation, etc.) is part + of the Consensus module. It is a library, and instances of the classes and interfaces within this library are created + and managed by the Execution module. The Consensus module does not persist state in the merkle tree, has no main + method, and has minimal dependencies. +4. The Execution module is a mixture of what we have called "services" and some parts of "platform". The responsibility + for reconnect, state saving, lifecycle, etc. will be merged with modules making up the base of the application, + dramatically simplifying the interaction between "consensus" and "execution". +5. Maintaining high availability under unpredictable conditions ("liveness under stress"), will be designed based on a + combination of network and individual (per-node) actions. Each node individually will do its best to deal + gracefully when under stress, and the network as a whole will cooperate to reduce load when enough nodes in the + network are under stress. This network-wide throttling is based on "dynamic network throttles". + +The purpose of this document is not to describe the implementation details of each of the different modules. Nor does +it go into great detail about the design of the Execution module (which is primarily documented elsewhere). Instead, +it provides an overview of the whole system, with an emphasis on the Consensus module, and how the Consensus module +interacts with the Execution module. + +This document supports existing features implemented in new ways, and it provides for new features (such as the dynamic +address book) which have not been implemented. After acceptance, a long series of changes will be required to modify +the existing codebase to meet this new design. This will not happen overnight, nor will it block progress on all other +initiatives. 
Instead, this plan provides the blueprint for our new node architecture, which we will work towards +implementing with every change we make going forward. This blueprint will also provide the framework within which we +will evaluate all other feature designs and implementations. + +## Design + +![Network](network.png) + +A group of consensus nodes together form a _consensus network_. The network, as a whole, takes as input _transactions_ +and each node in the network produces as output a _blockchain_ represented as a _block stream_. Each node in the network +maintains _state_. The job of a node is to (a) work with other nodes to come to _consensus_ on which transactions to +include, (b) order those transactions and assign a _consensus timestamp_ to each transaction, (c) handle each +transaction, transitioning state from _S_ to _S'_, (d) produce blocks containing a history of inputs and state +transitions, (e) work with other nodes to sign each block, and (f) export the block. + +As a decentralized network, each node can make no assumptions about the other nodes in the network. Other nodes may be +faster, or slower. They may have superior or inferior network connections. They may be far away from each other or +colocated. Each of those parameters may change over time. They may be running modified software or the official builds. +They may be honest, or dishonest. Each node must assume that just more than 2/3 of the other nodes are honest, but must +also assume that any particular node, other than itself, may be dishonest. + +The network must also _as a whole_ remain resilient and operational regardless of the transaction load supplied to the +network. Nodes that are unable to keep up with the transaction load must be able to fail gracefully and rejoin the +network if conditions improve. 
If a sufficiently large number of nodes are unable to keep up with the transaction load, +then the network as a whole must collaborate to _throttle_ transaction ingestion to a level that will permit the network +to remain stable. + +The design of the consensus node _does not require_ careful tuning for particular execution environments in order +to remain live and responsive. Indeed, it is a hard requirement that tuning _cannot be required_. It is infeasible to +test the exact configuration of a decentralized network, by definition, and therefore cannot be required for stable +operation. + +![Design](consensus-module-arch.png) + +The consensus node is made up of two parts, a Consensus layer, and an Execution layer. The Consensus layer takes as +input transactions and produces as output an ordered list of rounds. Each round contains the ordered and timestamped +list of transactions produced by the hashgraph consensus algorithm. The Execution layer is responsible for executing +transactions, transitioning state, producing blocks, signing blocks, and exporting blocks. + +Each layer is represented by JPMS modules. The Consensus layer will actually be made up of two different modules -- an +API module and an implementation module, though unless the distinction is important, this document will usually refer +to just "the Consensus Module". The API module will define an `Interface` corresponding to the dotted-line box in the +Consensus layer blue box. The Execution implementation module will have a compile-time dependency on the Consensus +layer's API module, and a runtime dependency on the Consensus layer's implementation module. + +Each submodule will likewise be defined by a pair of JPMS modules -- an API module and an implementation module. By +separating the API and implementation modules, we make it possible to supply multiple implementation modules (which is +useful for testing or future maintenance tasks), and we also support circular dependencies between modules. 
+ +### Foundational Concepts + +This design relies on several foundational concepts based on the hashgraph consensus algorithm. The term "hashgraph" +refers to a data structure, while the "hashgraph consensus algorithm" refers to the particular consensus algorithm +making use of a hashgraph for the purposes of consensus. The algorithm itself is only superficially described here, +sufficient only to understand the overall system architecture. + +A hashgraph is a directed acyclic graph (or DAG), made up of _events_. Each event maintains references to "parent" +events. When the hashgraph consensus algorithm runs, it collects events into _consensus rounds_. One or more rounds is +grouped together by the Execution layer, executed, and used to form a block in the blockchain. + +Each event contains an ordered list of _transactions_. + +Nodes create events. Each event in the hashgraph has a _creator_. The creator is the node that created the event. Each +event also has a _birth round_. This is the most recent round number known by the creator at the time it created the +event. When a node creates an event, it fills that event with some, or all, of the transactions it knows about. Each +creator creates a single event at a time, with some interval between event creations (say, every 50ms), and some maximum +network-wide configuration for the number of events per second per creator. Each event will have as a "self-parent" the +previous event created by that creator, assuming the creator remembers its previous event, and that event isn't ancient. +Each event will additionally have one or more "other parent" events created by other creators, apart from the edge cases +of network genesis and single node networks, where there may be zero other parents. + +Any given node has a system clock, and this clock provides the node with the current _wall clock time_. This is the +current "real" time, as the node understands it. 
Since we cannot trust the clock of any particular node, we cannot trust +the wall clock time of the creator to be accurate. The creator may lie. During the execution of the hashgraph consensus +algorithm, each node will deterministically assign a _consensus time_ to each event (and thus by extension to each of +the transactions within the event). + +Each node has a _roster_ listing all other nodes, their public cryptographic keys, their consensus weights (since the +network is a proof-of-stake network, different nodes may have different "weights" when voting for consensus), etc. The +cryptographic keys are used to verify that an event created by a creator was truly created by that creator. The roster +can change over time, so it is vital that the correct roster be used for verifying each event. The correct roster to use +for verifying an event is the roster that was active during that event's birth round. + +![Hashgraph](hashgraph.png) + +The hashgraph can be organized visually in a simple series of "swim lanes", running vertically, one per creator. Each +"other parent" is a line from an event to a swimlane for another creator. Newer events are on the top of the hashgraph. + +For example, in the above diagram, each event has exactly 2 parents: the self-parent, and one other-parent. Event `B3` +has as a self-parent `B2`, and an other-parent of `A2`. Events `A1`, `A2`, `A3`, and `A4` are all events created by node +`A`, while `B1`, `B2`, `B3`, and `B4` are created by node `B`, and so on. + +Each node has its own copy of the hashgraph. Since events are being gossiped asynchronously throughout the network, +newer events (those at the top of the graph) may be known to some nodes, and not to others. Broken or dishonest nodes +may work to prevent some events from being known to all nodes, and thus there may be some differences in the hashgraph +of each node. 
But the hashgraph algorithm will, with probability 1, come to consensus given just over 2/3 of the nodes +are honest. + +#### The Road to Finality + +A transaction is submitted to a node in the network. This node, upon verifying the integrity of the transaction, will +include this transaction in a future event it creates. This new event is assigned a _birth round_ matching the most +recent round number of the hashgraph on the node that created the event. This birth round is used to determine which +roster should be used to verify the event, and lets other nodes in the network know how far along in processing the +hashgraph this node was at the time the event was created. + +The event is then gossiped, or distributed throughout the network. Each node that receives this event validates it and +inserts it into their own copy of the hashgraph. Eventually, the hashgraph algorithm runs on each node (which may +happen at different wall clock times!) and the event will either become stale, or be included in a round. Every honest +node will always come to the same conclusion, and either determine that the event is stale, or include the event in the +same round. + +Each round is then passed to the Execution layer, where the transactions in the round are executed, and the state is +transitioned accordingly. For example, hbars may be transferred from one account to another. At the end of some +deterministic number of rounds, the Execution layer will create a block. The block hash will be signed and all the +nodes together will work to sign the block (using an algorithm known as TSS). The block is then exported from the +node. In cases where a block signature can't be created in time, then validity will propagate backwards from a signature +on a future block. + +Once the block is exported from the node, the transaction execution is truly final. 
Since the network together signed +the block, users have an iron-clad guarantee that the contents of the block represent the consensus result of executing +those transactions. Since they are included in a blockchain, there is an immutable, cryptographic proof of execution. + +### Liveness Under Stress + +A node is under stress when it is unable to process events fast enough to keep up with the other nodes in the network. +The network is under stress when a sufficient number of nodes are under stress. When the network is under stress, all +nodes together must work to reduce the number of transactions allowed into the network, to give the network a chance +to recover. The method by which this is done will be covered in another design document related to "dynamic throttling". + +The Consensus Layer performs work on events. The more events, the more work. If events are unbounded, then a node +under stress will eventually run out of memory and crash. If there are no events, then there is virtually no CPU or +memory used by Consensus. It is therefore critical that the number of events be bounded within any given node, +even if the node is running slower than other nodes in the network. Each node must maintain _at least_ all non-ancient +events, and should maintain additional non-expired events (though these could be stored on disk to remove them from +the memory requirement of the node). In addition, _birth round filtering_ prevents a node from accepting very old or far +future events (see [Birth-Round Filtering](#birth-round-filtering)). + +#### CPU Pressure + +From a high level, either Execution or Consensus can be the primary bottleneck in handling events. + +##### Consensus Bottlenecks + +Let us suppose that we have a node, Alice. Perhaps initially Alice is able to receive and process events at the same +speed as other nodes in the network. Perhaps the network load increases past some point that Alice can handle. 
At this +point, other nodes are receiving, creating, gossiping, and coming to consensus on rounds faster than Alice. Remember: + +1. Birth-round filtering limits the number of events received by Alice to coincide with the pace at which Alice is + handling consensus. +2. The Tipset algorithm only creates events when doing so will advance consensus. It relies on events that have already + passed through Birth-round filtering. + +As the other nodes progress farther than Alice, they begin to send events with newer and newer birth rounds. At some +point, they get too far ahead and begin delivering events that are too far in the future and fail to pass Alice's +birth-round filter. This prevents Alice from being overwhelmed by events. + +Further, since events are coming more slowly to Alice, she will also create her own events more slowly. + +Further, since events are coming more slowly to Alice, her Event Creator will update more slowly. When the tipset +algorithm is consulted for an "other parent", it will find after a short time that there are no other parents it can +select that will advance consensus. This will cause it to stop creating events until enough events from other creators +have been processed. This natural slowing of event creation provides a virtuous cycle: as each stressed node slows down +the event creation rate, it starts to create events with more and more transactions within each event and at a slower +rate. This will lead to fewer overall events, allowing it to do less work. If the node is still not able to keep up, +eventually it will refuse to accept any additional transactions from users. If enough nodes in the network are stressed, +then the overall transaction ingestion rate of the network will be reduced, further reducing the amount of work each +node has to do. Eventually, an equilibrium is reached. 
+ +If the rate at which the network is creating events slows, Alice will be able to catch up by retrieving all required +events through gossip, and will be able to process them and catch up. Or, in the last extremity where Alice has fallen +too far behind, Alice will wait for some time and reconnect. + +##### Slow Execution + +Under normal circumstances, the Execution layer is always the actual bottleneck. The cost of processing a few hundreds +of events pales in comparison to the cost of processing tens of thousands of transactions. Execution must therefore +provide some backpressure on Consensus. In this design, we propose that the Hashgraph module **never** runs the +consensus algorithm until it is told to do so from the Execution module. + +The TCP Sliding Window is a classic technique for controlling backpressure in networking. The receiver controls the rate +of transmission by signalling to the sender how many bytes can be sent before receiving a new update from the receiver. +The same technique is used for backpressure in the HTTP2 protocol. We will use the same concept here. + +Execution will instruct Consensus each time it needs additional rounds processed. It could indicate any number of +additional rounds. For each round, it will produce the appropriate roster, even if the roster doesn't change between +rounds. (And if we have any other dynamic configuration, it is also provided in like manner). Execution is therefore +responsible for dictating the rate at which rounds can be produced, providing natural backpressure. In addition, by +sending the roster information for each round, a quick and efficient mechanism is provided for deterministically +changing the roster for any given round. This is very useful for punishing malicious nodes. 
+ +By keeping Consensus tied to Execution in this way, if one node's Execution runs slowly compared to other nodes in the +network, that node will naturally handle consensus slower than the others, and will therefore eventually fall behind +and need to reconnect. Indeed, from the perspective of the rest of the consensus module, or from the other nodes in +the network, the behavior of the node in stress is **exactly the same** whether Consensus or Execution are the reason +for falling behind. + +##### A Silly Example + +Suppose we have a 4 node network, where 1 node is a Raspberry PI and the other 3 nodes are 64-core 256GB machines. In +this network, at 10 TPS, all 4 nodes may be able to process events and handle transactions without any problems. If the +transaction load were to increase to 10,000 TPS, then the Raspberry PI may not be able to keep up with this workload, +while the other 3 machines might. The healthy machines will continue to accept transactions and create new events, while +the machine under stress is unable to create new consensus rounds fast enough, since consensus is stalled waiting for +the Execution layer to finish processing previous rounds. As time goes on, the slow machine cannot receive all events +from other nodes, since the events are too far in the future. If this occurs, the slow machine will begin to fall +behind. + +It may be that the load decreases back to a manageable level, and the Raspberry PI is able to successfully "catch up" +with the other nodes. Or, it may be that the load continues long enough that the Raspberry PI falls fully behind. At +this point, the only recourse is for the node to reconnect. This it will do, using an exponential backoff algorithm, so +that if the PI is continually falling behind, it will wait a longer and longer time before it attempts to reconnect +again. + +Eventually, the PI may encounter a quieter network, and successfully reconnect and rejoin the network. 
Or the node +operator may decide to upgrade to a more capable machine so it can rejoin the network and participate. In either case, +the node was able to gracefully handle slow execution without having to take any direct or extraordinary action. + +### Lifecycle of the Consensus Module + +When Execution starts, it will (at the appropriate time in its startup routine) create an instance of Consensus, and +`initialize` it with appropriate arguments, which will be defined in detail in further documents. Critically, +Consensus **does not persist state in the merkle tree**. Execution is wholly responsible for the management of the +state. To start Consensus from a particular moment in time, Execution will need to initialize it with some information +such as the judges of the round it wants to start from. It is by using this `initialize` method that Execution is able +to create a Consensus instance that starts from genesis, or from a particular round. + +Likewise, if a node needs to reconnect, Execution will `destroy` the existing Consensus instance, and create a new one, +and `initialize` it appropriately with information from the starting round, after having downloaded necessary data and +initializing itself with the correct round. Reconnect therefore is the responsibility of Execution. Consensus does not +have to consider reconnect at all. + +## Modules + +### Gossip + +The Gossip module is responsible for gossiping messages between neighbors. The actual gossip implementation is not +described here, except to say that it will be possible to define and implement both event-aware and event-agnostic +gossip implementations either to a fully connected network or where the set of neighbors is a subset of the whole. This +document does not dictate whether raw TCP, UDP, HTTP2, gRPC, or other network protocols are used. This will be left to +the design documents for Gossip. 
+ +![Gossip](gossip-module.png) + +Gossip is the only part of Consensus that communicates over the network with gossip neighbors. When Gossip is +initialized, it is supplied a roster. This roster contains the full set of nodes participating in gossip, along with +their metadata such as RSA signing keys, IP addresses, and so forth. The Gossip module decides which neighbors to gossip +with (using whatever algorithm it chooses). + +#### Events + +Gossip is event-oriented, meaning that it is given events to gossip, and emits events it receives through gossip. An +implementation of Gossip could be based on a lower-level implementation based on bytes, but at the module level, it +works in terms of events. + +When the Gossip module receives events through gossip, it *may* choose to perform some deduplication before sending +them to Event Intake, but it is not required to do so. + +Some gossip algorithms send events in *topological order*. A neighbor may still receive events out of order, because +different events may arrive from different neighbors at different times. Events received by Gossip are not immediately +retransmitted to its neighbors. **An honest node will only send valid events through gossip**. If invalid events are +ever received, you may know the node that sent them to you is dishonest. Validating events increases latency at each +"hop", but allows us to identify dishonest gossip neighbors and discipline them accordingly. For this reason, events +received by Gossip are sent to Event Intake, which eventually send valid, ordered events _back_ to Gossip for +redistribution. + +During execution, for all nodes that are online and able to keep up, events are received "live" and processed +immediately and re-gossiped. However, if a node is offline and then comes back online, or is starting back up after +reconnect, it may be missing events. In this case, the node will need to ask its neighbors for any events it is missing. 
+ +For this reason, every honest node needs to buffer some events, so when its neighbors ask it for events, it is able to +send them. The Gossip module **may** cache all non-expired events, but **must** cache all non-ancient events. +Non-expired events are crucial, because such events allow a node that is moderately far behind its neighbors to catch +back up without incurring the high cost of a reconnect. This may occur during normal operation, if a node is +experiencing stress, but is particularly likely just after having performed a reconnect. A recently reconnected node may +be several minutes behind its neighbors, but still be able to catch the rest of the way up by receiving those older +events through gossip. + +#### Neighbor Discipline + +If a neighbor misbehaves, the Gossip module will notify the Sheriff module that one of its neighbors is misbehaving. For +example, if a neighbor is not responding to requests, even after repeated attempts to make a TCP connection with it, it +may be "bad". Or if the neighbor is sending events that exceed an acceptable rate, or exceed an acceptable size, then it +is "bad". Or if the events it sends cannot be parsed, or are signed incorrectly, or in other ways fail validation, then +it is "bad". There may be additional rules by the Gossip module or others (such as Event Intake detecting branching) +that could lead to a neighbor being marked as "bad". A "bad" node may be dishonest, or it may be broken. The two cases +may be indistinguishable, so punishment must be adjusted based on the severity of the behavior. + +If the Sheriff decides that the neighbor should be penalized, then it will instruct the Gossip module to "shun" that +neighbor. "Shunning" is a unilateral behavior that one node can take towards another, where it terminates the connection +and refuses to work further with that neighbor. If the Sheriff decides to welcome a neighbor back into the fold, it can +instruct the Gossip module to "welcome" the neighbor back. 
+ +#### Falling Behind + +When a node is operating, it receives events from its neighbors through gossip. If the node for some reason is unable to +receive and process events fast enough, it may start to "fall behind" the other nodes. Perhaps Bob is processing round +200 while Alice is still on round 100. If Alice continues to fall farther and farther behind, the time may come when she +can no longer get old events from her neighbors. From the perspective of the neighbor, the events Alice says she needs +may have expired, and Bob may no longer be holding those events. + +If this happens, then Alice has "fallen behind" and must reconnect. There is no longer any chance that she can get the +events she needs through gossip alone. Gossip will detect this situation and make a call through the Consensus module +interface to notify Execution that the node is behind. Execution will then initiate reconnect. + +#### Roster Changes + +At runtime, it is possible that the roster will change dynamically (as happens with the dynamic address book feature). +Roster changes at the gossip level may influence which neighbors the module will work with. As with all other modules +using rosters, Gossip may need a deterministic understanding of which roster applies to which round. It will receive +this information from Hashgraph in the form of round metadata. + +### Event Intake + +The Event Intake System is responsible for receiving events, validating them, and emitting them in *topological order*. +It also makes sure they are durably persisted before emission, which prevents branching during upgrades, and adds +resilience to the node in case of certain catastrophic failure scenarios. + +![Event Intake](event-intake-module.png) + +#### Validation + +One of the core responsibilities of Event Intake is to validate the events it has received. 
While this document does +not specify the validation pipeline, it will define some of the primary steps involved in validation, so as to motivate +the purpose of this module. That is, the following description is non-normative, but important for understanding the +context within which this module operates. + +Event Intake receives events from gossip, or from the Event Creator module (i.e. "self-events"). Event Intake is +responsible for computing some pieces of metadata pertaining to the event, such as the event hash. Event Intake also +deduplicates events, and checks for "syntactic" correctness. For example, it verifies that all required fields are +populated. While the Gossip system has already checked to ensure the payload of the event (its transactions) are +limited in size and count, Event Intake will also check this as an additional safety measure. + +If an event is valid, then we finally check the signature. Since validation and deduplication and hashing are +significantly less expensive than signature verification, we wait on signature verification until the other steps are +completed. The operating principle is that we want to fail fast and limit work for further stages in the pipeline. + +If an event has a very old birth-round that is ancient, it is dropped. If a node sends a large number of ancient events, +it may end up being disciplined (the exact rules around this will be defined in subsequent design docs for the +Event Intake module). + +If an event builds upon a parent with a newer birth-round than itself, then it is invalid and discarded. + +#### Topological Ordering + +Events are buffered if necessary to ensure that each parent event has been emitted from Event Intake before any child +events. A simple map (the same used for deduplication) can be used here. Given some event, for each parent, look up the +parent by its hash. If each parent is found in the map, then emit the event. 
Otherwise, remember the event so when the +missing parent is received, the child may be emitted. The current implementation uses what is known as the "orphan +buffer" for this purpose. + +Since Event Intake will also maintain some buffers, it needs to know about the progression of the hashgraph, +so it can evict old events. In this case, the "orphan buffer" holds events until either the parent events have +arrived, or the events have become ancient due to the advancement of the "non-ancient event window" and the event is +dropped from the buffer. This document does not prescribe the existence of the orphan buffer or the method by which +events are sorted and emitted in topological order, but it does describe a method by which old events can be dropped. + +#### Birth-Round Filtering + +When an event is created, it is assigned a "birth round". This is the most recent consensus round on the node at the +time the event is created. Two different nodes in the same network might be working on very different moments in the +hashgraph timeline. One node, Alice, may be working on round 100 while a better-connected or faster node, Bob is working +on round 200. When Alice creates an event, it will be for birth-round 100, while an event created by Bob at the same +instant would be for birth-round 200. + +It is not possible for any one node to get much farther ahead of all other nodes, since the only way to have a newer +birth-round is to advance the hashgraph, and that requires >2/3 of the network by stake weight. Therefore, in this +example, Alice is not just 100 rounds behind Bob, she must be 100 rounds behind at least 2/3 of the network by +stake weight, or, Bob is lying. He may create events at round 200, but not actually have a hashgraph that has advanced +to that round. + +Each event must be validated using the roster associated with its birth-round. 
If Alice is far behind Bob, and she +receives an event for a birth-round she doesn't have the roster for, then she cannot validate the event. If the Event +Intake module receives a far-future event which cannot be validated, then the event will be dropped. + +#### Self Events + +Events are not only given to the Event Intake system through gossip. Self events (those events created by the node +itself) are also fed to Event Intake. These events **may** bypass some steps in the pipeline. For example, self-events +do not need validation. Likewise, when replaying events from the pre-consensus event buffer, those checks are not needed +(since they have already been proved valid and are in topological order). + +#### Neighbor Discipline + +During the validation process, invalid events are rejected, and this information is passed to the Sheriff module so the +offending node may be disciplined. Note that the node to be disciplined will be the node that sent this bad event, not +the creator. This information (which node sent the event) must be captured by Gossip and passed to Event Intake as part +of the event metadata. + +#### Branch Detection + +The Event Intake module inspects events to determine whether any given event creator is "branching" the hashgraph. A +"branch" happens when two or more different events from the same creator have the same "self-event" parent. Any node +that branches (known affectionately as a "Dirty Rotten Brancher") will be reported to the Sheriff. Branching is a sign +of either a dishonest node, or a seriously broken node. In either case, it may be subject to "shunning", and will be +reported to the Execution layer for further observation and, if required, action (such as canceling rewards for +stakers to that node). + +Pre-consensus branch detection and remediation can happen quickly, but to _prove_ a node is a DRB, the check will +have to happen in the Hashgraph module. 
When determining to take system-wide action, only the actually proven bad +behavior post-consensus should be used. + +#### Persistence + +Event Intake is also responsible to durably persist pre-consensus events **before** they are emitted, but after they +have been ordered. This system is known as the "Pre-Consensus Event Stream", or PCES. The current implementation +requires coordination between the PCES and the Hashgraph component to know when to flush, and the PCES needs to know +when rounds are signed so it knows when to prune files from the PCES. + +It is essential for events to be durably persisted before being sent to the Hashgraph, and self-events must be +persisted before being gossiped. While it may not be necessary for all code paths to have durable pre-consensus events +before they can handle them, to simplify the understanding of the system, we simply make all events durable before +distributing them. This leads to a nice, clean, simple understanding that, during replay, the entire system will +behave predictably. + +#### Emitting Events + +When the Event Intake module emits valid, topologically sorted events, it sends them to: +- The Gossip module, to be sent to gossip neighbors +- The Event Creator module, for "other parent" selection +- The Execution layer as a "pre-handle" event +- The Hashgraph module for consensus + +The call to each of these systems is "fire and forget". Specifically, there is no guarantee to Execution that it will +definitely see an event via `pre-handle` prior to seeing it in `handle`. Technically, Consensus always calls +`pre-handle` first, but that thread may be parked arbitrarily long by the system and the `handle` thread may actually +execute first. This is extremely unlikely, but must be defended against in the Execution layer. + +### Hashgraph Module + +The Hashgraph module orders events into rounds, and assigns timestamps to events. It is given ordered, persisted +events from Event Intake. 
Sometimes when an event is added, it turns out to be the last event that was needed to cause +one or more entire "rounds" of events to come to consensus. When this happens, the Hashgraph module emits a `round`. +The round includes metadata about the round (the list of judge hashes, the round number, the roster, etc.) along with +the events that were included in the round, in order, with their consensus-assigned timestamps. + +![Hashgraph](hashgraph-module.png) + +Rounds are immutable. They are sent "fire and forget" style from the Hashgraph module to other modules that require +them. Some modules only really need the metadata, or a part of the metadata. Others require the actual round data. +We will pass the full round info (metadata + events) to all listeners, and they can pull from it what they need. + +#### Roster and Configuration Changes + +When the roster (or any other network-wide configuration impacting Consensus) changes, Consensus must be updated in a +deterministic manner. This section discusses specifically how rosters are updated, but configuration in general would +use a similar path. + +![Roster Changes](roster-changes.png) + +Execution may hold many rounds. It will have a round that is currently being signed, one that is being hashed (which +happens before signing), one that is being handled, and any number of rounds it is holding awaiting execution. It is +very likely this "awaiting execution" buffer will be a very small number, perhaps as small as 1. This number of rounds +held in buffer is completely under the control of Execution. Consensus does not concern itself with rounds once they +are handed to Execution. + +Consensus **only produces rounds on demand**. Execution will ask Consensus to produce the next round. Only then does +Consensus begin inserting events into the hashgraph and executing the hashgraph consensus algorithm. When any one event +is added to the hashgraph, it may produce 0 or more consensus rounds. 
Events are added until at least one consensus
+round is produced.
+
+The Execution module is responsible for defining the roster. When it asks Consensus for a new round, it also supplies
+the new roster. The Hashgraph module will decide in what round the new roster becomes active. Let us define `N` as the
+number of rounds from the latest consensus round at which the candidate roster will become active. This number will be
+configured with the same value for all nodes in the network.
+
+For example, suppose Execution has just finished handling round 10, and has buffered up round 11. The most recent round
+that the Hashgraph module produced was round 11. Suppose `N` is 10, and the call to `nextRound` from Execution at the
+end of round 10 supplies a new Roster (`R`). The Hashgraph module will assign `R` to become active as of round 22
+(`latest_consensus_round + N + 1`). It will then work on producing consensus round 12. Once produced, metadata
+indicating that `R` will be used for rounds starting at 22 is passed to Gossip, Event Intake, Event Creator, and
+Execution. The roster used for round 22 may also be included in this metadata.
+
+There is a critical relationship between the value of N, the latency for adopting a new roster, and the number of future
+events held in memory on the node. Remember that each event has a birth-round. Let us define another variable called
+`Z`, such that `Z <= N`. Any event with `birth_round > latest_consensus_round + Z` is considered a "far future" event,
+and will be either dropped by the birth-round filtering logic in Event Intake, or simply never sent in the first place.
+Any event with `birth_round > latest_consensus_round` is just a "future" event.
+
+The smaller the value of `Z`, the fewer events are held in memory on the node. Larger values of `Z` mean more events in
+memory, but it also means more "smoothing" in gossip and in handling any performance spikes. 
On the other hand, the +larger the value of `N`, the larger the latency between the round we know about a new roster, and the round at which it +can be actually used. Ideally, the value of `N` would be small, like 3, but we may find that 3 is too small a number for +`Z`. + +Each node may select its own value of `Z`, so long as it is less than or equal to `N`. But all nodes must use the same +value for `N`, or they will ISS since they will assign different rosters to different rounds. The value of `N` is not +defined here, but will need some investigation. Unfortunately, this number appears to require "just being chosen" and +cannot be deterministically dynamically computed based on network conditions. + +#### State + +The Hashgraph module also includes a `state` section in the metadata of the round. This is used by the Execution layer +to persist the Consensus state, for reconnect and for restart. In the state are the judges for the round, and possibly +other information. When `initialize` is called, this same state is made available again to the Consensus node. + +### Event Creator Module + +Every node in the network participating in consensus is permitted to create events to gossip to neighbors. These events +are used both for transmitting user transactions, and as the basis of the hashgraph algorithm for "gossiping about +gossip". Therefore, the Event Creator has two main responsibilities: + +1. Create events with "other parent(s)" so as to help the hashgraph progress consensus +2. Fill events with transactions to be sent to the network through Gossip + +![Event Creator](event-creator-module.png) + +#### Creating Events + +The Event Creator is configured with a `maximum_event_creation_frequency`, measured in events/sec. This is a network +wide setting. If any node creates events more rapidly than this setting, then the node will be reported to the Sheriff. +An event is not necessarily created at this frequency, but will be created at no more than this frequency. 
+ +When it is time to potentially create an event, the Event Creator will determine whether it *should* create the event. +It may consider whether there are any transactions to send, or whether creating an event will help advance the +hashgraph. It may decide that creating the event would be bad for the network, and veto such creation. Or it may decide +that creating the event should be permitted. + +If the event is to be created, the Event Creator will decide which nodes to select as "other parents". Today, we have +exactly one "other parent" per event, but multiple "other parents" is shown to effectively reduce latency and network +traffic. While the implementation of Event Creator may choose to support only a single "other parent", the module is +designed and intended to support multiple "other parents". + +#### Filling Events + +Events form a large amount of the network traffic between nodes. Each event has some overhead in terms of metadata, +such as the hashes of the parent events and cryptographic signatures. Thus, for bandwidth and scalability reasons, it is +more desirable to have fewer, large events rather than many small events. On the other hand, events should be created +frequently enough to reduce the overall latency experienced by a transaction. The Event Creator is designed so as to +find the optimal balance between event creation frequency and size. The particular algorithm that does so (the Tipset +algorithm, or "Enhanced Other Parent Selection" algorithm) is not defined here, but can be found in the design +documentation for Event Creator. + +When it is time to create a new event, a call is made to Execution to fill the event with user transactions. Newly +created events are sent to Event Intake, which then validates them, assigns generations, durably persists them, etc., +before sending them out through Gossip and so forth. + +#### Stale Self-Events + +The Event Creator needs to know about the state of the hashgraph for several reasons. 
If it uses the Tipset algorithm, +then it needs a way to evict events from its internal caches that are ancient. And it needs to report "stale" +self-events to the Execution layer. A stale self-event is a self-event that became ancient without ever coming to +consensus. If the Event Creator determines that a self-event has become stale, then it will notify the Execution layer. +Execution may look at each transaction within the self-event, and decide that some transactions (such as those that have +expired or will soon expire) should be dropped while others (such as those not close to expiration) should be +resubmitted in the next event. + +### Sheriff Module + +When misbehavior is found for a node, it is reported to the Sheriff. This module keeps track of the different types of +misbehavior each node is accused of, and uses this information to determine whether to "shun" or "welcome" a node. It +also sends this information to the Execution layer, so it may record misbehavior in state, if it so chooses, or publish +misbehavior to other nodes in the network, allowing the network as a whole to observe and report dishonest or broken +nodes. + +![Sheriff](sheriff-module.png) + +A node may "shun" another node, by refusing to talk with it via Gossip. If a node were to be shunned by all its +gossip neighbors, then it has been effectively removed from the network, as it can no longer submit events that will be +spread through the network, and will therefore not contribute to consensus. Should a malicious node attempt to attack +its neighbors, if those neighbors discover this attack and simply shun the node, then taking no other action, the +malicious node is prevented from causing further harm to the network. + +It may be that a node is misbehaving due to a bug, or environmental issue, rather than due to malicious intent. For +example, a broken node with network trouble may attempt to create many connections, as each prior connection having +failed for some reason. 
But it could also be malicious intent. Unable to tell the difference, the Sheriff may decide to
+shun the node for some time period, and then "welcome" it back by allowing it to form connections again. It is up to
+the Sheriff's algorithms to decide on the correct response to different behaviors. These algorithms are not defined
+here, but will be defined within the Sheriff's design documentation.
+
+### Public API
+
+![Consensus API](consensus-api.png)
+
+The public API of the Consensus module forms the interface boundary between Consensus and Execution.
+
+#### initialize
+
+Called by the Execution layer when it creates a new Consensus instance. This call will include any required arguments
+to fully initialize Consensus. For example, it will include any initial state corresponding to the current round
+at which Consensus should begin work (such as the correct set of judges for that particular round). Each module within
+Consensus supports some kind of initialization API that will be called. For example, each module must be initialized
+with the initial roster for the starting round.
+
+#### destroy
+
+Called by the Execution layer when it destroys an existing Consensus instance. This is particularly needed to shut down
+network connections held by Gossip, but could be used to stop executors, background tasks, etc. as necessary.
+
+#### onBehind
+
+Called by Consensus to notify Execution that the Consensus system is very far behind in processing relative to its
+neighbors (most likely because it cannot find a neighbor that contains any of the events needed for advancing consensus).
+Execution will use this call to initiate a reconnect procedure.
+
+#### onBadNode
+
+Called by Consensus to notify Execution of information about bad nodes. This can include both when a node goes "bad",
+and when it is back in the good graces of the Sheriff.
+
+#### badNode
+
+Called by Execution to notify Consensus of information about bad nodes. 
The information to be passed here pertains to
+coordinated enforcement decisions that the network has come to consensus on.
+
+#### getTransactionsForEvent
+
+Called by the Event Creator module of Consensus to give the Execution layer a chance to provide transactions for an
+event. Note that, in the current system, the control is inverted -- transactions are submitted by Execution to
+Consensus, which buffers them and includes them in events. As per this new design, Consensus will reach out to Execution
+to ask it for all transactions that should be included in the next event, allowing the Execution layer (the application)
+to decide which transactions to include.
+
+#### nextRound
+
+Called by the Execution layer when it is ready to receive another round from the Consensus layer. This call may contain
+a new roster, if one is prepared and ready for use.
+
+#### onStaleEvent
+
+Called by Consensus to notify Execution when an event has become stale, giving Execution a chance to inspect the
+transactions of that stale event, and resubmit those transactions in the next `getTransactionsForEvent` if they are
+still valid.
+
+#### onPreHandleEvent
+
+Called by Consensus once for each event emitted in topological order from Event Intake, giving Execution a chance to
+perform some work before the event even comes to consensus.
+
+#### onRound
+
+Called by Consensus once for each round that comes to consensus. 
diff --git a/platform-sdk/docs/proposals/consensus-layer/consensus-api.png b/platform-sdk/docs/proposals/consensus-layer/consensus-api.png new file mode 100644 index 000000000000..afcdcb1b71b0 Binary files /dev/null and b/platform-sdk/docs/proposals/consensus-layer/consensus-api.png differ diff --git a/platform-sdk/docs/proposals/consensus-layer/consensus-module-arch.png b/platform-sdk/docs/proposals/consensus-layer/consensus-module-arch.png new file mode 100644 index 000000000000..c2c4a4420100 Binary files /dev/null and b/platform-sdk/docs/proposals/consensus-layer/consensus-module-arch.png differ diff --git a/platform-sdk/docs/proposals/consensus-layer/event-creator-module.png b/platform-sdk/docs/proposals/consensus-layer/event-creator-module.png new file mode 100644 index 000000000000..4a3524f4d7b1 Binary files /dev/null and b/platform-sdk/docs/proposals/consensus-layer/event-creator-module.png differ diff --git a/platform-sdk/docs/proposals/consensus-layer/event-intake-module.png b/platform-sdk/docs/proposals/consensus-layer/event-intake-module.png new file mode 100644 index 000000000000..fb08bb6c0053 Binary files /dev/null and b/platform-sdk/docs/proposals/consensus-layer/event-intake-module.png differ diff --git a/platform-sdk/docs/proposals/consensus-layer/gossip-module.png b/platform-sdk/docs/proposals/consensus-layer/gossip-module.png new file mode 100644 index 000000000000..f6b7392a3958 Binary files /dev/null and b/platform-sdk/docs/proposals/consensus-layer/gossip-module.png differ diff --git a/platform-sdk/docs/proposals/consensus-layer/hashgraph-module.png b/platform-sdk/docs/proposals/consensus-layer/hashgraph-module.png new file mode 100644 index 000000000000..b01515baf992 Binary files /dev/null and b/platform-sdk/docs/proposals/consensus-layer/hashgraph-module.png differ diff --git a/platform-sdk/docs/proposals/consensus-layer/hashgraph.png b/platform-sdk/docs/proposals/consensus-layer/hashgraph.png new file mode 100644 index 
000000000000..0874275a38d4 Binary files /dev/null and b/platform-sdk/docs/proposals/consensus-layer/hashgraph.png differ diff --git a/platform-sdk/docs/proposals/consensus-layer/network.png b/platform-sdk/docs/proposals/consensus-layer/network.png new file mode 100644 index 000000000000..9539d3207aeb Binary files /dev/null and b/platform-sdk/docs/proposals/consensus-layer/network.png differ diff --git a/platform-sdk/docs/proposals/consensus-layer/roster-changes.png b/platform-sdk/docs/proposals/consensus-layer/roster-changes.png new file mode 100644 index 000000000000..3f5a75e8c75a Binary files /dev/null and b/platform-sdk/docs/proposals/consensus-layer/roster-changes.png differ diff --git a/platform-sdk/docs/proposals/consensus-layer/sheriff-module.png b/platform-sdk/docs/proposals/consensus-layer/sheriff-module.png new file mode 100644 index 000000000000..d8b04e5d6aaa Binary files /dev/null and b/platform-sdk/docs/proposals/consensus-layer/sheriff-module.png differ diff --git a/platform-sdk/event-creator-impl/build.gradle.kts b/platform-sdk/event-creator-impl/build.gradle.kts new file mode 100644 index 000000000000..2db99cdefb2c --- /dev/null +++ b/platform-sdk/event-creator-impl/build.gradle.kts @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +plugins { + id("com.hedera.gradle.services") + id("com.hedera.gradle.services-publish") +} + +description = "Default Event Creator Implementation" diff --git a/platform-sdk/event-creator-impl/src/main/java/module-info.java b/platform-sdk/event-creator-impl/src/main/java/module-info.java new file mode 100644 index 000000000000..859eb9d4f59a --- /dev/null +++ b/platform-sdk/event-creator-impl/src/main/java/module-info.java @@ -0,0 +1,6 @@ +module org.hiero.event.creator.impl { + requires transitive org.hiero.event.creator; + + provides org.hiero.event.creator.EventCreator with + org.hiero.event.creator.impl.EventCreatorImpl; +} diff --git a/platform-sdk/event-creator-impl/src/main/java/org/hiero/event/creator/impl/EventCreatorImpl.java b/platform-sdk/event-creator-impl/src/main/java/org/hiero/event/creator/impl/EventCreatorImpl.java new file mode 100644 index 000000000000..fb4c08638ba4 --- /dev/null +++ b/platform-sdk/event-creator-impl/src/main/java/org/hiero/event/creator/impl/EventCreatorImpl.java @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.hiero.event.creator.impl; + +import org.hiero.event.creator.EventCreator; + +/** + * Implementation for the mock event creator. 
+ */ +public class EventCreatorImpl implements EventCreator {} diff --git a/platform-sdk/event-creator/build.gradle.kts b/platform-sdk/event-creator/build.gradle.kts new file mode 100644 index 000000000000..004e5295ff57 --- /dev/null +++ b/platform-sdk/event-creator/build.gradle.kts @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +plugins { + id("com.hedera.gradle.services") + id("com.hedera.gradle.services-publish") +} + +description = "Event Creator API" diff --git a/platform-sdk/event-creator/src/main/java/module-info.java b/platform-sdk/event-creator/src/main/java/module-info.java new file mode 100644 index 000000000000..46e83d50a218 --- /dev/null +++ b/platform-sdk/event-creator/src/main/java/module-info.java @@ -0,0 +1,3 @@ +module org.hiero.event.creator { + exports org.hiero.event.creator; +} diff --git a/platform-sdk/event-creator/src/main/java/org/hiero/event/creator/EventCreator.java b/platform-sdk/event-creator/src/main/java/org/hiero/event/creator/EventCreator.java new file mode 100644 index 000000000000..91fccd7dd9fc --- /dev/null +++ b/platform-sdk/event-creator/src/main/java/org/hiero/event/creator/EventCreator.java @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.hiero.event.creator; + +/** + * Mock event creator. + */ +public interface EventCreator {} diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/builder/PlatformComponentBuilder.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/builder/PlatformComponentBuilder.java index fd1264a20ee0..4e68bd940fe6 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/builder/PlatformComponentBuilder.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/builder/PlatformComponentBuilder.java @@ -21,6 +21,7 @@ import static com.swirlds.platform.gui.internal.BrowserWindowManager.getPlatforms; import static com.swirlds.platform.state.iss.IssDetector.DO_NOT_IGNORE_ROUNDS; +import com.hedera.hapi.node.state.roster.Roster; import com.swirlds.common.merkle.utility.SerializableLong; import com.swirlds.common.threading.manager.AdHocThreadManager; import com.swirlds.platform.SwirldsPlatform; @@ -72,6 +73,7 @@ import com.swirlds.platform.gossip.SyncGossip; import com.swirlds.platform.pool.DefaultTransactionPool; import com.swirlds.platform.pool.TransactionPool; +import com.swirlds.platform.roster.RosterRetriever; import com.swirlds.platform.state.hasher.DefaultStateHasher; import com.swirlds.platform.state.hasher.StateHasher; import com.swirlds.platform.state.hashlogger.DefaultHashLogger; @@ -192,6 +194,16 @@ private void throwIfAlreadyUsed() { } } + /** + * Get the roster from the initial state in PlatformBuildingBlocks. 
+ * + * @return the initial roster + */ + @NonNull + private Roster getInitialRoster() { + return RosterRetriever.buildRoster(blocks.initialAddressBook()); + } + /** * Build the platform. * @@ -499,7 +511,7 @@ public EventCreationManager buildEventCreationManager() { blocks.platformContext(), blocks.randomBuilder().buildNonCryptographicRandom(), data -> new PlatformSigner(blocks.keysAndCerts()).sign(data), - blocks.initialAddressBook(), + getInitialRoster(), blocks.selfId(), blocks.appVersion(), blocks.transactionPoolNexus()); diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/config/legacy/LegacyConfigPropertiesLoader.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/config/legacy/LegacyConfigPropertiesLoader.java index c1099c02e9f0..f29d906c4e0d 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/config/legacy/LegacyConfigPropertiesLoader.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/config/legacy/LegacyConfigPropertiesLoader.java @@ -56,7 +56,7 @@ public final class LegacyConfigPropertiesLoader { public static final String ERROR_MORE_THAN_ONE_APP = "config.txt had more than one line starting with 'app'. 
All but the last will be ignored."; public static final String ERROR_NO_PARAMETER = "%s needs a parameter"; - public static final String ERROR_ADDRESS_NOT_ENOUGH_PARAMETERS = "'address' needs a minimum of 7 parameters"; + public static final String ERROR_ADDRESS_COULD_NOT_BE_PARSED = "'address' could not be parsed"; public static final String ERROR_PROPERTY_NOT_KNOWN = "'%s' in config.txt isn't a recognized first parameter for a line"; public static final String ERROR_NEXT_NODE_NOT_GREATER_THAN_HIGHEST_ADDRESS = @@ -115,16 +115,20 @@ public static LegacyConfigProperties loadConfigFile(@NonNull final Path configPa addressBook.add(address); } } catch (final ParseException ex) { - logger.error( - EXCEPTION.getMarker(), - "file {}, line {}, offset {}: {}", - configPath, - lineNumber, - ex.getErrorOffset(), - ex.getMessage()); - onError(ERROR_ADDRESS_NOT_ENOUGH_PARAMETERS); + // if we fail to parse address, we must abort since otherwise node starts with subset + // of node keys and fails to join the network eventually. + throw new ConfigurationException( + String.format( + "%s [line: %d]: %s", + ERROR_ADDRESS_COULD_NOT_BE_PARSED, lineNumber, line), + ex); } } + case "nextnodeid" -> { + // As of release 0.56, nextNodeId is not used and ignored. + // CI/CD pipelines need to be updated to remove this field from files. + // Future Work: remove this case when nextNodeId is no longer present in CI/CD pipelines. 
+ } default -> onError(ERROR_PROPERTY_NOT_KNOWN.formatted(pars[0])); } } diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/crypto/CryptoStatic.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/crypto/CryptoStatic.java index 47e83641f986..b0730071cb9e 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/crypto/CryptoStatic.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/crypto/CryptoStatic.java @@ -42,8 +42,10 @@ import com.swirlds.platform.system.address.Address; import com.swirlds.platform.system.address.AddressBook; import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.ByteArrayInputStream; import java.io.FileInputStream; import java.io.IOException; +import java.io.InputStream; import java.math.BigInteger; import java.nio.file.Files; import java.nio.file.Path; @@ -61,6 +63,7 @@ import java.security.UnrecoverableKeyException; import java.security.cert.Certificate; import java.security.cert.CertificateException; +import java.security.cert.CertificateFactory; import java.security.cert.X509Certificate; import java.util.ArrayList; import java.util.Arrays; @@ -232,6 +235,22 @@ public static X509Certificate generateCertificate( } } + /** + * Decode a X509Certificate from a byte array that was previously obtained via X509Certificate.getEncoded(). 
+ * + * @param encoded a byte array with an encoded representation of a certificate + * @return the certificate reconstructed from its encoded form + */ + @NonNull + public static X509Certificate decodeCertificate(@NonNull final byte[] encoded) { + try (final InputStream in = new ByteArrayInputStream(encoded)) { + final CertificateFactory factory = CertificateFactory.getInstance("X.509"); + return (X509Certificate) factory.generateCertificate(in); + } catch (CertificateException | IOException e) { + throw new CryptographyException(e); + } + } + /** * Create a new trust store that is initially empty, but will later have all the members' key agreement public key * certificates added to it. @@ -530,6 +549,7 @@ public static Map initNodeSecurity( if (cryptoConfig.enableNewKeyStoreModel()) { logger.debug(STARTUP.getMarker(), "Reading keys using the enhanced key loader"); keysAndCerts = EnhancedKeyStoreLoader.using(addressBook, configuration) + .migrate() .scan() .generateIfNecessary() .verify() diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/crypto/EnhancedKeyStoreLoader.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/crypto/EnhancedKeyStoreLoader.java index 8d5c668dd46b..cec5cab6f185 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/crypto/EnhancedKeyStoreLoader.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/crypto/EnhancedKeyStoreLoader.java @@ -17,6 +17,7 @@ package com.swirlds.platform.crypto; import static com.swirlds.common.utility.CommonUtils.nameToAlias; +import static com.swirlds.logging.legacy.LogMarker.ERROR; import static com.swirlds.logging.legacy.LogMarker.STARTUP; import static com.swirlds.platform.crypto.CryptoConstants.PUBLIC_KEYS_FILE; import static com.swirlds.platform.crypto.CryptoStatic.copyPublicKeys; @@ -32,8 +33,11 @@ import com.swirlds.platform.system.address.AddressBook; import 
edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.File; +import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStreamReader; +import java.io.OutputStreamWriter; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -49,14 +53,18 @@ import java.security.Security; import java.security.UnrecoverableKeyException; import java.security.cert.Certificate; +import java.security.cert.CertificateEncodingException; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.bouncycastle.asn1.pkcs.PrivateKeyInfo; @@ -75,6 +83,8 @@ import org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo; import org.bouncycastle.pkcs.PKCSException; import org.bouncycastle.util.encoders.DecoderException; +import org.bouncycastle.util.io.pem.PemObject; +import org.bouncycastle.util.io.pem.PemWriter; /** * This class is responsible for loading the key stores for all nodes in the address book. @@ -1165,4 +1175,344 @@ private interface AddressBookCallback { void apply(int index, NodeId nodeId, Address address, String nodeAlias) throws KeyStoreException, KeyLoadingException; } + + ///////////////////////////////////////////////////////////////////////////////////////////////// + //////////////////////////////////// MIGRATION METHODS ////////////////////////////////////////// + ///////////////////////////////////////////////////////////////////////////////////////////////// + + /** + * Performs any necessary migration steps to ensure the key storage is up-to-date. + *

    + * As of release 0.56 the on-disk cryptography should reflect the following structure: + *

      + *
    • s-private-alias.pem - the private signing key
    • + *
    • s-public-alias.pem - the public signing certificates of each node
    • + *
    • all *.pfx files moved to OLD_PFX_KEYS subdirectory and no longer used.
    • + *
    • all agreement key material is deleted from disk.
    • + *
    + * + * @return this {@link EnhancedKeyStoreLoader} instance. + */ + @NonNull + public EnhancedKeyStoreLoader migrate() throws KeyLoadingException, KeyStoreException { + logger.info(STARTUP.getMarker(), "Starting key store migration"); + final Map pfxPrivateKeys = new HashMap<>(); + final Map pfxCertificates = new HashMap<>(); + + // delete agreement keys permanently. They are being created at startup by generateIfNecessary() after scan(). + deleteAgreementKeys(); + + // create PEM files for signing keys and certs. + long errorCount = extractPrivateKeysAndCertsFromPfxFiles(pfxPrivateKeys, pfxCertificates); + + if (errorCount == 0) { + // validate only when there are no errors extracting pem files. + errorCount = validateKeysAndCertsAreLoadableFromPemFiles(pfxPrivateKeys, pfxCertificates); + } + + if (errorCount > 0) { + // roll back due to errors. + // this deletes any pem files created, but leaves the agreement keys deleted. + logger.error(STARTUP.getMarker(), "Due to {} errors, reverting pem file creation.", errorCount); + rollBackSigningKeysAndCertsChanges(pfxPrivateKeys, pfxCertificates); + } else { + // cleanup pfx files by moving them to subdirectory + cleanupByMovingPfxFilesToSubDirectory(); + logger.info(STARTUP.getMarker(), "Finished key store migration."); + } + + return this; + } + + /** + * Delete any agreement keys from the key store directory. 
+ */ + private void deleteAgreementKeys() { + // delete any agreement keys of the form a-* + final File[] agreementKeyFiles = keyStoreDirectory.toFile().listFiles((dir, name) -> name.startsWith("a-")); + if (agreementKeyFiles != null) { + for (final File agreementKeyFile : agreementKeyFiles) { + if (agreementKeyFile.isFile()) { + try { + Files.delete(agreementKeyFile.toPath()); + logger.debug(STARTUP.getMarker(), "Deleted agreement key file {}", agreementKeyFile.getName()); + } catch (final IOException e) { + logger.error( + ERROR.getMarker(), + "Failed to delete agreement key file {}", + agreementKeyFile.getName()); + } + } + } + } + } + + /** + * Extracts the private keys and certificates from the PFX files and writes them to PEM files. + * + * @param pfxPrivateKeys the map of private keys being extracted (Updated By Method Call) + * @param pfxCertificates the map of certificates being extracted (Updated By Method Call) + * @return the number of errors encountered during the extraction process. + * @throws KeyStoreException if the underlying method calls throw this exception. + * @throws KeyLoadingException if the underlying method calls throw this exception. 
+ */ + private long extractPrivateKeysAndCertsFromPfxFiles( + final Map pfxPrivateKeys, final Map pfxCertificates) + throws KeyStoreException, KeyLoadingException { + final KeyStore legacyPublicStore = resolveLegacyPublicStore(); + final AtomicLong errorCount = new AtomicLong(0); + + iterateAddressBook(addressBook, (i, nodeId, address, nodeAlias) -> { + if (isLocal(address)) { + // extract private keys for local nodes + final Path sPrivateKeyLocation = keyStoreDirectory.resolve("s-private-" + nodeAlias + ".pem"); + final Path ksLocation = legacyPrivateKeyStore(nodeAlias); + if (!Files.exists(sPrivateKeyLocation) && Files.exists(ksLocation)) { + logger.trace( + STARTUP.getMarker(), + "Extracting private signing key for node {} from file {}", + nodeId, + ksLocation.getFileName()); + final PrivateKey privateKey = + readLegacyPrivateKey(nodeId, ksLocation, KeyCertPurpose.SIGNING.storeName(nodeAlias)); + pfxPrivateKeys.put(nodeId, privateKey); + if (privateKey == null) { + logger.error( + ERROR.getMarker(), + "Failed to extract private signing key for node {} from file {}", + nodeId, + ksLocation.getFileName()); + errorCount.incrementAndGet(); + } else { + logger.trace( + STARTUP.getMarker(), + "Writing private signing key for node {} to PEM file {}", + nodeId, + sPrivateKeyLocation.getFileName()); + try { + writePemFile(true, sPrivateKeyLocation, privateKey.getEncoded()); + } catch (final IOException e) { + logger.error( + ERROR.getMarker(), + "Failed to write private key for node {} to PEM file {}", + nodeId, + sPrivateKeyLocation.getFileName()); + errorCount.incrementAndGet(); + } + } + } + } + + // extract certificates for all nodes + final Path sCertificateLocation = keyStoreDirectory.resolve("s-public-" + nodeAlias + ".pem"); + final Path ksLocation = legacyCertificateStore(); + if (!Files.exists(sCertificateLocation) && Files.exists(ksLocation)) { + logger.trace( + STARTUP.getMarker(), + "Extracting signing certificate for node {} from file {} ", + nodeId, + 
ksLocation.getFileName()); + final Certificate certificate = + readLegacyCertificate(nodeId, nodeAlias, KeyCertPurpose.SIGNING, legacyPublicStore); + pfxCertificates.put(nodeId, certificate); + if (certificate == null) { + logger.error( + ERROR.getMarker(), + "Failed to extract signing certificate for node {} from file {}", + nodeId, + ksLocation.getFileName()); + errorCount.incrementAndGet(); + } else { + logger.trace( + STARTUP.getMarker(), + "Writing signing certificate for node {} to PEM file {}", + nodeId, + sCertificateLocation.getFileName()); + try { + writePemFile(false, sCertificateLocation, certificate.getEncoded()); + } catch (final CertificateEncodingException | IOException e) { + logger.error( + ERROR.getMarker(), + "Failed to write signing certificate for node {} to PEM file {}", + nodeId, + sCertificateLocation.getFileName()); + errorCount.incrementAndGet(); + } + } + } + }); + return errorCount.get(); + } + + /** + * Validates that the private keys and certs in PEM files are loadable and match the PFX loaded keys and certs. + * + * @param pfxPrivateKeys the map of private keys being extracted. + * @param pfxCertificates the map of certificates being extracted. + * @return the number of errors encountered during the validation process. + * @throws KeyStoreException if the underlying method calls throw this exception. + * @throws KeyLoadingException if the underlying method calls throw this exception. 
+ */ + private long validateKeysAndCertsAreLoadableFromPemFiles( + final Map pfxPrivateKeys, final Map pfxCertificates) + throws KeyStoreException, KeyLoadingException { + final AtomicLong errorCount = new AtomicLong(0); + iterateAddressBook(addressBook, (i, nodeId, address, nodeAlias) -> { + if (isLocal(address) && pfxCertificates.containsKey(nodeId)) { + // validate private keys for local nodes + final Path ksLocation = privateKeyStore(nodeAlias, KeyCertPurpose.SIGNING); + final PrivateKey pemPrivateKey = readPrivateKey(nodeId, ksLocation); + if (pemPrivateKey == null + || !Arrays.equals( + pemPrivateKey.getEncoded(), + pfxPrivateKeys.get(nodeId).getEncoded())) { + logger.error(ERROR.getMarker(), "Private key for node {} does not match the migrated key", nodeId); + errorCount.incrementAndGet(); + } + } + + // validate certificates for all nodes PEM files were created for. + if (pfxCertificates.containsKey(nodeId)) { + final Path ksLocation = certificateStore(nodeAlias, KeyCertPurpose.SIGNING); + final Certificate pemCertificate = readCertificate(nodeId, ksLocation); + try { + if (pemCertificate == null + || !Arrays.equals( + pemCertificate.getEncoded(), + pfxCertificates.get(nodeId).getEncoded())) { + logger.error( + ERROR.getMarker(), + "Certificate for node {} does not match the migrated certificate", + nodeId); + errorCount.incrementAndGet(); + } + } catch (final CertificateEncodingException e) { + logger.error(ERROR.getMarker(), "Encoding error while validating certificate for node {}.", nodeId); + errorCount.incrementAndGet(); + } + } + }); + return errorCount.get(); + } + + /** + * Rollback the creation of PEM files for signing keys and certificates. + * + * @param pfxPrivateKeys the map of private keys being extracted. + * @param pfxCertificates the map of certificates being extracted. + * @throws KeyStoreException if the underlying method calls throw this exception. + * @throws KeyLoadingException if the underlying method calls throw this exception. 
+ */ + private void rollBackSigningKeysAndCertsChanges( + final Map pfxPrivateKeys, final Map pfxCertificates) + throws KeyStoreException, KeyLoadingException { + + final AtomicLong cleanupErrorCount = new AtomicLong(0); + iterateAddressBook(addressBook, (i, nodeId, address, nodeAlias) -> { + // private key rollback + if (isLocal(address) && pfxPrivateKeys.containsKey(address.getNodeId())) { + try { + Files.deleteIfExists(privateKeyStore(nodeAlias, KeyCertPurpose.SIGNING)); + } catch (final IOException e) { + cleanupErrorCount.incrementAndGet(); + } + } + // certificate rollback + if (pfxCertificates.containsKey(address.getNodeId())) { + try { + Files.deleteIfExists(certificateStore(nodeAlias, KeyCertPurpose.SIGNING)); + } catch (final IOException e) { + cleanupErrorCount.incrementAndGet(); + } + } + }); + if (cleanupErrorCount.get() > 0) { + logger.error( + ERROR.getMarker(), + "Failed to rollback {} pem files created. Manual cleanup required.", + cleanupErrorCount.get()); + throw new IllegalStateException("Cryptography Migration failed to generate or validate PEM files."); + } + } + + /** + * Move the PFX files to the OLD_PFX_KEYS subdirectory. + * + * @throws KeyStoreException if the underlying method calls throw this exception. + * @throws KeyLoadingException if the underlying method calls throw this exception. 
+ */ + private void cleanupByMovingPfxFilesToSubDirectory() throws KeyStoreException, KeyLoadingException { + final AtomicLong cleanupErrorCount = new AtomicLong(0); + + final String archiveDirectory = ".archive"; + final String now = DateTimeFormatter.ofPattern("yyyy-MM-dd_HH-mm-ss").format(LocalDateTime.now()); + final String newDirectory = archiveDirectory + File.pathSeparator + now; + final Path pfxArchiveDirectory = keyStoreDirectory.resolve(archiveDirectory); + final Path pfxDateDirectory = pfxArchiveDirectory.resolve(now); + + logger.info(STARTUP.getMarker(), "Cryptography Migration Cleanup: Moving PFX files to {}", pfxDateDirectory); + + if (!Files.exists(pfxDateDirectory)) { + try { + if (!Files.exists(pfxArchiveDirectory)) { + Files.createDirectory(pfxArchiveDirectory); + } + Files.createDirectory(pfxDateDirectory); + } catch (final IOException e) { + logger.error( + ERROR.getMarker(), + "Failed to create [{}] subdirectory. Manual cleanup required.", + newDirectory); + return; + } + } + iterateAddressBook(addressBook, (i, nodeId, address, nodeAlias) -> { + if (isLocal(address)) { + // move private key PFX files per local node + final File sPrivatePfx = legacyPrivateKeyStore(nodeAlias).toFile(); + if (sPrivatePfx.exists() + && sPrivatePfx.isFile() + && !sPrivatePfx.renameTo( + pfxDateDirectory.resolve(sPrivatePfx.getName()).toFile())) { + cleanupErrorCount.incrementAndGet(); + } + } + }); + final File sPublicPfx = legacyCertificateStore().toFile(); + if (sPublicPfx.exists() + && sPublicPfx.isFile() + && !sPublicPfx.renameTo( + pfxArchiveDirectory.resolve(sPublicPfx.getName()).toFile())) { + cleanupErrorCount.incrementAndGet(); + } + if (cleanupErrorCount.get() > 0) { + logger.error( + ERROR.getMarker(), + "Failed to move {} PFX files to [{}] subdirectory. 
Manual cleanup required.", + cleanupErrorCount.get(), + newDirectory); + throw new IllegalStateException( + "Cryptography Migration failed to move PFX files to [" + newDirectory + "] subdirectory."); + } + } + + /** + * Write the provided encoded key or certificate as a base64 DER encoded PEM file to the provided location. + * + * @param isPrivateKey true if the encoded data is a private key; false if it is a certificate. + * @param location the location to write the PEM file. + * @param encoded the byte encoded data to write to the PEM file. + * @throws IOException if an error occurred while writing the PEM file. + */ + private static void writePemFile( + final boolean isPrivateKey, @NonNull final Path location, @NonNull final byte[] encoded) + throws IOException { + final PemObject pemObj = new PemObject(isPrivateKey ? "PRIVATE KEY" : "CERTIFICATE", encoded); + try (final FileOutputStream file = new FileOutputStream(location.toFile(), false); + final var out = new OutputStreamWriter(file); + final PemWriter writer = new PemWriter(out)) { + writer.writeObject(pemObj); + file.getFD().sync(); + } + } } diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/Tipset.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/Tipset.java index 594a12060027..90365720de90 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/Tipset.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/Tipset.java @@ -16,9 +16,10 @@ package com.swirlds.platform.event.creation.tipset; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; import com.swirlds.common.platform.NodeId; -import com.swirlds.platform.system.address.Address; -import com.swirlds.platform.system.address.AddressBook; +import com.swirlds.platform.roster.RosterUtils; import 
edu.umd.cs.findbugs.annotations.NonNull; import java.util.Arrays; import java.util.List; @@ -29,7 +30,7 @@ */ public class Tipset { - private final AddressBook addressBook; + private final Roster roster; /** * The tip generations, indexed by node index. @@ -45,11 +46,11 @@ public class Tipset { /** * Create an empty tipset. * - * @param addressBook the current address book + * @param roster the current address book */ - public Tipset(@NonNull final AddressBook addressBook) { - this.addressBook = Objects.requireNonNull(addressBook); - tips = new long[addressBook.getSize()]; + public Tipset(@NonNull final Roster roster) { + this.roster = Objects.requireNonNull(roster); + tips = new long[roster.rosterEntries().size()]; // Necessary because we currently start at generation 0, not generation 1. Arrays.fill(tips, UNDEFINED); @@ -62,7 +63,7 @@ public Tipset(@NonNull final AddressBook addressBook) { * @return a new empty tipset */ private static @NonNull Tipset buildEmptyTipset(@NonNull final Tipset tipset) { - return new Tipset(tipset.addressBook); + return new Tipset(tipset.roster); } /** @@ -104,8 +105,8 @@ public Tipset(@NonNull final AddressBook addressBook) { * @return the tip generation for the node */ public long getTipGenerationForNode(@NonNull final NodeId nodeId) { - final int index = addressBook.getIndexOfNodeId(nodeId); - if (index == AddressBook.NOT_IN_ADDRESS_BOOK_INDEX) { + final int index = RosterUtils.getIndex(roster, nodeId.id()); + if (index == -1) { return UNDEFINED; } return tips[index]; @@ -128,7 +129,7 @@ public int size() { * @return this object */ public @NonNull Tipset advance(@NonNull final NodeId creator, final long generation) { - final int index = addressBook.getIndexOfNodeId(creator); + final int index = RosterUtils.getIndex(roster, creator.id()); tips[index] = Math.max(tips[index], generation); return this; } @@ -158,7 +159,7 @@ public TipsetAdvancementWeight getTipAdvancementWeight(@NonNull final NodeId sel long nonZeroWeight = 0; long 
zeroWeightCount = 0; - final int selfIndex = addressBook.getIndexOfNodeId(selfId); + final int selfIndex = RosterUtils.getIndex(roster, selfId.id()); for (int index = 0; index < tips.length; index++) { if (index == selfIndex) { // We don't consider self advancement here, since self advancement does nothing to help consensus. @@ -166,13 +167,13 @@ public TipsetAdvancementWeight getTipAdvancementWeight(@NonNull final NodeId sel } if (this.tips[index] < that.tips[index]) { - final NodeId nodeId = addressBook.getNodeId(index); - final Address address = addressBook.getAddress(nodeId); + final RosterEntry address = roster.rosterEntries().get(index); + final NodeId nodeId = NodeId.of(address.nodeId()); - if (address.getWeight() == 0) { + if (address.weight() == 0) { zeroWeightCount += 1; } else { - nonZeroWeight += address.getWeight(); + nonZeroWeight += address.weight(); } } } @@ -187,7 +188,7 @@ public TipsetAdvancementWeight getTipAdvancementWeight(@NonNull final NodeId sel public String toString() { final StringBuilder sb = new StringBuilder("("); for (int index = 0; index < tips.length; index++) { - sb.append(addressBook.getNodeId(index)).append(":").append(tips[index]); + sb.append(roster.rosterEntries().get(index).nodeId()).append(":").append(tips[index]); if (index < tips.length - 1) { sb.append(", "); } diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetEventCreator.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetEventCreator.java index d7d0a910a23e..2ceffbdb66b3 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetEventCreator.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetEventCreator.java @@ -20,6 +20,7 @@ import static com.swirlds.platform.event.creation.tipset.TipsetAdvancementWeight.ZERO_ADVANCEMENT_WEIGHT; import static 
com.swirlds.platform.system.events.EventConstants.CREATOR_ID_UNDEFINED; +import com.hedera.hapi.node.state.roster.Roster; import com.swirlds.base.time.Time; import com.swirlds.common.context.PlatformContext; import com.swirlds.common.crypto.Cryptography; @@ -37,8 +38,8 @@ import com.swirlds.platform.event.hashing.PbjStreamHasher; import com.swirlds.platform.event.hashing.UnsignedEventHasher; import com.swirlds.platform.eventhandling.EventConfig; +import com.swirlds.platform.roster.RosterUtils; import com.swirlds.platform.system.SoftwareVersion; -import com.swirlds.platform.system.address.AddressBook; import com.swirlds.platform.system.events.EventDescriptorWrapper; import com.swirlds.platform.system.events.UnsignedEvent; import edu.umd.cs.findbugs.annotations.NonNull; @@ -75,7 +76,7 @@ public class TipsetEventCreator implements EventCreator { /** * The address book for the current network. */ - private final AddressBook addressBook; + private final Roster roster; /** * The size of the current address book. 
@@ -128,7 +129,7 @@ public class TipsetEventCreator implements EventCreator { * @param platformContext the platform context * @param random a source of randomness, does not need to be cryptographically secure * @param signer used for signing things with this node's private key - * @param addressBook the current address book + * @param roster the current roster * @param selfId this node's ID * @param softwareVersion the current software version of the application * @param transactionSupplier provides transactions to be included in new events @@ -137,7 +138,7 @@ public TipsetEventCreator( @NonNull final PlatformContext platformContext, @NonNull final Random random, @NonNull final Signer signer, - @NonNull final AddressBook addressBook, + @NonNull final Roster roster, @NonNull final NodeId selfId, @NonNull final SoftwareVersion softwareVersion, @NonNull final TransactionSupplier transactionSupplier) { @@ -148,23 +149,23 @@ public TipsetEventCreator( this.selfId = Objects.requireNonNull(selfId); this.transactionSupplier = Objects.requireNonNull(transactionSupplier); this.softwareVersion = Objects.requireNonNull(softwareVersion); - this.addressBook = Objects.requireNonNull(addressBook); + this.roster = Objects.requireNonNull(roster); final EventCreationConfig eventCreationConfig = platformContext.getConfiguration().getConfigData(EventCreationConfig.class); cryptography = platformContext.getCryptography(); antiSelfishnessFactor = Math.max(1.0, eventCreationConfig.antiSelfishnessFactor()); - tipsetMetrics = new TipsetMetrics(platformContext, addressBook); + tipsetMetrics = new TipsetMetrics(platformContext, roster); ancientMode = platformContext .getConfiguration() .getConfigData(EventConfig.class) .getAncientMode(); - tipsetTracker = new TipsetTracker(time, addressBook, ancientMode); + tipsetTracker = new TipsetTracker(time, roster, ancientMode); childlessOtherEventTracker = new ChildlessEventTracker(); - tipsetWeightCalculator = new TipsetWeightCalculator( - 
platformContext, addressBook, selfId, tipsetTracker, childlessOtherEventTracker); - networkSize = addressBook.getSize(); + tipsetWeightCalculator = + new TipsetWeightCalculator(platformContext, roster, selfId, tipsetTracker, childlessOtherEventTracker); + networkSize = roster.rosterEntries().size(); zeroAdvancementWeightLogger = new RateLimitedLogger(logger, time, Duration.ofMinutes(1)); noParentFoundLogger = new RateLimitedLogger(logger, time, Duration.ofMinutes(1)); @@ -183,7 +184,7 @@ public void registerEvent(@NonNull final PlatformEvent event) { } final NodeId eventCreator = event.getCreatorId(); - if (!addressBook.contains(eventCreator)) { + if (RosterUtils.getIndex(roster, eventCreator.id()) == -1) { return; } final boolean selfEvent = eventCreator.equals(selfId); diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetMetrics.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetMetrics.java index d878b9b7adf5..bfab951731c7 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetMetrics.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetMetrics.java @@ -16,13 +16,13 @@ package com.swirlds.platform.event.creation.tipset; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; import com.swirlds.common.context.PlatformContext; import com.swirlds.common.metrics.RunningAverageMetric; import com.swirlds.common.metrics.SpeedometerMetric; import com.swirlds.common.platform.NodeId; import com.swirlds.metrics.api.Metrics; -import com.swirlds.platform.system.address.Address; -import com.swirlds.platform.system.address.AddressBook; import edu.umd.cs.findbugs.annotations.NonNull; import java.util.HashMap; import java.util.Map; @@ -53,14 +53,14 @@ public class TipsetMetrics { * * @param platformContext the 
platform context */ - public TipsetMetrics(@NonNull final PlatformContext platformContext, @NonNull final AddressBook addressBook) { + public TipsetMetrics(@NonNull final PlatformContext platformContext, @NonNull final Roster roster) { final Metrics metrics = platformContext.getMetrics(); tipsetAdvancementMetric = metrics.getOrCreate(TIPSET_ADVANCEMENT_CONFIG); selfishnessMetric = metrics.getOrCreate(SELFISHNESS_CONFIG); - for (final Address address : addressBook) { - final NodeId nodeId = address.getNodeId(); + for (final RosterEntry address : roster.rosterEntries()) { + final NodeId nodeId = NodeId.of(address.nodeId()); final SpeedometerMetric.Config parentConfig = new SpeedometerMetric.Config( "platform", "tipsetParent" + nodeId.id()) diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetTracker.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetTracker.java index 20336f1b32fe..0b947d1449ad 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetTracker.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetTracker.java @@ -19,6 +19,7 @@ import static com.swirlds.logging.legacy.LogMarker.EXCEPTION; import static com.swirlds.platform.event.creation.tipset.Tipset.merge; +import com.hedera.hapi.node.state.roster.Roster; import com.swirlds.base.time.Time; import com.swirlds.common.platform.NodeId; import com.swirlds.common.utility.throttle.RateLimitedLogger; @@ -26,7 +27,6 @@ import com.swirlds.platform.event.AncientMode; import com.swirlds.platform.sequence.map.SequenceMap; import com.swirlds.platform.sequence.map.StandardSequenceMap; -import com.swirlds.platform.system.address.AddressBook; import com.swirlds.platform.system.events.EventDescriptorWrapper; import edu.umd.cs.findbugs.annotations.NonNull; import 
edu.umd.cs.findbugs.annotations.Nullable; @@ -58,7 +58,7 @@ public class TipsetTracker { */ private Tipset latestGenerations; - private final AddressBook addressBook; + private final Roster roster; private final AncientMode ancientMode; private EventWindow eventWindow; @@ -69,15 +69,15 @@ public class TipsetTracker { * Create a new tipset tracker. * * @param time provides wall clock time - * @param addressBook the current address book + * @param roster the current roster * @param ancientMode the {@link AncientMode} to use */ public TipsetTracker( - @NonNull final Time time, @NonNull final AddressBook addressBook, @NonNull final AncientMode ancientMode) { + @NonNull final Time time, @NonNull final Roster roster, @NonNull final AncientMode ancientMode) { - this.addressBook = Objects.requireNonNull(addressBook); + this.roster = Objects.requireNonNull(roster); - this.latestGenerations = new Tipset(addressBook); + this.latestGenerations = new Tipset(roster); if (ancientMode == AncientMode.BIRTH_ROUND_THRESHOLD) { tipsets = new StandardSequenceMap<>(0, INITIAL_TIPSET_MAP_CAPACITY, true, ed -> ed.eventDescriptor() @@ -147,7 +147,7 @@ public Tipset addEvent( final Tipset eventTipset; if (parentTipsets.isEmpty()) { - eventTipset = new Tipset(addressBook) + eventTipset = new Tipset(roster) .advance( eventDescriptorWrapper.creator(), eventDescriptorWrapper.eventDescriptor().generation()); @@ -199,7 +199,7 @@ public int size() { */ public void clear() { eventWindow = EventWindow.getGenesisEventWindow(ancientMode); - latestGenerations = new Tipset(addressBook); + latestGenerations = new Tipset(roster); tipsets.clear(); } } diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetWeightCalculator.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetWeightCalculator.java index 233569b6461a..a3c8abf52faa 100644 --- 
a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetWeightCalculator.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/event/creation/tipset/TipsetWeightCalculator.java @@ -20,11 +20,12 @@ import static com.swirlds.logging.legacy.LogMarker.EXCEPTION; import static com.swirlds.platform.event.creation.tipset.TipsetAdvancementWeight.ZERO_ADVANCEMENT_WEIGHT; +import com.hedera.hapi.node.state.roster.Roster; import com.swirlds.common.context.PlatformContext; import com.swirlds.common.platform.NodeId; import com.swirlds.common.utility.throttle.RateLimitedLogger; import com.swirlds.platform.event.creation.EventCreationConfig; -import com.swirlds.platform.system.address.AddressBook; +import com.swirlds.platform.roster.RosterUtils; import com.swirlds.platform.system.events.EventDescriptorWrapper; import edu.umd.cs.findbugs.annotations.NonNull; import java.time.Duration; @@ -101,7 +102,7 @@ public class TipsetWeightCalculator { */ private Tipset latestSelfEventTipset; - private final AddressBook addressBook; + private final Roster roster; private final RateLimitedLogger ancientParentLogger; private final RateLimitedLogger allParentsAreAncientLogger; @@ -110,14 +111,14 @@ public class TipsetWeightCalculator { * Create a new tipset weight calculator. 
* * @param platformContext the platform context - * @param addressBook the current address book + * @param roster the current roster * @param selfId the ID of the node tracked by this object * @param tipsetTracker builds tipsets for individual events * @param childlessEventTracker tracks non-ancient events without children */ public TipsetWeightCalculator( @NonNull final PlatformContext platformContext, - @NonNull final AddressBook addressBook, + @NonNull final Roster roster, @NonNull final NodeId selfId, @NonNull final TipsetTracker tipsetTracker, @NonNull final ChildlessEventTracker childlessEventTracker) { @@ -125,17 +126,17 @@ public TipsetWeightCalculator( this.selfId = Objects.requireNonNull(selfId); this.tipsetTracker = Objects.requireNonNull(tipsetTracker); this.childlessEventTracker = Objects.requireNonNull(childlessEventTracker); - this.addressBook = Objects.requireNonNull(addressBook); + this.roster = Objects.requireNonNull(roster); - totalWeight = addressBook.getTotalWeight(); - selfWeight = addressBook.getAddress(selfId).getWeight(); + totalWeight = RosterUtils.computeTotalWeight(roster); + selfWeight = RosterUtils.getRosterEntry(roster, selfId.id()).weight(); maximumPossibleAdvancementWeight = totalWeight - selfWeight; maxSnapshotHistorySize = platformContext .getConfiguration() .getConfigData(EventCreationConfig.class) .tipsetSnapshotHistorySize(); - snapshot = new Tipset(addressBook); + snapshot = new Tipset(roster); latestSelfEventTipset = snapshot; snapshotHistory.add(snapshot); @@ -330,7 +331,7 @@ public int getSelfishnessScoreForNode(@NonNull final NodeId nodeId) { * Clear the tipset weight calculator to its initial state. 
*/ public void clear() { - snapshot = new Tipset(addressBook); + snapshot = new Tipset(roster); latestSelfEventTipset = snapshot; snapshotHistory.clear(); snapshotHistory.add(snapshot); diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/FallenBehindManagerImpl.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/FallenBehindManagerImpl.java index 3eda72ed8554..25f7ce72a4b3 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/FallenBehindManagerImpl.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/FallenBehindManagerImpl.java @@ -30,6 +30,7 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import org.hiero.consensus.gossip.FallenBehindManager; /** * A thread-safe implementation of {@link FallenBehindManager} diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/shadowgraph/ShadowgraphSynchronizer.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/shadowgraph/ShadowgraphSynchronizer.java index b116d8c75cdf..f8e4d285a098 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/shadowgraph/ShadowgraphSynchronizer.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/shadowgraph/ShadowgraphSynchronizer.java @@ -36,7 +36,6 @@ import com.swirlds.platform.event.AncientMode; import com.swirlds.platform.event.PlatformEvent; import com.swirlds.platform.eventhandling.EventConfig; -import com.swirlds.platform.gossip.FallenBehindManager; import com.swirlds.platform.gossip.IntakeEventCounter; import com.swirlds.platform.gossip.SyncException; import com.swirlds.platform.gossip.sync.config.SyncConfig; @@ -59,6 +58,7 @@ import java.util.stream.Collectors; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import 
org.hiero.consensus.gossip.FallenBehindManager; /** * The goal of the ShadowgraphSynchronizer is to compare graphs with a remote node, and update them so both sides have diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/sync/SyncManagerImpl.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/sync/SyncManagerImpl.java index 1bfecf952871..b6f40a27f5a7 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/sync/SyncManagerImpl.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/sync/SyncManagerImpl.java @@ -22,12 +22,12 @@ import com.swirlds.common.context.PlatformContext; import com.swirlds.common.metrics.FunctionGauge; import com.swirlds.common.platform.NodeId; -import com.swirlds.platform.gossip.FallenBehindManager; import edu.umd.cs.findbugs.annotations.NonNull; import java.util.List; import java.util.Objects; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.hiero.consensus.gossip.FallenBehindManager; /** * A class that manages information about who we need to sync with, and whether we need to reconnect diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/sync/protocol/SyncProtocol.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/sync/protocol/SyncProtocol.java index fe8385320159..22d23bd68066 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/sync/protocol/SyncProtocol.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/gossip/sync/protocol/SyncProtocol.java @@ -22,7 +22,6 @@ import com.swirlds.common.platform.NodeId; import com.swirlds.common.threading.pool.ParallelExecutionException; import com.swirlds.platform.Utilities; -import com.swirlds.platform.gossip.FallenBehindManager; import com.swirlds.platform.gossip.IntakeEventCounter; 
import com.swirlds.platform.gossip.SyncException; import com.swirlds.platform.gossip.permits.SyncPermitProvider; @@ -39,6 +38,7 @@ import java.util.Objects; import java.util.function.BooleanSupplier; import java.util.function.Supplier; +import org.hiero.consensus.gossip.FallenBehindManager; /** * Executes the sync protocol where events are exchanged with a peer and all events are sent and received in topological diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/network/protocol/ReconnectProtocolFactory.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/network/protocol/ReconnectProtocolFactory.java index 90dde3a2269d..5a75b8f04ddc 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/network/protocol/ReconnectProtocolFactory.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/network/protocol/ReconnectProtocolFactory.java @@ -21,7 +21,6 @@ import com.swirlds.common.platform.NodeId; import com.swirlds.common.threading.manager.ThreadManager; import com.swirlds.config.api.Configuration; -import com.swirlds.platform.gossip.FallenBehindManager; import com.swirlds.platform.metrics.ReconnectMetrics; import com.swirlds.platform.reconnect.ReconnectController; import com.swirlds.platform.reconnect.ReconnectProtocol; @@ -33,6 +32,7 @@ import java.time.Duration; import java.util.Objects; import java.util.function.Supplier; +import org.hiero.consensus.gossip.FallenBehindManager; /** * Implementation of a factory for reconnect protocol diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/network/protocol/SyncProtocolFactory.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/network/protocol/SyncProtocolFactory.java index 52ef7eb26d75..90369ef1828a 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/network/protocol/SyncProtocolFactory.java +++ 
b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/network/protocol/SyncProtocolFactory.java @@ -18,7 +18,6 @@ import com.swirlds.common.context.PlatformContext; import com.swirlds.common.platform.NodeId; -import com.swirlds.platform.gossip.FallenBehindManager; import com.swirlds.platform.gossip.IntakeEventCounter; import com.swirlds.platform.gossip.permits.SyncPermitProvider; import com.swirlds.platform.gossip.shadowgraph.ShadowgraphSynchronizer; @@ -30,6 +29,7 @@ import java.util.Objects; import java.util.function.BooleanSupplier; import java.util.function.Supplier; +import org.hiero.consensus.gossip.FallenBehindManager; /** * Implementation of a factory for sync protocol diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/reconnect/ReconnectProtocol.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/reconnect/ReconnectProtocol.java index 1f21f3b57e4c..7ae99395b028 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/reconnect/ReconnectProtocol.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/reconnect/ReconnectProtocol.java @@ -26,7 +26,6 @@ import com.swirlds.common.threading.manager.ThreadManager; import com.swirlds.common.utility.throttle.RateLimitedLogger; import com.swirlds.config.api.Configuration; -import com.swirlds.platform.gossip.FallenBehindManager; import com.swirlds.platform.metrics.ReconnectMetrics; import com.swirlds.platform.network.Connection; import com.swirlds.platform.network.NetworkProtocolException; @@ -41,6 +40,7 @@ import java.util.function.Supplier; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.hiero.consensus.gossip.FallenBehindManager; /** * Implements the reconnect protocol over a bidirectional network diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/roster/RosterEntryNotFoundException.java 
b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/roster/RosterEntryNotFoundException.java new file mode 100644 index 000000000000..3777d330c9b2 --- /dev/null +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/roster/RosterEntryNotFoundException.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.swirlds.platform.roster; + +import edu.umd.cs.findbugs.annotations.NonNull; + +/** + * An exception thrown when a RosterEntry cannot be found, e.g. when searching by NodeId. + */ +public class RosterEntryNotFoundException extends RuntimeException { + /** + * A default constructor. 
+ * @param message a message + */ + public RosterEntryNotFoundException(@NonNull final String message) { + super(message); + } +} diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/roster/RosterUtils.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/roster/RosterUtils.java index 56fab834e360..2afb94c81669 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/roster/RosterUtils.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/roster/RosterUtils.java @@ -17,9 +17,12 @@ package com.swirlds.platform.roster; import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; import com.swirlds.common.crypto.Hash; +import com.swirlds.platform.crypto.CryptoStatic; import com.swirlds.platform.util.PbjRecordHasher; import edu.umd.cs.findbugs.annotations.NonNull; +import java.security.cert.X509Certificate; /** * A utility class to help use Rooster and RosterEntry instances. @@ -42,6 +45,16 @@ public static String formatNodeName(final long nodeId) { return "node" + (nodeId + 1); } + /** + * Fetch the gossip certificate from a given RosterEntry. + * + * @param entry a RosterEntry + * @return a gossip certificate + */ + public static X509Certificate fetchGossipCaCertificate(@NonNull final RosterEntry entry) { + return CryptoStatic.decodeCertificate(entry.gossipCaCertificate().toByteArray()); + } + /** * Create a Hash object for a given Roster instance. * @@ -52,4 +65,52 @@ public static String formatNodeName(final long nodeId) { public static Hash hash(@NonNull final Roster roster) { return PBJ_RECORD_HASHER.hash(roster, Roster.PROTOBUF); } + + /** + * Return an index of a RosterEntry with a given node id. 
+ * + * @param roster a Roster + * @param nodeId a node id + * @return an index, or -1 if not found + */ + public static int getIndex(@NonNull final Roster roster, final long nodeId) { + for (int i = 0; i < roster.rosterEntries().size(); i++) { + if (roster.rosterEntries().get(i).nodeId() == nodeId) { + return i; + } + } + return -1; + } + + /** + * Compute the total weight of a Roster which is a sum of weights of all the RosterEntries. + * + * @param roster a roster + * @return the total weight + */ + public static long computeTotalWeight(@NonNull final Roster roster) { + return roster.rosterEntries().stream().mapToLong(RosterEntry::weight).sum(); + } + + /** + * Returns a RosterEntry with a given nodeId by simply iterating all entries, + * w/o building a temporary map. + * + * Useful for one-off look-ups. If code needs to look up multiple entries by NodeId, + * then the code should use the RosterUtils.toMap() method and keep the map instance + * for the look-ups. + * + * @param roster a roster + * @param nodeId a node id + * @return a RosterEntry + * @throws RosterEntryNotFoundException if RosterEntry is not found in Roster + */ + public static RosterEntry getRosterEntry(@NonNull final Roster roster, final long nodeId) { + for (final RosterEntry entry : roster.rosterEntries()) { + if (entry.nodeId() == nodeId) { + return entry; + } + } + throw new RosterEntryNotFoundException("No RosterEntry with nodeId: " + nodeId + " in Roster: " + roster); + } } diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/state/service/ReadableRosterStore.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/state/service/ReadableRosterStore.java new file mode 100644 index 000000000000..11b701fbe415 --- /dev/null +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/state/service/ReadableRosterStore.java @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.swirlds.platform.state.service; + +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.pbj.runtime.io.buffer.Bytes; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; + +/** + * Read-only implementation for accessing rosters states. + */ +public interface ReadableRosterStore { + /** + * Gets the candidate roster if found in state or null otherwise. + * Note that state commits are buffered, + * so it is possible that a recently stored candidate roster is still in the batched changes and not yet committed. + * Therefore, callers of this API must bear in mind that an immediate call after storing a candidate roster may return null. + * + * @return the candidate roster + */ + @Nullable + Roster getCandidateRoster(); + + /** + * Gets the active roster. + * Returns the active roster iff: + * the roster state singleton is not null + * the list of round roster pairs is not empty + * the first round roster pair exists + * the active roster hash is present in the roster map + * otherwise returns null. + * @return the active roster + */ + @Nullable + Roster getActiveRoster(); + + /** + * Get the roster based on roster hash + * + * @param rosterHash The roster hash + * @return The roster. 
+ */ + @Nullable + Roster get(@NonNull Bytes rosterHash); +} diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/state/service/ReadableRosterStoreImpl.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/state/service/ReadableRosterStoreImpl.java new file mode 100644 index 000000000000..54ecb2608f5c --- /dev/null +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/state/service/ReadableRosterStoreImpl.java @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.swirlds.platform.state.service; + +import com.hedera.hapi.node.state.primitives.ProtoBytes; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterState; +import com.hedera.hapi.node.state.roster.RoundRosterPair; +import com.hedera.pbj.runtime.io.buffer.Bytes; +import com.swirlds.common.RosterStateId; +import com.swirlds.state.spi.ReadableKVState; +import com.swirlds.state.spi.ReadableSingletonState; +import com.swirlds.state.spi.ReadableStates; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.List; +import java.util.Objects; + +/** + * Provides read-only methods for interacting with the underlying data storage mechanisms for + * working with Rosters. 
+ */ +public class ReadableRosterStoreImpl implements ReadableRosterStore { + + /** + * The roster state singleton. This is the state that holds the candidate roster hash and the list of pairs of round + * and active roster hashes. + */ + private final ReadableSingletonState rosterState; + + /** + * The key-value map of roster hashes and rosters. + */ + private final ReadableKVState rosterMap; + + /** + * Create a new {@link ReadableRosterStore} instance. + * + * @param readableStates The state to use. + */ + public ReadableRosterStoreImpl(@NonNull final ReadableStates readableStates) { + Objects.requireNonNull(readableStates); + this.rosterState = readableStates.getSingleton(RosterStateId.ROSTER_STATES_KEY); + this.rosterMap = readableStates.get(RosterStateId.ROSTER_KEY); + } + + /** {@inheritDoc} */ + @Nullable + @Override + public Roster getCandidateRoster() { + final RosterState rosterStateSingleton = rosterState.get(); + if (rosterStateSingleton == null) { + return null; + } + final Bytes candidateRosterHash = rosterStateSingleton.candidateRosterHash(); + return rosterMap.get(ProtoBytes.newBuilder().value(candidateRosterHash).build()); + } + + /** {@inheritDoc} */ + @Nullable + @Override + public Roster getActiveRoster() { + final RosterState rosterStateSingleton = rosterState.get(); + if (rosterStateSingleton == null) { + return null; + } + final List rostersAndRounds = rosterStateSingleton.roundRosterPairs(); + if (rostersAndRounds.isEmpty()) { + return null; + } + // by design, the first round roster pair is the active roster + // this may need to be revisited when we reach DAB + final RoundRosterPair latestRoundRosterPair = rostersAndRounds.getFirst(); + final Bytes activeRosterHash = latestRoundRosterPair.activeRosterHash(); + return rosterMap.get(ProtoBytes.newBuilder().value(activeRosterHash).build()); + } + + /** {@inheritDoc} */ + @Nullable + @Override + public Roster get(@NonNull final Bytes rosterHash) { + return 
rosterMap.get(ProtoBytes.newBuilder().value(rosterHash).build()); + } +} diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/state/service/WritableRosterStore.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/state/service/WritableRosterStore.java new file mode 100644 index 000000000000..1c320de238a3 --- /dev/null +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/state/service/WritableRosterStore.java @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.swirlds.platform.state.service; + +import static java.util.Objects.requireNonNull; + +import com.hedera.hapi.node.state.primitives.ProtoBytes; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterState; +import com.hedera.hapi.node.state.roster.RosterState.Builder; +import com.hedera.hapi.node.state.roster.RoundRosterPair; +import com.hedera.hapi.platform.state.PlatformState; +import com.hedera.pbj.runtime.io.buffer.Bytes; +import com.swirlds.common.RosterStateId; +import com.swirlds.platform.roster.RosterUtils; +import com.swirlds.platform.roster.RosterValidator; +import com.swirlds.state.spi.WritableKVState; +import com.swirlds.state.spi.WritableSingletonState; +import com.swirlds.state.spi.WritableStates; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.LinkedList; +import java.util.List; + +/** + * Read-write implementation for accessing rosters states. + */ +public class WritableRosterStore extends ReadableRosterStoreImpl { + + /** + * The maximum number of active rosters to keep in the roster state. + */ + public static final int MAXIMUM_ROSTER_HISTORY_SIZE = 2; + + /** + * The roster state singleton. This is the state that holds the candidate roster hash and the list of pairs of + * active roster hashes and the round number in which those rosters became active. + * + * @implNote the use of {@link ReadablePlatformStateStore} and {@link WritablePlatformStateStore} to provide access + * to the roster states (beyond just the {@link PlatformState}) is deliberate, for convenience. + */ + private final WritableSingletonState rosterState; + + private final WritableKVState rosterMap; + + /** + * Constructs a new {@link WritableRosterStore} instance. 
+ * + * @param writableStates the readable states + */ + public WritableRosterStore(@NonNull final WritableStates writableStates) { + super(writableStates); + requireNonNull(writableStates); + this.rosterState = writableStates.getSingleton(RosterStateId.ROSTER_STATES_KEY); + this.rosterMap = writableStates.get(RosterStateId.ROSTER_KEY); + } + + /** + * Sets the candidate roster in state. + * Setting the candidate roster indicates that this roster should be adopted as the active roster when required. + * + * @param candidateRoster a candidate roster to set. It must be a valid roster. + */ + public void putCandidateRoster(@NonNull final Roster candidateRoster) { + requireNonNull(candidateRoster); + RosterValidator.validate(candidateRoster); + + final Bytes incomingCandidateRosterHash = + RosterUtils.hash(candidateRoster).getBytes(); + + // update the roster state/map + final RosterState previousRosterState = rosterStateOrThrow(); + final Bytes previousCandidateRosterHash = previousRosterState.candidateRosterHash(); + final Builder newRosterStateBuilder = + previousRosterState.copyBuilder().candidateRosterHash(incomingCandidateRosterHash); + removeRoster(previousCandidateRosterHash); + + rosterState.put(newRosterStateBuilder.build()); + rosterMap.put(ProtoBytes.newBuilder().value(incomingCandidateRosterHash).build(), candidateRoster); + } + + /** + * Sets the Active roster. + * This will be called to store a new Active Roster in the state. + * The roster must be valid according to rules codified in {@link com.swirlds.platform.roster.RosterValidator}. + * + * @param roster an active roster to set + * @param round the round number in which the roster became active. + * It must be a positive number greater than the round number of the current active roster. 
+ */ + public void putActiveRoster(@NonNull final Roster roster, final long round) { + requireNonNull(roster); + RosterValidator.validate(roster); + + // update the roster state + final RosterState previousRosterState = rosterStateOrThrow(); + final List roundRosterPairs = new LinkedList<>(previousRosterState.roundRosterPairs()); + if (!roundRosterPairs.isEmpty()) { + final RoundRosterPair activeRosterPair = roundRosterPairs.getFirst(); + if (round < 0 || round <= activeRosterPair.roundNumber()) { + throw new IllegalArgumentException( + "incoming round number must be greater than the round number of the current active roster."); + } + } + final Bytes activeRosterHash = RosterUtils.hash(roster).getBytes(); + roundRosterPairs.addFirst(new RoundRosterPair(round, activeRosterHash)); + + if (roundRosterPairs.size() > MAXIMUM_ROSTER_HISTORY_SIZE) { + final RoundRosterPair lastRemovedRoster = roundRosterPairs.removeLast(); + removeRoster(lastRemovedRoster.activeRosterHash()); + + // At this phase of the implementation, the roster state has a fixed size limit for active rosters. + // Future implementations (e.g. DAB) can modify this. + if (roundRosterPairs.size() > MAXIMUM_ROSTER_HISTORY_SIZE) { + // additional safety check to ensure that the roster state does not contain more than set limit. + throw new IllegalStateException( + "Active rosters in the Roster state cannot be more than " + MAXIMUM_ROSTER_HISTORY_SIZE); + } + } + + final Builder newRosterStateBuilder = previousRosterState + .copyBuilder() + .candidateRosterHash(Bytes.EMPTY) + .roundRosterPairs(roundRosterPairs); + // since a new active roster is being set, the existing candidate roster is no longer valid + // so we remove it if it meets removal criteria. 
+ removeRoster(previousRosterState.candidateRosterHash()); + rosterState.put(newRosterStateBuilder.build()); + rosterMap.put(ProtoBytes.newBuilder().value(activeRosterHash).build(), roster); + } + + /** + * Returns the roster state or throws an exception if the state is null. + * @return the roster state + * @throws NullPointerException if the roster state is null + */ + @NonNull + private RosterState rosterStateOrThrow() { + return requireNonNull(rosterState.get()); + } + + /** + * Removes a roster from the roster map, but only if it doesn't match any of the active roster hashes in + * the roster state. The check ensures we don't inadvertently remove a roster still in use. + * + * @param rosterHash the hash of the roster + */ + private void removeRoster(@NonNull final Bytes rosterHash) { + final List activeRosterHistory = rosterStateOrThrow().roundRosterPairs(); + if (activeRosterHistory.stream() + .noneMatch(rosterPair -> rosterPair.activeRosterHash().equals(rosterHash))) { + this.rosterMap.remove(ProtoBytes.newBuilder().value(rosterHash).build()); + } + } +} diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/roster/schemas/V0540RosterSchema.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/state/service/schemas/V0540RosterSchema.java similarity index 92% rename from hedera-node/hedera-app/src/main/java/com/hedera/node/app/roster/schemas/V0540RosterSchema.java rename to platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/state/service/schemas/V0540RosterSchema.java index f76fcf016789..119d2eeb7fe4 100644 --- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/roster/schemas/V0540RosterSchema.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/state/service/schemas/V0540RosterSchema.java @@ -14,7 +14,10 @@ * limitations under the License. 
*/ -package com.hedera.node.app.roster.schemas; +package com.swirlds.platform.state.service.schemas; + +import static com.swirlds.common.RosterStateId.ROSTER_KEY; +import static com.swirlds.common.RosterStateId.ROSTER_STATES_KEY; import com.hedera.hapi.node.base.SemanticVersion; import com.hedera.hapi.node.state.primitives.ProtoBytes; @@ -33,8 +36,6 @@ */ public class V0540RosterSchema extends Schema { private static final Logger log = LogManager.getLogger(V0540RosterSchema.class); - public static final String ROSTER_KEY = "ROSTERS"; - public static final String ROSTER_STATES_KEY = "ROSTER_STATE"; /** this can't be increased later so we pick some number large enough, 2^16. */ private static final long MAX_ROSTERS = 65_536L; diff --git a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/system/address/AddressBookUtils.java b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/system/address/AddressBookUtils.java index eeef09e6c39b..0b5853ae4aac 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/system/address/AddressBookUtils.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/com/swirlds/platform/system/address/AddressBookUtils.java @@ -16,7 +16,6 @@ package com.swirlds.platform.system.address; -import static com.swirlds.base.utility.NetworkUtils.isNameResolvable; import static com.swirlds.platform.util.BootstrapUtils.detectSoftwareUpgrade; import com.hedera.hapi.node.base.ServiceEndpoint; @@ -120,6 +119,10 @@ public static AddressBook parseAddressBookText(@NonNull final String addressBook if (address != null) { addressBook.add(address); } + } else if (trimmedLine.startsWith("nextNodeId")) { + // As of release 0.56, nextNodeId is not used and ignored. + // CI/CD pipelines need to be updated to remove this field from files. + // Future Work: remove this case and hard fail when nextNodeId is no longer present in CI/CD pipelines. 
} else { throw new ParseException( "The line [%s] does not start with `%s`." @@ -177,9 +180,6 @@ public static Address parseAddressText(@NonNull final String addressText) throws } // FQDN Support: The original string value is preserved, whether it is an IP Address or a FQDN. final String internalHostname = parts[5]; - if (!isNameResolvable(internalHostname)) { - throw new ParseException("Cannot parse ip address from '" + internalHostname + "'", 5); - } final int internalPort; try { internalPort = Integer.parseInt(parts[6]); @@ -188,9 +188,6 @@ public static Address parseAddressText(@NonNull final String addressText) throws } // FQDN Support: The original string value is preserved, whether it is an IP Address or a FQDN. final String externalHostname = parts[7]; - if (!isNameResolvable(externalHostname)) { - throw new ParseException("Cannot parse ip address from '" + externalHostname + "'", 7); - } final int externalPort; try { externalPort = Integer.parseInt(parts[8]); diff --git a/platform-sdk/swirlds-platform-core/src/main/java/module-info.java b/platform-sdk/swirlds-platform-core/src/main/java/module-info.java index fa9fe0db2a6e..0696e4782cf7 100644 --- a/platform-sdk/swirlds-platform-core/src/main/java/module-info.java +++ b/platform-sdk/swirlds-platform-core/src/main/java/module-info.java @@ -140,6 +140,7 @@ requires transitive com.hedera.pbj.runtime; requires transitive info.picocli; requires transitive org.apache.logging.log4j; + requires transitive org.hiero.consensus.gossip; requires com.swirlds.config.extensions; requires com.swirlds.logging; requires com.swirlds.merkle; diff --git a/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/crypto/EnhancedKeyStoreLoaderTest.java b/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/crypto/EnhancedKeyStoreLoaderTest.java index 4cbb9d556c71..71425965e30a 100644 --- a/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/crypto/EnhancedKeyStoreLoaderTest.java +++ 
b/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/crypto/EnhancedKeyStoreLoaderTest.java @@ -33,7 +33,9 @@ import java.nio.file.Files; import java.nio.file.Path; import java.security.KeyStoreException; +import java.util.HashMap; import java.util.Map; +import java.util.stream.Stream; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; @@ -113,6 +115,7 @@ void keyStoreLoaderPositiveTest(final String directoryName) assertThat(keyDirectory).exists().isDirectory().isReadable().isNotEmptyDirectory(); assertThat(loader).isNotNull(); + assertThatCode(loader::migrate).doesNotThrowAnyException(); assertThatCode(loader::scan).doesNotThrowAnyException(); assertThatCode(loader::generateIfNecessary).doesNotThrowAnyException(); assertThatCode(loader::verify).doesNotThrowAnyException(); @@ -159,6 +162,7 @@ void keyStoreLoaderNegativeCase1Test(final String directoryName) throws IOExcept assertThat(keyDirectory).exists().isDirectory().isReadable().isNotEmptyDirectory(); assertThat(loader).isNotNull(); + assertThatCode(loader::migrate).doesNotThrowAnyException(); assertThatCode(loader::scan).doesNotThrowAnyException(); assertThatCode(loader::verify).isInstanceOf(KeyLoadingException.class); assertThatCode(loader::injectInAddressBook).isInstanceOf(KeyLoadingException.class); @@ -183,9 +187,15 @@ void keyStoreLoaderNegativeCase2Test(final String directoryName) throws IOExcept assertThat(keyDirectory).exists().isDirectory().isReadable().isNotEmptyDirectory(); assertThat(loader).isNotNull(); + assertThatCode(loader::migrate).doesNotThrowAnyException(); assertThatCode(loader::scan).doesNotThrowAnyException(); + assertThatCode(loader::generateIfNecessary).isInstanceOf(KeyGeneratingException.class); assertThatCode(loader::verify).isInstanceOf(KeyLoadingException.class); - assertThatCode(loader::injectInAddressBook).doesNotThrowAnyException(); + if (directoryName.equals("hybrid-invalid-case-2") || 
directoryName.equals("enhanced-invalid-case-2")) { + assertThatCode(loader::injectInAddressBook).isInstanceOf(KeyLoadingException.class); + } else { + assertThatCode(loader::injectInAddressBook).doesNotThrowAnyException(); + } assertThatCode(loader::keysAndCerts).isInstanceOf(KeyLoadingException.class); } @@ -214,4 +224,53 @@ private Configuration configure(final Path keyDirectory) throws IOException { private AddressBook addressBook() { return loadConfigFile(testDataDirectory.resolve("config.txt")).getAddressBook(); } + + ///////////////////////////////////////////////////////////////////////////// + //////////////////////// MIGRATION SPECIFIC UNIT TESTS ////////////////////// + ///////////////////////////////////////////////////////////////////////////// + + /** + * The Negative Type 2 tests are designed to test the case where the key store loader is able to scan the key + * directory, but one or more private keys are either corrupt or missing. + * + * @param directoryName the directory name containing the test data being used to cover a given test case. + * @throws IOException if an I/O error occurs during test setup. + */ + @ParameterizedTest + @DisplayName("Migration Negative Cases Test") + @ValueSource(strings = {"migration-invalid-missing-private-key", "migration-invalid-missing-public-key"}) + void migraitonNegativeCaseTest(final String directoryName) throws IOException { + final Path keyDirectory = testDataDirectory.resolve(directoryName); + final AddressBook addressBook = addressBook(); + final EnhancedKeyStoreLoader loader = EnhancedKeyStoreLoader.using(addressBook, configure(keyDirectory)); + + assertThat(keyDirectory).exists().isDirectory().isReadable().isNotEmptyDirectory(); + + // read all files into memory for later comparison. 
+ Map fileContents = new HashMap<>(); + try (Stream paths = Files.list(keyDirectory)) { + paths.forEach(path -> { + try { + fileContents.put(path.getFileName().toString(), Files.readAllBytes(path)); + } catch (IOException e) { + assert (false); + } + }); + } + + assertThat(loader).isNotNull(); + assertThatCode(loader::migrate).doesNotThrowAnyException(); + + // check that the migration rolled back the changes and that the files are identical. + try (Stream paths = Files.list(keyDirectory)) { + paths.forEach(path -> { + try { + assertThat(Files.readAllBytes(path)) + .isEqualTo(fileContents.get(path.getFileName().toString())); + } catch (IOException e) { + assert (false); + } + }); + } + } } diff --git a/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/reconnect/ReconnectProtocolTests.java b/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/reconnect/ReconnectProtocolTests.java index 014e5df044ea..d8dd0a8cb4bf 100644 --- a/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/reconnect/ReconnectProtocolTests.java +++ b/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/reconnect/ReconnectProtocolTests.java @@ -41,7 +41,6 @@ import com.swirlds.common.utility.ValueReference; import com.swirlds.config.api.Configuration; import com.swirlds.config.extensions.test.fixtures.TestConfigBuilder; -import com.swirlds.platform.gossip.FallenBehindManager; import com.swirlds.platform.metrics.ReconnectMetrics; import com.swirlds.platform.network.Connection; import com.swirlds.platform.network.protocol.Protocol; @@ -59,6 +58,7 @@ import java.util.List; import java.util.stream.LongStream; import java.util.stream.Stream; +import org.hiero.consensus.gossip.FallenBehindManager; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; diff --git 
a/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/state/service/WritableRosterStoreTest.java b/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/state/service/WritableRosterStoreTest.java new file mode 100644 index 000000000000..fa81371d1fa4 --- /dev/null +++ b/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/state/service/WritableRosterStoreTest.java @@ -0,0 +1,286 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.swirlds.platform.state.service; + +import static com.swirlds.platform.state.service.WritableRosterStore.MAXIMUM_ROSTER_HISTORY_SIZE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.hedera.hapi.node.base.ServiceEndpoint; +import com.hedera.hapi.node.state.primitives.ProtoBytes; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; +import com.hedera.hapi.node.state.roster.RosterState; +import com.hedera.hapi.node.state.roster.RosterState.Builder; +import com.hedera.hapi.node.state.roster.RoundRosterPair; +import com.hedera.pbj.runtime.io.buffer.Bytes; +import com.swirlds.common.RosterStateId; +import com.swirlds.platform.roster.InvalidRosterException; +import com.swirlds.platform.roster.RosterUtils; +import com.swirlds.state.merkle.singleton.SingletonNode; +import com.swirlds.state.merkle.singleton.WritableSingletonStateImpl; +import com.swirlds.state.spi.WritableKVState; +import com.swirlds.state.spi.WritableSingletonState; +import com.swirlds.state.spi.WritableStates; +import com.swirlds.state.test.fixtures.MapWritableKVState; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Objects; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +/** + * Tests for the {@link WritableRosterStore} class. 
+ */ +class WritableRosterStoreTest { + + private final WritableStates writableStates = mock(WritableStates.class); + private WritableRosterStore writableRosterStore; + private ReadableRosterStore readableRosterStore; + + @BeforeEach + void setUp() { + final SingletonNode rosterStateSingleton = new SingletonNode<>( + PlatformStateService.NAME, + RosterStateId.ROSTER_STATES_KEY, + 0, + RosterState.PROTOBUF, + new RosterState(null, new LinkedList<>())); + final WritableKVState rosters = MapWritableKVState.builder( + RosterStateId.ROSTER_KEY) + .build(); + when(writableStates.get(RosterStateId.ROSTER_KEY)).thenReturn(rosters); + when(writableStates.getSingleton(RosterStateId.ROSTER_STATES_KEY)) + .thenReturn(new WritableSingletonStateImpl<>(RosterStateId.ROSTER_STATES_KEY, rosterStateSingleton)); + + readableRosterStore = new ReadableRosterStoreImpl(writableStates); + writableRosterStore = new WritableRosterStore(writableStates); + } + + @Test + void testGetReturnsCorrectRoster() { + final Roster expectedRoster = createValidTestRoster(1); + writableRosterStore.putCandidateRoster(expectedRoster); + final Bytes rosterHash = RosterUtils.hash(expectedRoster).getBytes(); + + final Roster actualRoster = readableRosterStore.get(rosterHash); + + assertEquals(expectedRoster, actualRoster, "The returned roster should match the expected roster"); + } + + @Test + void testGetReturnsNullForInvalidHash() { + final Roster expectedRoster = createValidTestRoster(1); + writableRosterStore.putCandidateRoster(expectedRoster); + final Bytes rosterHash = Bytes.EMPTY; + + final Roster actualRoster = readableRosterStore.get(rosterHash); + + assertNull(actualRoster, "The returned roster should be null for an invalid hash"); + } + + @Test + void testSetCandidateRosterReturnsSame() { + final Roster roster1 = createValidTestRoster(1); + writableRosterStore.putCandidateRoster(roster1); + assertEquals( + readableRosterStore.getCandidateRoster(), + roster1, + "Candidate roster should be the same as 
the one set"); + + final Roster roster2 = createValidTestRoster(2); + writableRosterStore.putCandidateRoster(roster2); + assertEquals(roster2, readableRosterStore.getCandidateRoster(), "Candidate roster should be roster2"); + } + + @Test + void testInvalidRosterThrowsException() { + assertThrows(NullPointerException.class, () -> writableRosterStore.putCandidateRoster(null)); + assertThrows(InvalidRosterException.class, () -> writableRosterStore.putCandidateRoster(Roster.DEFAULT)); + assertThrows(InvalidRosterException.class, () -> writableRosterStore.putActiveRoster(Roster.DEFAULT, 1)); + } + + @Test + void testInvalidRoundNumberThrowsException() { + writableRosterStore.putActiveRoster(createValidTestRoster(2), 1); + final Roster roster = createValidTestRoster(1); + assertThrows(IllegalArgumentException.class, () -> writableRosterStore.putActiveRoster(roster, 0)); + assertThrows(IllegalArgumentException.class, () -> writableRosterStore.putActiveRoster(roster, -1)); + } + + /** + * Tests that setting an active roster returns the active roster when getActiveRoster is called. 
+ */ + @Test + void testGetCandidateRosterWithValidCandidateRoster() { + final Roster activeRoster = createValidTestRoster(1); + assertNull(readableRosterStore.getActiveRoster(), "Active roster should be null initially"); + writableRosterStore.putActiveRoster(activeRoster, 2); + assertSame( + readableRosterStore.getActiveRoster(), + activeRoster, + "Returned active roster should be the same as the one set"); + } + + @Test + void testSetActiveRosterRemovesExistingCandidateRoster() { + final Roster activeRoster = createValidTestRoster(1); + final Roster candidateRoster = createValidTestRoster(2); + writableRosterStore.putCandidateRoster(candidateRoster); + assertSame( + readableRosterStore.getCandidateRoster(), + candidateRoster, + "Candidate roster should be the same as one we've just set"); + writableRosterStore.putActiveRoster(activeRoster, 1); + assertSame( + readableRosterStore.getActiveRoster(), + activeRoster, + "Returned active roster should be the same as we've just set"); + assertNull( + readableRosterStore.getCandidateRoster(), + "No candidate roster should exist in the state immediately after setting a new active roster"); + } + + /** + * Test that the oldest roster is removed when a third roster is set + */ + @Test + @DisplayName("Test Oldest Active Roster Cleanup") + void testOldestActiveRosterRemoved() throws NoSuchFieldException, IllegalAccessException { + final Roster roster1 = createValidTestRoster(3); + writableRosterStore.putActiveRoster(roster1, 1); + assertSame(readableRosterStore.getActiveRoster(), roster1, "Returned active roster should be roster1"); + + final Roster roster2 = createValidTestRoster(1); + writableRosterStore.putActiveRoster(roster2, 2); + assertSame(readableRosterStore.getActiveRoster(), roster2, "Returned active roster should be roster2"); + + // set a 3rd candidate roster and adopt it + final Roster roster3 = createValidTestRoster(2); + writableRosterStore.putActiveRoster(roster3, 3); + final WritableSingletonState 
rosterState = getRosterState(); + assertEquals( + 2, + Objects.requireNonNull(rosterState.get()).roundRosterPairs().size(), + "Only 2 round roster pairs should exist"); + assertFalse( + Objects.requireNonNull(rosterState.get()) + .roundRosterPairs() + .contains( + new RoundRosterPair(2, RosterUtils.hash(roster1).getBytes())), + "Oldest roster should be removed"); + } + + /** + * Test that an exception is thrown if stored active rosters are ever > MAXIMUM_ROSTER_HISTORY_SIZE + */ + @Test + @DisplayName("Test Max Roster List Size Exceeded") + void testMaximumRostersMoreThan2ThrowsException() throws NoSuchFieldException, IllegalAccessException { + final List activeRosters = new ArrayList<>(); + activeRosters.add(new RoundRosterPair( + 1, RosterUtils.hash(createValidTestRoster(1)).getBytes())); + activeRosters.add(new RoundRosterPair( + 2, RosterUtils.hash(createValidTestRoster(2)).getBytes())); + activeRosters.add(new RoundRosterPair( + 3, RosterUtils.hash(createValidTestRoster(3)).getBytes())); + + final Builder rosterStateBuilder = + RosterState.newBuilder().candidateRosterHash(Bytes.EMPTY).roundRosterPairs(activeRosters); + final WritableSingletonState rosterState = getRosterState(); + rosterState.put(rosterStateBuilder.build()); + + final Roster roster = createValidTestRoster(4); + final Exception exception = + assertThrows(IllegalStateException.class, () -> writableRosterStore.putActiveRoster(roster, 4)); + assertEquals( + "Active rosters in the Roster state cannot be more than " + MAXIMUM_ROSTER_HISTORY_SIZE, + exception.getMessage()); + } + + /** + * Test that when a roster hash collision occurs between a newly set active roster and another active roster in + * history, the other roster isn't removed from the state when remove is called + */ + @Test + @DisplayName("Duplicate Roster Hash") + void testRosterHashCollisions() { + final Roster roster1 = createValidTestRoster(3); + writableRosterStore.putActiveRoster(roster1, 1); + assertSame( + 
readableRosterStore.getActiveRoster(), + roster1, + "Returned active roster should be the same as the one set"); + + final Roster roster2 = createValidTestRoster(1); + writableRosterStore.putActiveRoster(roster2, 2); + assertSame( + readableRosterStore.getActiveRoster(), + roster2, + "Returned active roster should be the same as the one set"); + + writableRosterStore.putActiveRoster(roster1, 3); + assertSame( + readableRosterStore.getActiveRoster(), + roster1, + "3rd active roster with hash collision with first returns the first roster"); + } + + /** + * Creates a valid test roster with the given number of entries. + * + * @param entries the number of entries + * @return a valid roster + */ + private Roster createValidTestRoster(final int entries) { + final List entriesList = new LinkedList<>(); + for (int i = 0; i < entries; i++) { + entriesList.add(RosterEntry.newBuilder() + .nodeId(i) + .weight(i + 1) // weight must be > 0 + .gossipCaCertificate(Bytes.wrap("test" + i)) + .tssEncryptionKey(Bytes.wrap("test" + i)) + .gossipEndpoint(ServiceEndpoint.newBuilder() + .domainName("domain.com" + i) + .port(666) + .build()) + .build()); + } + return Roster.newBuilder().rosterEntries(entriesList).build(); + } + + /** + * Gets the roster state from the WritableRosterStore via reflection for testing purposes only. 
+ * + * @return the roster state + * @throws NoSuchFieldException if the field is not found + * @throws IllegalAccessException if the field is not accessible + */ + private WritableSingletonState getRosterState() throws NoSuchFieldException, IllegalAccessException { + final Field field = WritableRosterStore.class.getDeclaredField("rosterState"); + field.setAccessible(true); + return (WritableSingletonState) field.get(writableRosterStore); + } +} diff --git a/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/sync/protocol/SyncProtocolFactoryTests.java b/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/sync/protocol/SyncProtocolFactoryTests.java index 6d03cd1caa7f..212a373c6999 100644 --- a/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/sync/protocol/SyncProtocolFactoryTests.java +++ b/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/sync/protocol/SyncProtocolFactoryTests.java @@ -31,7 +31,6 @@ import com.swirlds.common.platform.NodeId; import com.swirlds.common.test.fixtures.platform.TestPlatformContextBuilder; import com.swirlds.common.threading.pool.ParallelExecutionException; -import com.swirlds.platform.gossip.FallenBehindManager; import com.swirlds.platform.gossip.IntakeEventCounter; import com.swirlds.platform.gossip.SyncException; import com.swirlds.platform.gossip.permits.SyncPermitProvider; @@ -47,6 +46,7 @@ import java.io.IOException; import java.time.Duration; import java.util.List; +import org.hiero.consensus.gossip.FallenBehindManager; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; diff --git a/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/system/address/RandomRosterBuilderTests.java b/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/system/address/RandomRosterBuilderTests.java new file mode 100644 index 000000000000..96eff77ee559 --- /dev/null +++ 
b/platform-sdk/swirlds-platform-core/src/test/java/com/swirlds/platform/system/address/RandomRosterBuilderTests.java @@ -0,0 +1,131 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.swirlds.platform.system.address; + +import static org.assertj.core.api.Fail.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; +import com.hedera.pbj.runtime.io.buffer.Bytes; +import com.swirlds.common.platform.NodeId; +import com.swirlds.common.test.fixtures.Randotron; +import com.swirlds.platform.crypto.CryptoStatic; +import com.swirlds.platform.crypto.KeysAndCerts; +import com.swirlds.platform.crypto.PlatformSigner; +import com.swirlds.platform.roster.RosterUtils; +import com.swirlds.platform.test.fixtures.addressbook.RandomRosterBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.security.PublicKey; +import org.junit.jupiter.api.Test; + +class RandomRosterBuilderTests { + + /** + * Assert that the given keys are unique. 
+ * + * @param keyA the first key + * @param keyB the second key + */ + private void assertKeysAreUnique(@NonNull final PublicKey keyA, @NonNull final PublicKey keyB) { + final byte[] keyABytes = keyA.getEncoded(); + final byte[] keyBBytes = keyB.getEncoded(); + + for (int i = 0; i < keyABytes.length; i++) { + if (keyABytes[i] != keyBBytes[i]) { + return; + } + } + fail("Keys are not unique"); + } + + /** + * Normally this would be broken up into several tests, but because it's not cheap to generate keys, better + * to do it all in one test with the same set of keys. + */ + @Test + void validDeterministicKeysTest() { + final Randotron randotron = Randotron.create(); + + // Only generate small address book (it's expensive to generate signatures) + final int size = 3; + + final RandomRosterBuilder builderA = + RandomRosterBuilder.create(randotron).withSize(size).withRealKeysEnabled(true); + final Roster rosterA = builderA.build(); + + final RandomRosterBuilder builderB = RandomRosterBuilder.create(randotron.copyAndReset()) + .withSize(size) + .withRealKeysEnabled(true); + final Roster rosterB = builderB.build(); + + // The address book should be the same (keys should be deterministic) + assertEquals(RosterUtils.hash(rosterA), RosterUtils.hash(rosterB)); + + // Verify that each address has unique keys + for (int i = 0; i < size; i++) { + for (int j = i + 1; j < size; j++) { + if (i == j) { + continue; + } + + final RosterEntry addressI = rosterA.rosterEntries().get(i); + final PublicKey signaturePublicKeyI = + RosterUtils.fetchGossipCaCertificate(addressI).getPublicKey(); + + final RosterEntry addressJ = rosterA.rosterEntries().get(j); + final PublicKey signaturePublicKeyJ = + RosterUtils.fetchGossipCaCertificate(addressJ).getPublicKey(); + + assertKeysAreUnique(signaturePublicKeyI, signaturePublicKeyJ); + } + } + + // Verify that the private key can produce valid signatures that can be verified by the public key + for (int i = 0; i < size; i++) { + final RosterEntry 
address = rosterA.rosterEntries().get(i); + final NodeId id = NodeId.of(address.nodeId()); + final PublicKey signaturePublicKey = + RosterUtils.fetchGossipCaCertificate(address).getPublicKey(); + final KeysAndCerts privateKeys = builderA.getPrivateKeys(id); + + final byte[] dataArray = randotron.nextByteArray(64); + final Bytes dataBytes = Bytes.wrap(dataArray); + final com.swirlds.common.crypto.Signature signature = new PlatformSigner(privateKeys).sign(dataArray); + + assertTrue(CryptoStatic.verifySignature(dataBytes, signature.getBytes(), signaturePublicKey)); + + // Sanity check: validating using the wrong public key should fail + final RosterEntry wrongAddress = rosterA.rosterEntries().get((i + 1) % size); + final NodeId wrongId = NodeId.of(wrongAddress.nodeId()); + final PublicKey wrongPublicKey = + RosterUtils.fetchGossipCaCertificate(wrongAddress).getPublicKey(); + assertFalse(CryptoStatic.verifySignature(dataBytes, signature.getBytes(), wrongPublicKey)); + + // Sanity check: validating against the wrong data should fail + final Bytes wrongData = randotron.nextHashBytes(); + assertFalse(CryptoStatic.verifySignature(wrongData, signature.getBytes(), signaturePublicKey)); + + // Sanity check: validating with a modified signature should fail + final byte[] modifiedSignature = signature.getBytes().toByteArray(); + modifiedSignature[0] = (byte) ~modifiedSignature[0]; + assertFalse(CryptoStatic.verifySignature(dataBytes, Bytes.wrap(modifiedSignature), signaturePublicKey)); + } + } +} diff --git a/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-private-key/private-alice.pfx b/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-private-key/private-alice.pfx new file mode 100644 index 000000000000..c7af10445233 Binary files /dev/null and 
b/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-private-key/private-alice.pfx differ diff --git a/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-private-key/public.pfx b/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-private-key/public.pfx new file mode 100644 index 000000000000..d6631eeb7fda Binary files /dev/null and b/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-private-key/public.pfx differ diff --git a/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-public-key/private-alice.pfx b/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-public-key/private-alice.pfx new file mode 100644 index 000000000000..c7af10445233 Binary files /dev/null and b/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-public-key/private-alice.pfx differ diff --git a/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-public-key/private-bob.pfx b/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-public-key/private-bob.pfx new file mode 100644 index 000000000000..e243077f29a5 Binary files /dev/null and b/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-public-key/private-bob.pfx differ diff --git 
a/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-public-key/private-carol.pfx b/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-public-key/private-carol.pfx new file mode 100644 index 000000000000..c7f588884d4e Binary files /dev/null and b/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-public-key/private-carol.pfx differ diff --git a/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-public-key/public.pfx b/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-public-key/public.pfx new file mode 100644 index 000000000000..dc83c2899a0a Binary files /dev/null and b/platform-sdk/swirlds-platform-core/src/test/resources/com/swirlds/platform/crypto/EnhancedKeyStoreLoader/migration-invalid-missing-public-key/public.pfx differ diff --git a/platform-sdk/swirlds-platform-core/src/testFixtures/java/com/swirlds/platform/test/fixtures/addressbook/RandomRosterBuilder.java b/platform-sdk/swirlds-platform-core/src/testFixtures/java/com/swirlds/platform/test/fixtures/addressbook/RandomRosterBuilder.java new file mode 100644 index 000000000000..88860b58646f --- /dev/null +++ b/platform-sdk/swirlds-platform-core/src/testFixtures/java/com/swirlds/platform/test/fixtures/addressbook/RandomRosterBuilder.java @@ -0,0 +1,311 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.swirlds.platform.test.fixtures.addressbook; + +import static com.swirlds.common.utility.CommonUtils.nameToAlias; +import static com.swirlds.platform.crypto.KeyCertPurpose.SIGNING; + +import com.hedera.hapi.node.state.roster.Roster; +import com.swirlds.common.platform.NodeId; +import com.swirlds.platform.crypto.KeysAndCerts; +import com.swirlds.platform.crypto.PublicStores; +import com.swirlds.platform.crypto.SerializableX509Certificate; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Random; +import java.util.stream.IntStream; + +/** + * A utility for generating a random roster. + */ +public class RandomRosterBuilder { + + /** + * All randomness comes from this. + */ + private final Random random; + + /** + * The number of roster entries to put into the roster. + */ + private int size = 4; + + /** + * Describes different ways that the random roster has its weight distributed if the custom strategy lambda is + * unset. + */ + public enum WeightDistributionStrategy { + /** + * All nodes have equal weight. + */ + BALANCED, + /** + * Nodes are given weight with a gaussian distribution. + */ + GAUSSIAN + } + + /** + * The weight distribution strategy. + */ + private WeightDistributionStrategy weightDistributionStrategy = WeightDistributionStrategy.GAUSSIAN; + + /** + * The average weight. Used directly if using {@link WeightDistributionStrategy#BALANCED}, used as mean if using + * {@link WeightDistributionStrategy#GAUSSIAN}. 
+ */ + private long averageWeight = 1000; + + /** + * The standard deviation of the weight, ignored if distribution strategy is not + * {@link WeightDistributionStrategy#GAUSSIAN}. + */ + private long weightStandardDeviation = 100; + + /** + * The minimum weight to give to any particular address. + */ + private long minimumWeight = 0; + + /** + * The maximum weight to give to any particular address. + */ + private Long maximumWeight; + + /** + * the next available node id for new addresses. + */ + private NodeId nextNodeId = NodeId.FIRST_NODE_ID; + + /** + * If true then generate real cryptographic keys. + */ + private boolean realKeys; + + /** + * If we are using real keys, this map will hold the private keys for each address. + */ + private final Map privateKeys = new HashMap<>(); + + /** + * Create a new random roster generator. + * + * @param random a source of randomness + * @return a new random roster generator + */ + @NonNull + public static RandomRosterBuilder create(@NonNull final Random random) { + return new RandomRosterBuilder(random); + } + + /** + * Constructor. + * + * @param random a source of randomness + */ + private RandomRosterBuilder(@NonNull final Random random) { + this.random = Objects.requireNonNull(random); + } + + /** + * Build a random roster given the provided configuration. + */ + @NonNull + public Roster build() { + final Roster.Builder builder = Roster.newBuilder(); + + if (maximumWeight == null && size > 0) { + // We don't want the total weight to overflow a long + maximumWeight = Long.MAX_VALUE / size; + } + + builder.rosterEntries(IntStream.range(0, size) + .mapToObj(index -> { + final NodeId nodeId = getNextNodeId(); + final RandomRosterEntryBuilder addressBuilder = RandomRosterEntryBuilder.create(random) + .withNodeId(nodeId) + .withWeight(getNextWeight()); + + generateKeys(nodeId, addressBuilder); + return addressBuilder.build(); + }) + .toList()); + + return builder.build(); + } + + /** + * Set the size of the roster. 
+ * + * @return this object + */ + @NonNull + public RandomRosterBuilder withSize(final int size) { + this.size = size; + return this; + } + + /** + * Set the average weight for an address. If the weight distribution strategy is + * {@link WeightDistributionStrategy#BALANCED}, all addresses will have this weight. If the weight distribution + * strategy is {@link WeightDistributionStrategy#GAUSSIAN}, this will be the mean weight. + * + * @return this object + */ + @NonNull + public RandomRosterBuilder withAverageWeight(final long averageWeight) { + this.averageWeight = averageWeight; + return this; + } + + /** + * Set the standard deviation for the weight for an address. Ignored unless the weight distribution strategy is + * {@link WeightDistributionStrategy#GAUSSIAN}. + * + * @return this object + */ + @NonNull + public RandomRosterBuilder withWeightStandardDeviation(final long weightStandardDeviation) { + this.weightStandardDeviation = weightStandardDeviation; + return this; + } + + /** + * Set the minimum weight for an address. Overrides the weight generation strategy. + * + * @return this object + */ + @NonNull + public RandomRosterBuilder withMinimumWeight(final long minimumWeight) { + this.minimumWeight = minimumWeight; + return this; + } + + /** + * Set the maximum weight for an address. Overrides the weight generation strategy. + * + * @return this object + */ + @NonNull + public RandomRosterBuilder withMaximumWeight(final long maximumWeight) { + this.maximumWeight = maximumWeight; + return this; + } + + /** + * Set the strategy used for deciding distribution of weight. + * + * @return this object + */ + @NonNull + public RandomRosterBuilder withWeightDistributionStrategy( + @NonNull final WeightDistributionStrategy weightDistributionStrategy) { + + this.weightDistributionStrategy = weightDistributionStrategy; + return this; + } + + /** + * Specify if real cryptographic keys should be generated (default false). 
Warning: generating real keys is very + * time consuming. + * + * @param realKeysEnabled if true then generate real cryptographic keys + * @return this object + */ + @NonNull + public RandomRosterBuilder withRealKeysEnabled(final boolean realKeysEnabled) { + this.realKeys = realKeysEnabled; + return this; + } + + /** + * Get the private keys for a node. Should only be called after the roster has been built and only if + * {@link #withRealKeysEnabled(boolean)} was set to true. + * + * @param nodeId the node id + * @return the private keys + * @throws IllegalStateException if real keys are not being generated or the roster has not been built + */ + @NonNull + public KeysAndCerts getPrivateKeys(@NonNull final NodeId nodeId) { + if (!realKeys) { + throw new IllegalStateException("Real keys are not being generated"); + } + if (!privateKeys.containsKey(nodeId)) { + throw new IllegalStateException("Unknown node ID " + nodeId); + } + return privateKeys.get(nodeId); + } + + /** + * Generate the next node ID. + */ + @NonNull + private NodeId getNextNodeId() { + final NodeId nextId = nextNodeId; + // randomly advance between 1 and 3 steps + final int randomAdvance = random.nextInt(3); + nextNodeId = nextNodeId.getOffset(randomAdvance + 1L); + return nextId; + } + + /** + * Generate the next weight for the next address. + */ + private long getNextWeight() { + final long unboundedWeight; + switch (weightDistributionStrategy) { + case BALANCED -> unboundedWeight = averageWeight; + case GAUSSIAN -> unboundedWeight = + Math.max(0, (long) (averageWeight + random.nextGaussian() * weightStandardDeviation)); + default -> throw new IllegalStateException("Unexpected value: " + weightDistributionStrategy); + } + + return Math.min(maximumWeight, Math.max(minimumWeight, unboundedWeight)); + } + + /** + * Generate the cryptographic keys for a node. 
+ */ + private void generateKeys(@NonNull final NodeId nodeId, @NonNull final RandomRosterEntryBuilder addressBuilder) { + if (realKeys) { + try { + final PublicStores publicStores = new PublicStores(); + final String name = nodeId.toString(); + + final byte[] masterKey = new byte[64]; + random.nextBytes(masterKey); + + final KeysAndCerts keysAndCerts = + KeysAndCerts.generate(name, new byte[] {}, masterKey, new byte[] {}, publicStores); + privateKeys.put(nodeId, keysAndCerts); + + final String alias = nameToAlias(name); + + final SerializableX509Certificate sigCert = + new SerializableX509Certificate(publicStores.getCertificate(SIGNING, alias)); + + addressBuilder.withSigCert(sigCert); + + } catch (final Exception e) { + throw new RuntimeException(); + } + } + } +} diff --git a/platform-sdk/swirlds-platform-core/src/testFixtures/java/com/swirlds/platform/test/fixtures/addressbook/RandomRosterEntryBuilder.java b/platform-sdk/swirlds-platform-core/src/testFixtures/java/com/swirlds/platform/test/fixtures/addressbook/RandomRosterEntryBuilder.java new file mode 100644 index 000000000000..5c778262e19f --- /dev/null +++ b/platform-sdk/swirlds-platform-core/src/testFixtures/java/com/swirlds/platform/test/fixtures/addressbook/RandomRosterEntryBuilder.java @@ -0,0 +1,192 @@ +/* + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.swirlds.platform.test.fixtures.addressbook; + +import static com.swirlds.common.test.fixtures.RandomUtils.randomIp; + +import com.hedera.hapi.node.base.ServiceEndpoint; +import com.hedera.hapi.node.state.roster.RosterEntry; +import com.hedera.pbj.runtime.io.buffer.Bytes; +import com.swirlds.common.platform.NodeId; +import com.swirlds.platform.crypto.SerializableX509Certificate; +import com.swirlds.platform.test.fixtures.crypto.PreGeneratedX509Certs; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.security.cert.CertificateEncodingException; +import java.util.Objects; +import java.util.Random; + +/** + * A builder for creating random {@link RosterEntry} instances. + */ +public class RandomRosterEntryBuilder { + + private final Random random; + private NodeId nodeId; + private Long weight; + private Integer port; + private String hostname; + private SerializableX509Certificate sigCert; + + private long minimumWeight = 0; + private long maximumWeight = Long.MAX_VALUE / 1024; + + /** + * Creates a new {@link RandomRosterEntryBuilder} instance. + * + * @param random the random number generator to use + * @return a new {@link RandomRosterEntryBuilder} instance + */ + @NonNull + public static RandomRosterEntryBuilder create(@NonNull final Random random) { + return new RandomRosterEntryBuilder(random); + } + + /** + * Constructor. + * + * @param random the random number generator to use + */ + private RandomRosterEntryBuilder(@NonNull final Random random) { + this.random = Objects.requireNonNull(random); + } + + /** + * Builds a new {@link RosterEntry} instance. 
+ * + * @return a new {@link RosterEntry} instance + */ + @NonNull + public RosterEntry build() { + if (nodeId == null) { + nodeId = NodeId.of(random.nextLong(0, Long.MAX_VALUE)); + } + + if (weight == null) { + weight = random.nextLong(minimumWeight, maximumWeight); + } + + if (port == null) { + port = random.nextInt(0, 65535); + } + + if (hostname == null) { + hostname = randomIp(random); + } + + if (sigCert == null) { + sigCert = PreGeneratedX509Certs.getSigCert(nodeId.id()); + } + + try { + return RosterEntry.newBuilder() + .nodeId(nodeId.id()) + .weight(weight) + .gossipCaCertificate(Bytes.wrap(sigCert.getCertificate().getEncoded())) + .gossipEndpoint(ServiceEndpoint.newBuilder() + .domainName(hostname) + .port(port) + .build()) + .build(); + } catch (CertificateEncodingException e) { + throw new IllegalStateException(e); + } + } + + /** + * Sets the {@link NodeId} for the address. + * + * @param nodeId the node ID + * @return this builder + */ + @NonNull + public RandomRosterEntryBuilder withNodeId(@NonNull final NodeId nodeId) { + this.nodeId = Objects.requireNonNull(nodeId); + return this; + } + + /** + * Sets the weight for the address. + * + * @param weight the weight + * @return this builder + */ + @NonNull + public RandomRosterEntryBuilder withWeight(final long weight) { + this.weight = weight; + return this; + } + + /** + * Sets the port for the address. + * + * @param port the port + * @return this builder + */ + @NonNull + public RandomRosterEntryBuilder withPort(final int port) { + this.port = port; + return this; + } + + /** + * Sets the hostname for the address. + * + * @param hostname the hostname + * @return this builder + */ + @NonNull + public RandomRosterEntryBuilder withHostname(@NonNull final String hostname) { + this.hostname = Objects.requireNonNull(hostname); + return this; + } + + /** + * Sets the sigCert for the address. 
+ * + * @param sigCert the sigCert + * @return this builder + */ + @NonNull + public RandomRosterEntryBuilder withSigCert(@NonNull final SerializableX509Certificate sigCert) { + this.sigCert = Objects.requireNonNull(sigCert); + return this; + } + + /** + * Sets the minimum weight. Ignored if the weight is specifically set. Default 0. + * + * @param minimumWeight the minimum weight + * @return this builder + */ + @NonNull + public RandomRosterEntryBuilder withMinimumWeight(final long minimumWeight) { + this.minimumWeight = minimumWeight; + return this; + } + + /** + * Sets the maximum weight. Ignored if the weight is specifically set. Default Long.MAX_VALUE / 1024. + * + * @param maximumWeight the maximum weight + * @return this builder + */ + @NonNull + public RandomRosterEntryBuilder withMaximumWeight(final long maximumWeight) { + this.maximumWeight = maximumWeight; + return this; + } +} diff --git a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/build.gradle.kts b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/build.gradle.kts index 6df6656b684e..2d9b0e84cc13 100644 --- a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/build.gradle.kts +++ b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/build.gradle.kts @@ -34,5 +34,6 @@ testModuleInfo { requires("org.mockito.junit.jupiter") requires("com.swirlds.metrics.api") requires("org.mockito") + requires("org.hiero.consensus.gossip") requiresStatic("com.github.spotbugs.annotations") } diff --git a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetEventCreatorTests.java b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetEventCreatorTests.java index 9f76f261a691..32fe6a45c304 100644 --- a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetEventCreatorTests.java +++ 
b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetEventCreatorTests.java @@ -29,6 +29,8 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; import com.hedera.hapi.platform.event.EventTransaction; import com.hedera.hapi.platform.event.EventTransaction.TransactionOneOfType; import com.hedera.pbj.runtime.OneOf; @@ -52,13 +54,11 @@ import com.swirlds.platform.internal.EventImpl; import com.swirlds.platform.system.BasicSoftwareVersion; import com.swirlds.platform.system.SoftwareVersion; -import com.swirlds.platform.system.address.Address; -import com.swirlds.platform.system.address.AddressBook; import com.swirlds.platform.system.events.EventConstants; import com.swirlds.platform.system.events.EventDescriptorWrapper; import com.swirlds.platform.system.events.UnsignedEvent; import com.swirlds.platform.system.transaction.Transaction; -import com.swirlds.platform.test.fixtures.addressbook.RandomAddressBookBuilder; +import com.swirlds.platform.test.fixtures.addressbook.RandomRosterBuilder; import com.swirlds.platform.test.fixtures.event.TestingEventBuilder; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; @@ -101,7 +101,7 @@ private record SimulatedNode( private EventCreator buildEventCreator( @NonNull final Random random, @NonNull final Time time, - @NonNull final AddressBook addressBook, + @NonNull final Roster roster, @NonNull final NodeId nodeId, @NonNull final TransactionSupplier transactionSupplier) { @@ -114,7 +114,7 @@ private EventCreator buildEventCreator( final SoftwareVersion softwareVersion = new BasicSoftwareVersion(1); return new TipsetEventCreator( - platformContext, random, signer, addressBook, nodeId, softwareVersion, transactionSupplier); + platformContext, random, signer, roster, nodeId, softwareVersion, 
transactionSupplier); } /** @@ -124,7 +124,7 @@ private EventCreator buildEventCreator( private Map buildSimulatedNodes( @NonNull final Random random, @NonNull final Time time, - @NonNull final AddressBook addressBook, + @NonNull final Roster roster, @NonNull final TransactionSupplier transactionSupplier, @NonNull final AncientMode ancientMode) { @@ -132,20 +132,21 @@ private Map buildSimulatedNodes( final PlatformContext platformContext = TestPlatformContextBuilder.create().withTime(time).build(); - for (final Address address : addressBook) { + for (final RosterEntry address : roster.rosterEntries()) { final EventCreator eventCreator = - buildEventCreator(random, time, addressBook, address.getNodeId(), transactionSupplier); + buildEventCreator(random, time, roster, NodeId.of(address.nodeId()), transactionSupplier); - final TipsetTracker tipsetTracker = new TipsetTracker(time, addressBook, ancientMode); + final TipsetTracker tipsetTracker = new TipsetTracker(time, roster, ancientMode); final ChildlessEventTracker childlessEventTracker = new ChildlessEventTracker(); final TipsetWeightCalculator tipsetWeightCalculator = new TipsetWeightCalculator( - platformContext, addressBook, address.getNodeId(), tipsetTracker, childlessEventTracker); + platformContext, roster, NodeId.of(address.nodeId()), tipsetTracker, childlessEventTracker); eventCreators.put( - address.getNodeId(), - new SimulatedNode(address.getNodeId(), tipsetTracker, eventCreator, tipsetWeightCalculator)); + NodeId.of(address.nodeId()), + new SimulatedNode( + NodeId.of(address.nodeId()), tipsetTracker, eventCreator, tipsetWeightCalculator)); } return eventCreators; @@ -313,8 +314,8 @@ void roundRobinTest(final boolean advancingClock, final boolean useBirthRoundFor final int networkSize = 10; - final AddressBook addressBook = - RandomAddressBookBuilder.create(random).withSize(networkSize).build(); + final Roster roster = + RandomRosterBuilder.create(random).withSize(networkSize).build(); final FakeTime time 
= new FakeTime(); @@ -323,21 +324,21 @@ void roundRobinTest(final boolean advancingClock, final boolean useBirthRoundFor final Map nodes = buildSimulatedNodes( random, time, - addressBook, + roster, transactionSupplier::get, useBirthRoundForAncient ? AncientMode.BIRTH_ROUND_THRESHOLD : AncientMode.GENERATION_THRESHOLD); final Map events = new HashMap<>(); for (int eventIndex = 0; eventIndex < 100; eventIndex++) { - for (final Address address : addressBook) { + for (final RosterEntry address : roster.rosterEntries()) { if (advancingClock) { time.tick(Duration.ofMillis(10)); } transactionSupplier.set(generateRandomTransactions(random)); - final NodeId nodeId = address.getNodeId(); + final NodeId nodeId = NodeId.of(address.nodeId()); final EventCreator eventCreator = nodes.get(nodeId).eventCreator; final UnsignedEvent event = eventCreator.maybeCreateEvent(); @@ -367,8 +368,8 @@ void randomOrderTest(final boolean advancingClock, final boolean useBirthRoundFo final int networkSize = 10; - final AddressBook addressBook = - RandomAddressBookBuilder.create(random).withSize(networkSize).build(); + final Roster roster = + RandomRosterBuilder.create(random).withSize(networkSize).build(); final FakeTime time = new FakeTime(); @@ -377,7 +378,7 @@ void randomOrderTest(final boolean advancingClock, final boolean useBirthRoundFo final Map nodes = buildSimulatedNodes( random, time, - addressBook, + roster, transactionSupplier::get, useBirthRoundForAncient ? AncientMode.BIRTH_ROUND_THRESHOLD : AncientMode.GENERATION_THRESHOLD); @@ -385,20 +386,19 @@ void randomOrderTest(final boolean advancingClock, final boolean useBirthRoundFo for (int eventIndex = 0; eventIndex < 100; eventIndex++) { - final List
    addresses = new ArrayList<>(); - addressBook.iterator().forEachRemaining(addresses::add); + final List addresses = new ArrayList<>(roster.rosterEntries()); Collections.shuffle(addresses, random); boolean atLeastOneEventCreated = false; - for (final Address address : addresses) { + for (final RosterEntry address : addresses) { if (advancingClock) { time.tick(Duration.ofMillis(10)); } transactionSupplier.set(generateRandomTransactions(random)); - final NodeId nodeId = address.getNodeId(); + final NodeId nodeId = NodeId.of(address.nodeId()); final EventCreator eventCreator = nodes.get(nodeId).eventCreator; final UnsignedEvent event = eventCreator.maybeCreateEvent(); @@ -435,35 +435,34 @@ void clearTest(final boolean advancingClock) { final int networkSize = 10; - final AddressBook addressBook = - RandomAddressBookBuilder.create(random).withSize(networkSize).build(); + final Roster roster = + RandomRosterBuilder.create(random).withSize(networkSize).build(); final FakeTime time = new FakeTime(); final AtomicReference> transactionSupplier = new AtomicReference<>(); - final Map nodes = buildSimulatedNodes( - random, time, addressBook, transactionSupplier::get, AncientMode.GENERATION_THRESHOLD); + final Map nodes = + buildSimulatedNodes(random, time, roster, transactionSupplier::get, AncientMode.GENERATION_THRESHOLD); for (int i = 0; i < 5; i++) { final Map events = new HashMap<>(); for (int eventIndex = 0; eventIndex < 100; eventIndex++) { - final List
    addresses = new ArrayList<>(); - addressBook.iterator().forEachRemaining(addresses::add); + final List addresses = new ArrayList<>(roster.rosterEntries()); Collections.shuffle(addresses, random); boolean atLeastOneEventCreated = false; - for (final Address address : addresses) { + for (final RosterEntry address : addresses) { if (advancingClock) { time.tick(Duration.ofMillis(10)); } transactionSupplier.set(generateRandomTransactions(random)); - final NodeId nodeId = address.getNodeId(); + final NodeId nodeId = NodeId.of(address.nodeId()); final EventCreator eventCreator = nodes.get(nodeId).eventCreator; final UnsignedEvent event = eventCreator.maybeCreateEvent(); @@ -510,8 +509,8 @@ void createManyEventsInARowTest(final boolean advancingClock, final boolean useB final int networkSize = 10; - final AddressBook addressBook = - RandomAddressBookBuilder.create(random).withSize(networkSize).build(); + final Roster roster = + RandomRosterBuilder.create(random).withSize(networkSize).build(); final FakeTime time = new FakeTime(); @@ -520,14 +519,14 @@ void createManyEventsInARowTest(final boolean advancingClock, final boolean useB final Map nodes = buildSimulatedNodes( random, time, - addressBook, + roster, transactionSupplier::get, useBirthRoundForAncient ? 
AncientMode.BIRTH_ROUND_THRESHOLD : AncientMode.GENERATION_THRESHOLD); final Map events = new HashMap<>(); for (int eventIndex = 0; eventIndex < 100; eventIndex++) { - for (final Address address : addressBook) { + for (final RosterEntry address : roster.rosterEntries()) { int count = 0; while (true) { @@ -537,7 +536,7 @@ void createManyEventsInARowTest(final boolean advancingClock, final boolean useB transactionSupplier.set(generateRandomTransactions(random)); - final NodeId nodeId = address.getNodeId(); + final NodeId nodeId = NodeId.of(address.nodeId()); final EventCreator eventCreator = nodes.get(nodeId).eventCreator; final UnsignedEvent event = eventCreator.maybeCreateEvent(); @@ -578,18 +577,21 @@ void zeroWeightNodeTest(final boolean advancingClock, final boolean useBirthRoun final int networkSize = 10; - final AddressBook addressBook = - RandomAddressBookBuilder.create(random).withSize(networkSize).build(); + Roster roster = RandomRosterBuilder.create(random).withSize(networkSize).build(); - final NodeId zeroWeightNode = addressBook.getNodeId(0); + final NodeId zeroWeightNode = NodeId.of(roster.rosterEntries().get(0).nodeId()); - for (final Address address : addressBook) { - if (address.getNodeId().equals(zeroWeightNode)) { - addressBook.add(address.copySetWeight(0)); - } else { - addressBook.add(address.copySetWeight(1)); - } - } + roster = Roster.newBuilder() + .rosterEntries(roster.rosterEntries().stream() + .map(entry -> { + if (entry.nodeId() == zeroWeightNode.id()) { + return entry.copyBuilder().weight(0).build(); + } else { + return entry.copyBuilder().weight(1).build(); + } + }) + .toList()) + .build(); final FakeTime time = new FakeTime(); @@ -598,7 +600,7 @@ void zeroWeightNodeTest(final boolean advancingClock, final boolean useBirthRoun final Map nodes = buildSimulatedNodes( random, time, - addressBook, + roster, transactionSupplier::get, useBirthRoundForAncient ? 
AncientMode.BIRTH_ROUND_THRESHOLD : AncientMode.GENERATION_THRESHOLD); @@ -608,20 +610,19 @@ void zeroWeightNodeTest(final boolean advancingClock, final boolean useBirthRoun for (int eventIndex = 0; eventIndex < 100; eventIndex++) { - final List
    addresses = new ArrayList<>(); - addressBook.iterator().forEachRemaining(addresses::add); + final List addresses = new ArrayList<>(roster.rosterEntries()); Collections.shuffle(addresses, random); boolean atLeastOneEventCreated = false; - for (final Address address : addresses) { + for (final RosterEntry address : addresses) { if (advancingClock) { time.tick(Duration.ofMillis(10)); } transactionSupplier.set(generateRandomTransactions(random)); - final NodeId nodeId = address.getNodeId(); + final NodeId nodeId = NodeId.of(address.nodeId()); final EventCreator eventCreator = nodes.get(nodeId).eventCreator; final UnsignedEvent event = eventCreator.maybeCreateEvent(); @@ -675,18 +676,21 @@ void zeroWeightSlowNodeTest(final boolean advancingClock, final boolean useBirth final int networkSize = 10; - final AddressBook addressBook = - RandomAddressBookBuilder.create(random).withSize(networkSize).build(); + Roster roster = RandomRosterBuilder.create(random).withSize(networkSize).build(); - final NodeId zeroWeightNode = addressBook.getNodeId(0); + final NodeId zeroWeightNode = NodeId.of(roster.rosterEntries().get(0).nodeId()); - for (final Address address : addressBook) { - if (address.getNodeId().equals(zeroWeightNode)) { - addressBook.add(address.copySetWeight(0)); - } else { - addressBook.add(address.copySetWeight(1)); - } - } + roster = Roster.newBuilder() + .rosterEntries(roster.rosterEntries().stream() + .map(entry -> { + if (entry.nodeId() == zeroWeightNode.id()) { + return entry.copyBuilder().weight(0).build(); + } else { + return entry.copyBuilder().weight(1).build(); + } + }) + .toList()) + .build(); final FakeTime time = new FakeTime(); @@ -695,7 +699,7 @@ void zeroWeightSlowNodeTest(final boolean advancingClock, final boolean useBirth final Map nodes = buildSimulatedNodes( random, time, - addressBook, + roster, transactionSupplier::get, useBirthRoundForAncient ? 
AncientMode.BIRTH_ROUND_THRESHOLD : AncientMode.GENERATION_THRESHOLD); @@ -705,20 +709,19 @@ void zeroWeightSlowNodeTest(final boolean advancingClock, final boolean useBirth for (int eventIndex = 0; eventIndex < 100; eventIndex++) { - final List
    addresses = new ArrayList<>(); - addressBook.iterator().forEachRemaining(addresses::add); + final List addresses = new ArrayList<>(roster.rosterEntries()); Collections.shuffle(addresses, random); boolean atLeastOneEventCreated = false; - for (final Address address : addresses) { + for (final RosterEntry address : addresses) { if (advancingClock) { time.tick(Duration.ofMillis(10)); } transactionSupplier.set(generateRandomTransactions(random)); - final NodeId nodeId = address.getNodeId(); + final NodeId nodeId = NodeId.of(address.nodeId()); final EventCreator eventCreator = nodes.get(nodeId).eventCreator; final UnsignedEvent event = eventCreator.maybeCreateEvent(); @@ -783,8 +786,8 @@ void sizeOneNetworkTest(final boolean advancingClock, final boolean useBirthRoun final int networkSize = 1; - final AddressBook addressBook = - RandomAddressBookBuilder.create(random).withSize(networkSize).build(); + final Roster roster = + RandomRosterBuilder.create(random).withSize(networkSize).build(); final FakeTime time = new FakeTime(); @@ -793,13 +796,13 @@ void sizeOneNetworkTest(final boolean advancingClock, final boolean useBirthRoun final Map nodes = buildSimulatedNodes( random, time, - addressBook, + roster, transactionSupplier::get, useBirthRoundForAncient ? 
AncientMode.BIRTH_ROUND_THRESHOLD : AncientMode.GENERATION_THRESHOLD); final Map events = new HashMap<>(); - final Address address = addressBook.getAddress(addressBook.getNodeId(0)); + final RosterEntry address = roster.rosterEntries().get(0); for (int eventIndex = 0; eventIndex < 100; eventIndex++) { if (advancingClock) { @@ -808,7 +811,7 @@ void sizeOneNetworkTest(final boolean advancingClock, final boolean useBirthRoun transactionSupplier.set(generateRandomTransactions(random)); - final NodeId nodeId = address.getNodeId(); + final NodeId nodeId = NodeId.of(address.nodeId()); final EventCreator eventCreator = nodes.get(nodeId).eventCreator; final UnsignedEvent event = eventCreator.maybeCreateEvent(); @@ -862,7 +865,7 @@ void frozenEventCreationBug() { final int networkSize = 4; - final AddressBook addressBook = RandomAddressBookBuilder.create(random) + final Roster roster = RandomRosterBuilder.create(random) .withMinimumWeight(1) .withMaximumWeight(1) .withSize(networkSize) @@ -870,13 +873,13 @@ void frozenEventCreationBug() { final FakeTime time = new FakeTime(); - final NodeId nodeA = addressBook.getNodeId(0); // self - final NodeId nodeB = addressBook.getNodeId(1); - final NodeId nodeC = addressBook.getNodeId(2); - final NodeId nodeD = addressBook.getNodeId(3); + final NodeId nodeA = NodeId.of(roster.rosterEntries().get(0).nodeId()); // self + final NodeId nodeB = NodeId.of(roster.rosterEntries().get(1).nodeId()); + final NodeId nodeC = NodeId.of(roster.rosterEntries().get(2).nodeId()); + final NodeId nodeD = NodeId.of(roster.rosterEntries().get(3).nodeId()); // All nodes except for node 0 are fully mocked. This test is testing how node 0 behaves. 
- final EventCreator eventCreator = buildEventCreator(random, time, addressBook, nodeA, Collections::emptyList); + final EventCreator eventCreator = buildEventCreator(random, time, roster, nodeA, Collections::emptyList); // Create some genesis events final UnsignedEvent eventA1 = eventCreator.maybeCreateEvent(); @@ -939,7 +942,7 @@ void notRegisteringEventsFromNodesNotInAddressBook() { final int networkSize = 4; - final AddressBook addressBook = RandomAddressBookBuilder.create(random) + final Roster roster = RandomRosterBuilder.create(random) .withMinimumWeight(1) .withMaximumWeight(1) .withSize(networkSize) @@ -947,15 +950,15 @@ void notRegisteringEventsFromNodesNotInAddressBook() { final FakeTime time = new FakeTime(); - final NodeId nodeA = addressBook.getNodeId(0); // self - final NodeId nodeB = addressBook.getNodeId(1); - final NodeId nodeC = addressBook.getNodeId(2); - final NodeId nodeD = addressBook.getNodeId(3); + final NodeId nodeA = NodeId.of(roster.rosterEntries().get(0).nodeId()); // self + final NodeId nodeB = NodeId.of(roster.rosterEntries().get(1).nodeId()); + final NodeId nodeC = NodeId.of(roster.rosterEntries().get(2).nodeId()); + final NodeId nodeD = NodeId.of(roster.rosterEntries().get(3).nodeId()); // Node 4 (E) is not in the address book. final NodeId nodeE = NodeId.of(nodeD.id() + 1); // All nodes except for node 0 are fully mocked. This test is testing how node 0 behaves. 
- final EventCreator eventCreator = buildEventCreator(random, time, addressBook, nodeA, Collections::emptyList); + final EventCreator eventCreator = buildEventCreator(random, time, roster, nodeA, Collections::emptyList); // Create some genesis events final UnsignedEvent eventA1 = eventCreator.maybeCreateEvent(); @@ -1008,7 +1011,7 @@ void noStaleEventsAtCreationTimeTest(final boolean useBirthRoundForAncient) { final int networkSize = 4; - final AddressBook addressBook = RandomAddressBookBuilder.create(random) + final Roster roster = RandomRosterBuilder.create(random) .withMinimumWeight(1) .withMaximumWeight(1) .withSize(networkSize) @@ -1016,9 +1019,9 @@ void noStaleEventsAtCreationTimeTest(final boolean useBirthRoundForAncient) { final FakeTime time = new FakeTime(); - final NodeId nodeA = addressBook.getNodeId(0); // self + final NodeId nodeA = NodeId.of(roster.rosterEntries().get(0).nodeId()); // self - final EventCreator eventCreator = buildEventCreator(random, time, addressBook, nodeA, Collections::emptyList); + final EventCreator eventCreator = buildEventCreator(random, time, roster, nodeA, Collections::emptyList); eventCreator.setEventWindow(new EventWindow( 1, @@ -1044,8 +1047,8 @@ void checkSettingEventBirthRound(final boolean advancingClock, final boolean use final int networkSize = 10; - final AddressBook addressBook = - RandomAddressBookBuilder.create(random).withSize(networkSize).build(); + final Roster roster = + RandomRosterBuilder.create(random).withSize(networkSize).build(); final FakeTime time = new FakeTime(); @@ -1054,21 +1057,21 @@ void checkSettingEventBirthRound(final boolean advancingClock, final boolean use final Map nodes = buildSimulatedNodes( random, time, - addressBook, + roster, transactionSupplier::get, useBirthRoundForAncient ? 
AncientMode.BIRTH_ROUND_THRESHOLD : AncientMode.GENERATION_THRESHOLD); final Map events = new HashMap<>(); for (int eventIndex = 0; eventIndex < 100; eventIndex++) { - for (final Address address : addressBook) { + for (final RosterEntry address : roster.rosterEntries()) { if (advancingClock) { time.tick(Duration.ofMillis(10)); } transactionSupplier.set(generateRandomTransactions(random)); - final NodeId nodeId = address.getNodeId(); + final NodeId nodeId = NodeId.of(address.nodeId()); final EventCreator eventCreator = nodes.get(nodeId).eventCreator; final long pendingConsensusRound = eventIndex + 2; diff --git a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetTests.java b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetTests.java index e012f2fbc7a2..6d851f15dd94 100644 --- a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetTests.java +++ b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetTests.java @@ -19,13 +19,13 @@ import static com.swirlds.common.test.fixtures.RandomUtils.getRandomPrintSeed; import static org.junit.jupiter.api.Assertions.assertEquals; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; import com.swirlds.common.platform.NodeId; import com.swirlds.platform.event.creation.tipset.Tipset; import com.swirlds.platform.event.creation.tipset.TipsetAdvancementWeight; -import com.swirlds.platform.system.address.Address; -import com.swirlds.platform.system.address.AddressBook; -import com.swirlds.platform.test.fixtures.addressbook.RandomAddressBookBuilder; -import com.swirlds.platform.test.fixtures.addressbook.RandomAddressBookBuilder.WeightDistributionStrategy; +import 
com.swirlds.platform.test.fixtures.addressbook.RandomRosterBuilder; +import com.swirlds.platform.test.fixtures.addressbook.RandomRosterBuilder.WeightDistributionStrategy; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -50,17 +50,18 @@ void advancementTest() { final int nodeCount = 100; - final AddressBook addressBook = - RandomAddressBookBuilder.create(random).withSize(nodeCount).build(); + final Roster roster = + RandomRosterBuilder.create(random).withSize(nodeCount).build(); - final Tipset tipset = new Tipset(addressBook); + final Tipset tipset = new Tipset(roster); assertEquals(nodeCount, tipset.size()); final Map expected = new HashMap<>(); for (int iteration = 0; iteration < 10; iteration++) { for (int creator = 0; creator < 100; creator++) { - final NodeId creatorId = addressBook.getNodeId(creator); + final NodeId creatorId = + NodeId.of(roster.rosterEntries().get(creator).nodeId()); final long generation = random.nextLong(1, 100); tipset.advance(creatorId, generation); @@ -77,17 +78,18 @@ void mergeTest() { final int nodeCount = 100; - final AddressBook addressBook = - RandomAddressBookBuilder.create(random).withSize(nodeCount).build(); + final Roster roster = + RandomRosterBuilder.create(random).withSize(nodeCount).build(); for (int count = 0; count < 10; count++) { final List tipsets = new ArrayList<>(); final Map expected = new HashMap<>(); for (int tipsetIndex = 0; tipsetIndex < 10; tipsetIndex++) { - final Tipset tipset = new Tipset(addressBook); + final Tipset tipset = new Tipset(roster); for (int creator = 0; creator < nodeCount; creator++) { - final NodeId creatorId = addressBook.getNodeId(creator); + final NodeId creatorId = + NodeId.of(roster.rosterEntries().get(creator).nodeId()); final long generation = random.nextLong(1, 100); tipset.advance(creatorId, generation); expected.put(creatorId, Math.max(generation, expected.getOrDefault(creatorId, 0L))); @@ -107,17 +109,19 @@ void getAdvancementCountTest() { final int 
nodeCount = 100; - final AddressBook addressBook = RandomAddressBookBuilder.create(random) + final Roster roster = RandomRosterBuilder.create(random) .withSize(nodeCount) .withAverageWeight(1) .withWeightDistributionStrategy(WeightDistributionStrategy.BALANCED) .build(); - final NodeId selfId = addressBook.getNodeId(random.nextInt(nodeCount)); + final NodeId selfId = + NodeId.of(roster.rosterEntries().get(random.nextInt(nodeCount)).nodeId()); - final Tipset initialTipset = new Tipset(addressBook); + final Tipset initialTipset = new Tipset(roster); for (long creator = 0; creator < nodeCount; creator++) { - final NodeId creatorId = addressBook.getNodeId((int) creator); + final NodeId creatorId = + NodeId.of(roster.rosterEntries().get((int) creator).nodeId()); final long generation = random.nextLong(1, 100); initialTipset.advance(creatorId, generation); } @@ -126,7 +130,8 @@ void getAdvancementCountTest() { final Tipset comparisonTipset = Tipset.merge(List.of(initialTipset)); assertEquals(initialTipset.size(), comparisonTipset.size()); for (int creator = 0; creator < 100; creator++) { - final NodeId creatorId = addressBook.getNodeId(creator); + final NodeId creatorId = + NodeId.of(roster.rosterEntries().get(creator).nodeId()); assertEquals( initialTipset.getTipGenerationForNode(creatorId), comparisonTipset.getTipGenerationForNode(creatorId)); @@ -135,7 +140,8 @@ void getAdvancementCountTest() { // Cause the comparison tipset to advance in a random way for (int entryIndex = 0; entryIndex < 100; entryIndex++) { final long creator = random.nextLong(100); - final NodeId creatorId = addressBook.getNodeId((int) creator); + final NodeId creatorId = + NodeId.of(roster.rosterEntries().get((int) creator).nodeId()); final long generation = random.nextLong(1, 100); comparisonTipset.advance(creatorId, generation); @@ -143,7 +149,7 @@ void getAdvancementCountTest() { long expectedAdvancementCount = 0; for (int i = 0; i < 100; i++) { - final NodeId nodeId = addressBook.getNodeId(i); 
+ final NodeId nodeId = NodeId.of(roster.rosterEntries().get(i).nodeId()); if (nodeId.equals(selfId)) { // Self advancements are not counted continue; @@ -163,19 +169,21 @@ void weightedGetAdvancementCountTest() { final Random random = getRandomPrintSeed(); final int nodeCount = 100; - final AddressBook addressBook = - RandomAddressBookBuilder.create(random).withSize(nodeCount).build(); + final Roster roster = + RandomRosterBuilder.create(random).withSize(nodeCount).build(); final Map weights = new HashMap<>(); - for (final Address address : addressBook) { - weights.put(address.getNodeId(), address.getWeight()); + for (final RosterEntry address : roster.rosterEntries()) { + weights.put(NodeId.of(address.nodeId()), address.weight()); } - final NodeId selfId = addressBook.getNodeId(random.nextInt(nodeCount)); + final NodeId selfId = + NodeId.of(roster.rosterEntries().get(random.nextInt(nodeCount)).nodeId()); - final Tipset initialTipset = new Tipset(addressBook); + final Tipset initialTipset = new Tipset(roster); for (long creator = 0; creator < 100; creator++) { - final NodeId creatorId = addressBook.getNodeId((int) creator); + final NodeId creatorId = + NodeId.of(roster.rosterEntries().get((int) creator).nodeId()); final long generation = random.nextLong(1, 100); initialTipset.advance(creatorId, generation); } @@ -184,22 +192,23 @@ void weightedGetAdvancementCountTest() { final Tipset comparisonTipset = Tipset.merge(List.of(initialTipset)); assertEquals(initialTipset.size(), comparisonTipset.size()); for (int creator = 0; creator < 100; creator++) { - final NodeId creatorId = addressBook.getNodeId(creator); + final NodeId creatorId = + NodeId.of(roster.rosterEntries().get(creator).nodeId()); assertEquals( initialTipset.getTipGenerationForNode(creatorId), comparisonTipset.getTipGenerationForNode(creatorId)); } // Cause the comparison tipset to advance in a random way - for (final Address address : addressBook) { + for (final RosterEntry address : 
roster.rosterEntries()) { final long generation = random.nextLong(1, 100); - comparisonTipset.advance(address.getNodeId(), generation); + comparisonTipset.advance(NodeId.of(address.nodeId()), generation); } long expectedAdvancementCount = 0; - for (final Address address : addressBook) { - final NodeId nodeId = address.getNodeId(); + for (final RosterEntry address : roster.rosterEntries()) { + final NodeId nodeId = NodeId.of(address.nodeId()); if (nodeId.equals(selfId)) { // Self advancements are not counted continue; diff --git a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetTrackerTests.java b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetTrackerTests.java index c94164c11569..f56992accc88 100644 --- a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetTrackerTests.java +++ b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetTrackerTests.java @@ -23,6 +23,8 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; import com.hedera.hapi.platform.event.EventDescriptor; import com.swirlds.base.time.Time; import com.swirlds.common.platform.NodeId; @@ -31,11 +33,9 @@ import com.swirlds.platform.event.AncientMode; import com.swirlds.platform.event.creation.tipset.Tipset; import com.swirlds.platform.event.creation.tipset.TipsetTracker; -import com.swirlds.platform.system.address.Address; -import com.swirlds.platform.system.address.AddressBook; import com.swirlds.platform.system.events.EventConstants; import com.swirlds.platform.system.events.EventDescriptorWrapper; -import 
com.swirlds.platform.test.fixtures.addressbook.RandomAddressBookBuilder; +import com.swirlds.platform.test.fixtures.addressbook.RandomRosterBuilder; import edu.umd.cs.findbugs.annotations.NonNull; import java.util.ArrayList; import java.util.HashMap; @@ -52,13 +52,13 @@ class TipsetTrackerTests { private static void assertTipsetEquality( - @NonNull final AddressBook addressBook, @NonNull final Tipset expected, @NonNull final Tipset actual) { + @NonNull final Roster roster, @NonNull final Tipset expected, @NonNull final Tipset actual) { assertEquals(expected.size(), actual.size()); - for (final Address address : addressBook) { + for (final RosterEntry address : roster.rosterEntries()) { assertEquals( - expected.getTipGenerationForNode(address.getNodeId()), - actual.getTipGenerationForNode(address.getNodeId())); + expected.getTipGenerationForNode(NodeId.of(address.nodeId())), + actual.getTipGenerationForNode(NodeId.of(address.nodeId()))); } } @@ -69,19 +69,20 @@ void basicBehaviorTest(final AncientMode ancientMode) { final Random random = getRandomPrintSeed(0); final int nodeCount = random.nextInt(10, 20); - final AddressBook addressBook = - RandomAddressBookBuilder.create(random).withSize(nodeCount).build(); + final Roster roster = + RandomRosterBuilder.create(random).withSize(nodeCount).build(); final Map latestEvents = new HashMap<>(); final Map expectedTipsets = new HashMap<>(); - final TipsetTracker tracker = new TipsetTracker(Time.getCurrent(), addressBook, ancientMode); + final TipsetTracker tracker = new TipsetTracker(Time.getCurrent(), roster, ancientMode); long birthRound = ConsensusConstants.ROUND_FIRST; for (int eventIndex = 0; eventIndex < 1000; eventIndex++) { - final NodeId creator = addressBook.getNodeId(random.nextInt(nodeCount)); + final NodeId creator = NodeId.of( + roster.rosterEntries().get(random.nextInt(nodeCount)).nodeId()); final long generation; if (latestEvents.containsKey(creator)) { generation = 
latestEvents.get(creator).eventDescriptor().generation() + 1; @@ -100,7 +101,8 @@ void basicBehaviorTest(final AncientMode ancientMode) { final Set desiredParents = new HashSet<>(); final int maxParentCount = random.nextInt(nodeCount); for (int parentIndex = 0; parentIndex < maxParentCount; parentIndex++) { - final NodeId parent = addressBook.getNodeId(random.nextInt(nodeCount)); + final NodeId parent = NodeId.of( + roster.rosterEntries().get(random.nextInt(nodeCount)).nodeId()); // We are only trying to generate a random number of parents, the exact count is unimportant. // So it doesn't matter if the actual number of parents is less than the number we requested. @@ -133,18 +135,18 @@ void basicBehaviorTest(final AncientMode ancientMode) { final Tipset expectedTipset; if (parentTipsets.isEmpty()) { - expectedTipset = new Tipset(addressBook).advance(creator, generation); + expectedTipset = new Tipset(roster).advance(creator, generation); } else { expectedTipset = merge(parentTipsets).advance(creator, generation); } expectedTipsets.put(fingerprint, expectedTipset); - assertTipsetEquality(addressBook, expectedTipset, newTipset); + assertTipsetEquality(roster, expectedTipset, newTipset); } // At the very end, we shouldn't see any modified tipsets for (final EventDescriptorWrapper fingerprint : expectedTipsets.keySet()) { - assertTipsetEquality(addressBook, expectedTipsets.get(fingerprint), tracker.getTipset(fingerprint)); + assertTipsetEquality(roster, expectedTipsets.get(fingerprint), tracker.getTipset(fingerprint)); } // Slowly advance the ancient threshold, we should see tipsets disappear as we go. 
@@ -161,7 +163,7 @@ void basicBehaviorTest(final AncientMode ancientMode) { if (fingerprint.getAncientIndicator(ancientMode) < ancientThreshold) { assertNull(tracker.getTipset(fingerprint)); } else { - assertTipsetEquality(addressBook, expectedTipsets.get(fingerprint), tracker.getTipset(fingerprint)); + assertTipsetEquality(roster, expectedTipsets.get(fingerprint), tracker.getTipset(fingerprint)); } } } diff --git a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetWeightCalculatorTests.java b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetWeightCalculatorTests.java index 473f5fee794d..ec22ea2df17a 100644 --- a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetWeightCalculatorTests.java +++ b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/event/tipset/TipsetWeightCalculatorTests.java @@ -28,6 +28,8 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; +import com.hedera.hapi.node.state.roster.Roster; +import com.hedera.hapi.node.state.roster.RosterEntry; import com.hedera.hapi.platform.event.EventDescriptor; import com.swirlds.base.time.Time; import com.swirlds.common.context.PlatformContext; @@ -41,12 +43,10 @@ import com.swirlds.platform.event.creation.tipset.TipsetAdvancementWeight; import com.swirlds.platform.event.creation.tipset.TipsetTracker; import com.swirlds.platform.event.creation.tipset.TipsetWeightCalculator; -import com.swirlds.platform.system.address.Address; -import com.swirlds.platform.system.address.AddressBook; import com.swirlds.platform.system.events.EventConstants; import com.swirlds.platform.system.events.EventDescriptorWrapper; -import com.swirlds.platform.test.fixtures.addressbook.RandomAddressBookBuilder; -import 
com.swirlds.platform.test.fixtures.addressbook.RandomAddressBookBuilder.WeightDistributionStrategy; +import com.swirlds.platform.test.fixtures.addressbook.RandomRosterBuilder; +import com.swirlds.platform.test.fixtures.addressbook.RandomRosterBuilder.WeightDistributionStrategy; import edu.umd.cs.findbugs.annotations.NonNull; import java.util.ArrayList; import java.util.HashMap; @@ -84,34 +84,35 @@ void basicBehaviorTest() { final Map latestEvents = new HashMap<>(); - final AddressBook addressBook = - RandomAddressBookBuilder.create(random).withSize(nodeCount).build(); + final Roster roster = + RandomRosterBuilder.create(random).withSize(nodeCount).build(); final Map weightMap = new HashMap<>(); long totalWeight = 0; - for (final Address address : addressBook) { - weightMap.put(address.getNodeId(), address.getWeight()); - totalWeight += address.getWeight(); + for (final RosterEntry address : roster.rosterEntries()) { + weightMap.put(NodeId.of(address.nodeId()), address.weight()); + totalWeight += address.weight(); } - final NodeId selfId = addressBook.getNodeId(random.nextInt(nodeCount)); + final NodeId selfId = + NodeId.of(roster.rosterEntries().get(random.nextInt(nodeCount)).nodeId()); final PlatformContext platformContext = TestPlatformContextBuilder.create().build(); // FUTURE WORK: Expand test to include birth round based ancient threshold. 
- final TipsetTracker builder = - new TipsetTracker(Time.getCurrent(), addressBook, AncientMode.GENERATION_THRESHOLD); + final TipsetTracker builder = new TipsetTracker(Time.getCurrent(), roster, AncientMode.GENERATION_THRESHOLD); final ChildlessEventTracker childlessEventTracker = new ChildlessEventTracker(); final TipsetWeightCalculator calculator = - new TipsetWeightCalculator(platformContext, addressBook, selfId, builder, childlessEventTracker); + new TipsetWeightCalculator(platformContext, roster, selfId, builder, childlessEventTracker); List previousParents = List.of(); TipsetAdvancementWeight runningAdvancementScore = ZERO_ADVANCEMENT_WEIGHT; Tipset previousSnapshot = calculator.getSnapshot(); for (int eventIndex = 0; eventIndex < 1000; eventIndex++) { - final NodeId creator = addressBook.getNodeId(random.nextInt(nodeCount)); + final NodeId creator = NodeId.of( + roster.rosterEntries().get(random.nextInt(nodeCount)).nodeId()); final long generation; if (latestEvents.containsKey(creator)) { generation = latestEvents.get(creator).eventDescriptor().generation() + 1; @@ -127,7 +128,8 @@ void basicBehaviorTest() { final Set desiredParents = new HashSet<>(); final int maxParentCount = random.nextInt(nodeCount); for (int parentIndex = 0; parentIndex < maxParentCount; parentIndex++) { - final NodeId parent = addressBook.getNodeId(random.nextInt(nodeCount)); + final NodeId parent = NodeId.of( + roster.rosterEntries().get(random.nextInt(nodeCount)).nodeId()); // We are only trying to generate a random number of parents, the exact count is unimportant. // So it doesn't matter if the actual number of parents is less than the number we requested. 
@@ -168,7 +170,7 @@ void basicBehaviorTest() { final Tipset newTipset; if (parentTipsets.isEmpty()) { - newTipset = new Tipset(addressBook).advance(creator, generation); + newTipset = new Tipset(roster).advance(creator, generation); } else { newTipset = merge(parentTipsets).advance(creator, generation); } @@ -217,27 +219,26 @@ void selfishNodeTest() { final Random random = getRandomPrintSeed(); final int nodeCount = 4; - final AddressBook addressBook = RandomAddressBookBuilder.create(random) + final Roster roster = RandomRosterBuilder.create(random) .withSize(nodeCount) .withAverageWeight(1) .withWeightDistributionStrategy(WeightDistributionStrategy.BALANCED) .build(); // In this test, we simulate from the perspective of node A. All nodes have 1 weight. - final NodeId nodeA = addressBook.getNodeId(0); - final NodeId nodeB = addressBook.getNodeId(1); - final NodeId nodeC = addressBook.getNodeId(2); - final NodeId nodeD = addressBook.getNodeId(3); + final NodeId nodeA = NodeId.of(roster.rosterEntries().get(0).nodeId()); + final NodeId nodeB = NodeId.of(roster.rosterEntries().get(1).nodeId()); + final NodeId nodeC = NodeId.of(roster.rosterEntries().get(2).nodeId()); + final NodeId nodeD = NodeId.of(roster.rosterEntries().get(3).nodeId()); final PlatformContext platformContext = TestPlatformContextBuilder.create().build(); // FUTURE WORK: Expand test to include birth round based ancient threshold. 
- final TipsetTracker tracker = - new TipsetTracker(Time.getCurrent(), addressBook, AncientMode.GENERATION_THRESHOLD); + final TipsetTracker tracker = new TipsetTracker(Time.getCurrent(), roster, AncientMode.GENERATION_THRESHOLD); final ChildlessEventTracker childlessEventTracker = new ChildlessEventTracker(); final TipsetWeightCalculator calculator = - new TipsetWeightCalculator(platformContext, addressBook, nodeA, tracker, childlessEventTracker); + new TipsetWeightCalculator(platformContext, roster, nodeA, tracker, childlessEventTracker); final Tipset snapshot1 = calculator.getSnapshot(); @@ -430,7 +431,7 @@ void zeroWeightNodeTest() { final Random random = getRandomPrintSeed(); final int nodeCount = 4; - final AddressBook addressBook = RandomAddressBookBuilder.create(random) + Roster roster = RandomRosterBuilder.create(random) .withSize(nodeCount) .withAverageWeight(1) .withWeightDistributionStrategy(WeightDistributionStrategy.BALANCED) @@ -438,22 +439,31 @@ void zeroWeightNodeTest() { // In this test, we simulate from the perspective of node A. // All nodes have 1 weight except for D, which has 0 weight. 
- final NodeId nodeA = addressBook.getNodeId(0); - final NodeId nodeB = addressBook.getNodeId(1); - final NodeId nodeC = addressBook.getNodeId(2); - final NodeId nodeD = addressBook.getNodeId(3); - - addressBook.add(addressBook.getAddress(nodeD).copySetWeight(0)); + final NodeId nodeA = NodeId.of(roster.rosterEntries().get(0).nodeId()); + final NodeId nodeB = NodeId.of(roster.rosterEntries().get(1).nodeId()); + final NodeId nodeC = NodeId.of(roster.rosterEntries().get(2).nodeId()); + final NodeId nodeD = NodeId.of(roster.rosterEntries().get(3).nodeId()); + + roster = Roster.newBuilder() + .rosterEntries(roster.rosterEntries().stream() + .map(entry -> { + if (entry.nodeId() == nodeD.id()) { + return entry.copyBuilder().weight(0).build(); + } else { + return entry; + } + }) + .toList()) + .build(); final PlatformContext platformContext = TestPlatformContextBuilder.create().build(); // FUTURE WORK: Expand test to include birth round based ancient threshold. - final TipsetTracker builder = - new TipsetTracker(Time.getCurrent(), addressBook, AncientMode.GENERATION_THRESHOLD); + final TipsetTracker builder = new TipsetTracker(Time.getCurrent(), roster, AncientMode.GENERATION_THRESHOLD); final ChildlessEventTracker childlessEventTracker = new ChildlessEventTracker(); final TipsetWeightCalculator calculator = - new TipsetWeightCalculator(platformContext, addressBook, nodeA, builder, childlessEventTracker); + new TipsetWeightCalculator(platformContext, roster, nodeA, builder, childlessEventTracker); final Tipset snapshot1 = calculator.getSnapshot(); @@ -508,26 +518,25 @@ void ancientParentTest() { final Random random = getRandomPrintSeed(); final int nodeCount = 4; - final AddressBook addressBook = RandomAddressBookBuilder.create(random) + final Roster roster = RandomRosterBuilder.create(random) .withSize(nodeCount) .withAverageWeight(1) .withWeightDistributionStrategy(WeightDistributionStrategy.BALANCED) .build(); - final NodeId nodeA = addressBook.getNodeId(0); - final 
NodeId nodeB = addressBook.getNodeId(1); - final NodeId nodeC = addressBook.getNodeId(2); - final NodeId nodeD = addressBook.getNodeId(3); + final NodeId nodeA = NodeId.of(roster.rosterEntries().get(0).nodeId()); + final NodeId nodeB = NodeId.of(roster.rosterEntries().get(1).nodeId()); + final NodeId nodeC = NodeId.of(roster.rosterEntries().get(2).nodeId()); + final NodeId nodeD = NodeId.of(roster.rosterEntries().get(3).nodeId()); final PlatformContext platformContext = TestPlatformContextBuilder.create().build(); // FUTURE WORK: Expand test to include birth round based ancient threshold. - final TipsetTracker builder = - new TipsetTracker(Time.getCurrent(), addressBook, AncientMode.GENERATION_THRESHOLD); + final TipsetTracker builder = new TipsetTracker(Time.getCurrent(), roster, AncientMode.GENERATION_THRESHOLD); final ChildlessEventTracker childlessEventTracker = new ChildlessEventTracker(); final TipsetWeightCalculator calculator = - new TipsetWeightCalculator(platformContext, addressBook, nodeA, builder, childlessEventTracker); + new TipsetWeightCalculator(platformContext, roster, nodeA, builder, childlessEventTracker); // Create generation 1 events. 
final EventDescriptorWrapper eventA1 = newEventDescriptor(randomHash(random), nodeA, 1); diff --git a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/reconnect/FallenBehindManagerTest.java b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/reconnect/FallenBehindManagerTest.java index 83137d64150c..5c4db630c670 100644 --- a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/reconnect/FallenBehindManagerTest.java +++ b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/reconnect/FallenBehindManagerTest.java @@ -25,7 +25,6 @@ import com.swirlds.common.test.fixtures.Randotron; import com.swirlds.config.extensions.test.fixtures.TestConfigBuilder; import com.swirlds.platform.Utilities; -import com.swirlds.platform.gossip.FallenBehindManager; import com.swirlds.platform.gossip.FallenBehindManagerImpl; import com.swirlds.platform.network.PeerInfo; import com.swirlds.platform.network.topology.NetworkTopology; @@ -35,6 +34,7 @@ import com.swirlds.platform.test.fixtures.addressbook.RandomAddressBookBuilder; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import org.hiero.consensus.gossip.FallenBehindManager; import org.junit.jupiter.api.Test; class FallenBehindManagerTest { diff --git a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/sync/TestingSyncManager.java b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/sync/TestingSyncManager.java index ed44b185f919..69fc8b8ce5c3 100644 --- a/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/sync/TestingSyncManager.java +++ b/platform-sdk/swirlds-unit-tests/core/swirlds-platform-test/src/test/java/com/swirlds/platform/test/sync/TestingSyncManager.java @@ -17,8 +17,8 
@@ package com.swirlds.platform.test.sync; import com.swirlds.common.platform.NodeId; -import com.swirlds.platform.gossip.FallenBehindManager; import java.util.List; +import org.hiero.consensus.gossip.FallenBehindManager; public class TestingSyncManager implements FallenBehindManager { /** whether we have fallen behind or not */ diff --git a/settings.gradle.kts b/settings.gradle.kts index 83f80f05bf1e..26e94be8c18f 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -33,6 +33,10 @@ javaModules { module("swirlds") // not actually a Module as it has no module-info.java module("swirlds-benchmarks") // not actually a Module as it has no module-info.java module("swirlds-unit-tests/core/swirlds-platform-test") // nested module is not found automatically + module("consensus-gossip") { artifact = "consensus-gossip" } + module("consensus-gossip-impl") { artifact = "consensus-gossip-impl" } + module("event-creator") { artifact = "event-creator" } + module("event-creator-impl") { artifact = "event-creator-impl" } } // The Hedera services modules