From 8d957f8b6125e46a54051b659e44cf68e224ef15 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Mon, 5 Feb 2024 13:16:09 -0600
Subject: [PATCH] GH-2172 Simplify trx_finality_status_forked_test.py and
 set up finalizers so it works under instant-finality

---
 tests/CMakeLists.txt                     |   5 +-
 tests/trx_finality_status_forked_test.py | 112 ++++++++++-------
 2 files changed, 49 insertions(+), 68 deletions(-)

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 068242ee6f..177b7ac0d7 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -222,9 +222,8 @@ set_property(TEST trx_finality_status_if_test PROPERTY LABELS nonparallelizable_
 add_test(NAME trx_finality_status_forked_test COMMAND tests/trx_finality_status_forked_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST trx_finality_status_forked_test PROPERTY LABELS nonparallelizable_tests)
 
-# requires https://github.com/AntelopeIO/leap/issues/2189
-#add_test(NAME trx_finality_status_forked_if_test COMMAND tests/trx_finality_status_forked_test.py -v --activate-if ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-#set_property(TEST trx_finality_status_forked_if_test PROPERTY LABELS nonparallelizable_tests)
+add_test(NAME trx_finality_status_forked_if_test COMMAND tests/trx_finality_status_forked_test.py -v --activate-if ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+set_property(TEST trx_finality_status_forked_if_test PROPERTY LABELS nonparallelizable_tests)
 
 add_test(NAME db_modes_test COMMAND tests/db_modes_test.sh -v WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_tests_properties(db_modes_test PROPERTIES COST 6000)
diff --git a/tests/trx_finality_status_forked_test.py b/tests/trx_finality_status_forked_test.py
index 9c27782e3a..d501d1dd01 100755
--- a/tests/trx_finality_status_forked_test.py
+++ b/tests/trx_finality_status_forked_test.py
@@ -14,17 +14,20 @@
 # trx_finality_status_forked_test
 #
 # Test to verify that transaction finality status feature is
-# working appropriately when forks occur
+# working appropriately when forks occur.
+# Note: this test does not use transaction retry, as forked-out
+# transactions should always make it into a block unless they
+# expire.
 # ###############################################################
 
 Print=Utils.Print
 errorExit=Utils.errorExit
 
-args = TestHelper.parse_args({"--prod-count","--activate-if","--dump-error-details","--keep-logs","-v","--leave-running",
+args = TestHelper.parse_args({"--activate-if","--dump-error-details","--keep-logs","-v","--leave-running",
                               "--wallet-port","--unshared"})
 Utils.Debug=args.v
-totalProducerNodes=2
+totalProducerNodes=4
 totalNonProducerNodes=1
 totalNodes=totalProducerNodes+totalNonProducerNodes
 maxActiveProducers=3
@@ -37,12 +40,6 @@
 walletMgr=WalletMgr(True, port=walletPort)
 testSuccessful=False
 
-WalletdName=Utils.EosWalletName
-ClientName="cleos"
-
-EOSIO_ACCT_PRIVATE_DEFAULT_KEY = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
-EOSIO_ACCT_PUBLIC_DEFAULT_KEY = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV"
-
 try:
     TestHelper.printSystemInfo("BEGIN")
@@ -62,10 +59,11 @@
 
     # *** setup topogrophy ***
 
-    # "bridge" shape connects defprocera through defproducerb (in node0) to each other and defproducerc is alone (in node01)
-    # and the only connection between those 2 groups is through the bridge node
-    if cluster.launch(prodCount=2, topo="bridge", pnodes=totalProducerNodes,
-                      totalNodes=totalNodes, totalProducers=totalProducers, activateIF=activateIF,
+    # "bridge" shape connects defproducera (node0), defproducerb (node1), and defproducerc (node2) to each other; defproducerd (node3) is alone,
+    # and the only connection between those 2 groups is through the bridge (node4)
+    if cluster.launch(topo="./tests/bridge_for_fork_test_shape.json", pnodes=totalProducerNodes,
+                      totalNodes=totalNodes, totalProducers=totalProducerNodes, loadSystemContract=False,
+                      activateIF=activateIF, biosFinalizer=False,
                       specificExtraNodeosArgs=specificExtraNodeosArgs, extraNodeosArgs=extraNodeosArgs) is False:
         Utils.cmdError("launcher")
@@ -75,24 +73,16 @@
 
     # *** identify each node (producers and non-producing node) ***
 
-    nonProdNode=None
-    prodNodes=[]
-    producers=[]
-    for i, node in enumerate(cluster.getNodes()):
-        node.producers=Cluster.parseProducers(node.nodeId)
-        numProducers=len(node.producers)
-        Print(f"node {i} has producers={node.producers}")
-        if numProducers==0:
-            if nonProdNode is None:
-                nonProdNode=node
-            else:
-                Utils.errorExit("More than one non-producing nodes")
-        else:
-            prodNodes.append(node)
-            producers.extend(node.producers)
-
-    prodAB=prodNodes[0] # defproducera, defproducerb
-    prodC=prodNodes[1] # defproducerc
+    prodNode0 = cluster.getNode(0)
+    prodNode1 = cluster.getNode(1)
+    prodNode2 = cluster.getNode(2)
+    prodNode3 = cluster.getNode(3) # other side of bridge
+    nonProdNode = cluster.getNode(4)
+
+    prodNodes=[ prodNode0, prodNode1, prodNode2, prodNode3 ]
+
+    prodA=prodNode0 # defproducera
+    prodD=prodNode3 # defproducerd
 
     # *** Identify a block where production is stable ***
 
@@ -100,15 +90,6 @@
     cluster.biosNode.kill(signal.SIGTERM)
     cluster.waitOnClusterSync(blockAdvancing=5)
 
-    Print("Creating account1")
-    account1 = Account('account1')
-    account1.ownerPublicKey = EOSIO_ACCT_PUBLIC_DEFAULT_KEY
-    account1.activePublicKey = EOSIO_ACCT_PUBLIC_DEFAULT_KEY
-    cluster.createAccountAndVerify(account1, cluster.eosioAccount, stakedDeposit=1000)
-
-    Print("Validating accounts after bootstrap")
-    cluster.validateAccounts([account1])
-
     # *** Killing the "bridge" node ***
 
     Print('Sending command to kill "bridge" node to separate the 2 producer groups.')
    # kill at the beginning of the production window for defproducera, so there is time for the fork for
@@ -125,12 +106,12 @@
     while nonProdNode.verifyAlive() and count > 0:
-        # wait on prodNode 0 since it will continue to advance, since defproducera and defproducerb are its producers
+        # wait on prodNode 0 since it will continue to advance, since defproducera is its producer
         Print("Wait for next block")
-        assert prodAB.waitForNextBlock(timeout=6), "Production node AB should continue to advance, even after bridge node is killed"
+        assert prodA.waitForNextBlock(timeout=6), "Production node A should continue to advance, even after bridge node is killed"
         count -= 1
 
     assert not nonProdNode.verifyAlive(), "Bridge node should have been killed if test was functioning correctly."
 
-    assert prodC.waitForNextBlock(), "Prod node C should continue to advance, even after bridge node is killed"
+    assert prodD.waitForNextBlock(), "Prod node D should continue to advance, even after bridge node is killed"
 
     def getState(status):
         assert status is not None, "ERROR: getTransactionStatus failed to return any status"
@@ -139,10 +120,11 @@ def getState(status):
         return status["state"]
 
     transferAmount = 10
-    transfer = prodC.transferFunds(cluster.eosioAccount, account1, f"{transferAmount}.0000 {CORE_SYMBOL}", "fund account")
+    # Does not use transaction retry (not needed)
+    transfer = prodD.transferFunds(cluster.eosioAccount, cluster.defproduceraAccount, f"{transferAmount}.0000 {CORE_SYMBOL}", "fund account")
     transBlockNum = transfer['processed']['block_num']
-    transId = prodC.getLastTrackedTransactionId()
-    retStatus = prodC.getTransactionStatus(transId)
+    transId = prodD.getLastTrackedTransactionId()
+    retStatus = prodD.getTransactionStatus(transId)
     state = getState(retStatus)
 
     localState = "LOCALLY_APPLIED"
@@ -154,18 +136,18 @@ def getState(status):
     assert state == localState or state == inBlockState, \
         f"ERROR: getTransactionStatus didn't return \"{localState}\" or \"{inBlockState}\" state.\n\nstatus: {json.dumps(retStatus, indent=1)}"
 
-    assert prodC.waitForNextBlock(), "Production node C should continue to advance, even after bridge node is killed"
+    assert prodD.waitForNextBlock(), "Production node D should continue to advance, even after bridge node is killed"
 
     # since the Bridge node is killed when this producer is producing its last block in its window, there is plenty of time for the transfer to be
     # sent before the first block is created, but adding this to ensure it is in one of these blocks
     numTries = 2
     while numTries > 0:
-        retStatus = prodC.getTransactionStatus(transId)
+        retStatus = prodD.getTransactionStatus(transId)
         state = getState(retStatus)
         if state == inBlockState:
             break
         numTries -= 1
-        assert prodC.waitForNextBlock(), "Production node C should continue to advance, even after bridge node is killed"
+        assert prodD.waitForNextBlock(), "Production node D should continue to advance, even after bridge node is killed"
 
     Print(f"getTransactionStatus returned status: {json.dumps(retStatus, indent=1)}")
     assert state == inBlockState, \
@@ -177,34 +159,34 @@ def getState(status):
     if not nonProdNode.relaunch():
         errorExit(f"Failure - (non-production) node {nonProdNode.nodeNum} should have restarted")
 
-    while prodC.getInfo()['last_irreversible_block_num'] < transBlockNum:
-        Print("Wait for LIB to move, which indicates prodC may have forked out the branch")
-        assert prodC.waitForLibToAdvance(60), \
+    while prodD.getInfo()['last_irreversible_block_num'] < transBlockNum:
+        Print("Wait for LIB to move, which indicates prodD may have forked out the branch")
+        assert prodD.waitForLibToAdvance(60), \
             "ERROR: Network did not reach consensus after bridge node was restarted."
-        if prodC.getTransactionStatus(transId)['state'] == forkedOutState:
+        if prodD.getTransactionStatus(transId)['state'] == forkedOutState:
             break
 
-    retStatus = prodC.getTransactionStatus(transId)
+    retStatus = prodD.getTransactionStatus(transId)
     state = getState(retStatus)
 
     assert state == forkedOutState, \
         f"ERROR: getTransactionStatus didn't return \"{forkedOutState}\" state.\n\nstatus: {json.dumps(retStatus, indent=1)}" + \
-        f"\n\nprod AB info: {json.dumps(prodAB.getInfo(), indent=1)}\n\nprod C info: {json.dumps(prodC.getInfo(), indent=1)}"
+        f"\n\nprod A info: {json.dumps(prodA.getInfo(), indent=1)}\n\nprod D info: {json.dumps(prodD.getInfo(), indent=1)}"
 
     for prodNode in prodNodes:
         info=prodNode.getInfo()
         Print(f"node info: {json.dumps(info, indent=1)}")
 
-    assert prodC.waitForProducer("defproducerc"), \
-        f"Waiting for prodC to produce, but it never happened" + \
-        f"\n\nprod AB info: {json.dumps(prodAB.getInfo(), indent=1)}\n\nprod C info: {json.dumps(prodC.getInfo(), indent=1)}"
+    assert prodD.waitForProducer("defproducerd"), \
+        f"Waiting for prodD to produce, but it never happened" + \
+        f"\n\nprod A info: {json.dumps(prodA.getInfo(), indent=1)}\n\nprod D info: {json.dumps(prodD.getInfo(), indent=1)}"
 
-    retStatus = prodC.getTransactionStatus(transId)
+    retStatus = prodD.getTransactionStatus(transId)
     state = getState(retStatus)
 
     assert state == inBlockState, \
         f"ERROR: getTransactionStatus didn't return \"{inBlockState}\" state.\n\nstatus: {json.dumps(retStatus, indent=1)}" + \
-        f"\n\nprod AB info: {json.dumps(prodAB.getInfo(), indent=1)}\n\nprod C info: {json.dumps(prodC.getInfo(), indent=1)}"
+        f"\n\nprod A info: {json.dumps(prodA.getInfo(), indent=1)}\n\nprod D info: {json.dumps(prodD.getInfo(), indent=1)}"
 
     afterForkInBlockState = retStatus
     afterForkBlockId = retStatus["block_id"]
@@ -212,22 +194,22 @@ def getState(status):
         "ERROR: The way the test is designed, the transaction should be added to a block that has a higher number than it was in originally before it was forked out." + \
         f"\n\noriginal in block state: {json.dumps(originalInBlockState, indent=1)}\n\nafter fork in block state: {json.dumps(afterForkInBlockState, indent=1)}"
 
-    assert prodC.waitForBlock(afterForkInBlockState["block_number"], timeout=120, blockType=BlockType.lib), \
-        f"ERROR: Block never finalized.\n\nprod AB info: {json.dumps(prodAB.getInfo(), indent=1)}\n\nprod C info: {json.dumps(prodC.getInfo(), indent=1)}" + \
+    assert prodD.waitForBlock(afterForkInBlockState["block_number"], timeout=120, blockType=BlockType.lib), \
+        f"ERROR: Block never finalized.\n\nprod A info: {json.dumps(prodA.getInfo(), indent=1)}\n\nprod D info: {json.dumps(prodD.getInfo(), indent=1)}" + \
         f"\n\nafter fork in block state: {json.dumps(afterForkInBlockState, indent=1)}"
 
-    retStatus = prodC.getTransactionStatus(transId)
+    retStatus = prodD.getTransactionStatus(transId)
 
     if afterForkBlockId != retStatus["block_id"]: # might have been forked out, if so wait for new block to become LIB
-        assert prodC.waitForBlock(retStatus["block_number"], timeout=120, blockType=BlockType.lib), \
-            f"ERROR: Block never finalized.\n\nprod AB info: {json.dumps(prodAB.getInfo(), indent=1)}\n\nprod C info: {json.dumps(prodC.getInfo(), indent=1)}" + \
+        assert prodD.waitForBlock(retStatus["block_number"], timeout=120, blockType=BlockType.lib), \
+            f"ERROR: Block never finalized.\n\nprod A info: {json.dumps(prodA.getInfo(), indent=1)}\n\nprod D info: {json.dumps(prodD.getInfo(), indent=1)}" + \
             f"\n\nafter fork in block state: {json.dumps(afterForkInBlockState, indent=1)}"
-        retStatus = prodC.getTransactionStatus(transId)
+        retStatus = prodD.getTransactionStatus(transId)
 
     state = getState(retStatus)
     assert state == irreversibleState, \
         f"ERROR: getTransactionStatus didn't return \"{irreversibleState}\" state.\n\nstatus: {json.dumps(retStatus, indent=1)}" + \
-        f"\n\nprod AB info: {json.dumps(prodAB.getInfo(), indent=1)}\n\nprod C info: {json.dumps(prodC.getInfo(), indent=1)}"
+        f"\n\nprod A info: {json.dumps(prodA.getInfo(), indent=1)}\n\nprod D info: {json.dumps(prodD.getInfo(), indent=1)}"
 
     testSuccessful=True
 finally:
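
With the patch applied, the re-enabled instant-finality variant can be exercised on its own from the CMake build directory. A minimal invocation, assuming a standard build tree (the test name and label below come from the CMakeLists.txt change above; the ctest options are standard):

    ctest -R trx_finality_status_forked_if_test --output-on-failure

The pre-existing variant still runs as trx_finality_status_forked_test, and since both carry the nonparallelizable_tests label, the pair can also be selected together with ctest -L nonparallelizable_tests.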