diff --git a/libraries/chain/block_state.cpp b/libraries/chain/block_state.cpp index 74865cabcc..e01a811864 100644 --- a/libraries/chain/block_state.cpp +++ b/libraries/chain/block_state.cpp @@ -69,7 +69,7 @@ void block_state::set_trxs_metas( deque&& trxs_metas, } // Called from net threads -std::pair> block_state::aggregate_vote(const vote_message& vote) { +std::pair> block_state::aggregate_vote(const vote_message& vote) { const auto& finalizers = active_finalizer_policy->finalizers; auto it = std::find_if(finalizers.begin(), finalizers.end(), @@ -78,17 +78,17 @@ std::pair> block_state::aggregate_vote(const vote_ if (it != finalizers.end()) { auto index = std::distance(finalizers.begin(), it); const digest_type& digest = vote.strong ? strong_digest : weak_digest; - auto [valid, strong] = pending_qc.add_vote(vote.strong, + auto [status, strong] = pending_qc.add_vote(vote.strong, #warning TODO change to use std::span if possible std::vector{digest.data(), digest.data() + digest.data_size()}, index, vote.finalizer_key, vote.sig, finalizers[index].weight); - return {valid, strong ? core.final_on_strong_qc_block_num : std::optional{}}; + return {status, strong ? 
core.final_on_strong_qc_block_num : std::optional{}}; } else { wlog( "finalizer_key (${k}) in vote is not in finalizer policy", ("k", vote.finalizer_key) ); - return {}; + return {vote_status::unknown_public_key, {}}; } } @@ -116,12 +116,12 @@ void block_state::verify_qc(const valid_quorum_certificate& qc) const { // verfify quorum is met if( qc.is_strong() ) { EOS_ASSERT( strong_weights >= active_finalizer_policy->threshold, - block_validate_exception, + invalid_qc_claim, "strong quorum is not met, strong_weights: ${s}, threshold: ${t}", ("s", strong_weights)("t", active_finalizer_policy->threshold) ); } else { EOS_ASSERT( strong_weights + weak_weights >= active_finalizer_policy->threshold, - block_validate_exception, + invalid_qc_claim, "weak quorum is not met, strong_weights: ${s}, weak_weights: ${w}, threshold: ${t}", ("s", strong_weights)("w", weak_weights)("t", active_finalizer_policy->threshold) ); } @@ -155,7 +155,7 @@ void block_state::verify_qc(const valid_quorum_certificate& qc) const { } // validate aggregated signature - EOS_ASSERT( fc::crypto::blslib::aggregate_verify( pubkeys, digests, qc._sig ), block_validate_exception, "signature validation failed" ); + EOS_ASSERT( fc::crypto::blslib::aggregate_verify( pubkeys, digests, qc._sig ), invalid_qc_claim, "signature validation failed" ); } std::optional block_state::get_best_qc() const { diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index e673c698ca..a8212fbbb2 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -2999,18 +2999,18 @@ struct controller_impl { } /// apply_block // called from net threads and controller's thread pool - bool process_vote_message( const vote_message& vote ) { - auto do_vote = [&vote](auto& forkdb) -> std::pair> { + vote_status process_vote_message( const vote_message& vote ) { + auto do_vote = [&vote](auto& forkdb) -> std::pair> { auto bsp = forkdb.get_block(vote.proposal_id); if (bsp) return 
bsp->aggregate_vote(vote); - return {false, {}}; + return {vote_status::unknown_block, {}}; }; - auto [valid, new_lib] = fork_db.apply_if>>(do_vote); + auto [status, new_lib] = fork_db.apply_if>>(do_vote); if (new_lib) { set_if_irreversible_block_num(*new_lib); } - return valid; + return status; }; void create_and_send_vote_msg(const block_state_ptr& bsp) { @@ -3097,12 +3097,12 @@ struct controller_impl { // block doesn't have a header extension either. Then return early. // ------------------------------------------------------------------------------------------ EOS_ASSERT( !qc_extension_present, - block_validate_exception, + invalid_qc_claim, "Block #${b} includes a QC block extension, but doesn't have a finality header extension", ("b", block_num) ); EOS_ASSERT( !prev_header_ext, - block_validate_exception, + invalid_qc_claim, "Block #${b} doesn't have a finality header extension even though its predecessor does.", ("b", block_num) ); return; @@ -3118,7 +3118,7 @@ struct controller_impl { // ------------------------------------------------------------------------------------------------- if (!prev_header_ext) { EOS_ASSERT( !qc_extension_present && qc_claim.last_qc_block_num == block_num && qc_claim.is_last_qc_strong == false, - block_validate_exception, + invalid_qc_claim, "Block #${b}, which is the finality transition block, doesn't have the expected extensions", ("b", block_num) ); return; @@ -3136,7 +3136,7 @@ struct controller_impl { // new claimed QC block number cannot be smaller than previous block's EOS_ASSERT( qc_claim.last_qc_block_num >= prev_qc_claim.last_qc_block_num, - block_validate_exception, + invalid_qc_claim, "Block #${b} claims a last_qc_block_num (${n1}) less than the previous block's (${n2})", ("n1", qc_claim.last_qc_block_num)("n2", prev_qc_claim.last_qc_block_num)("b", block_num) ); @@ -3144,7 +3144,7 @@ struct controller_impl { if( qc_claim.is_last_qc_strong == prev_qc_claim.is_last_qc_strong ) { // QC block extension is redundant 
EOS_ASSERT( !qc_extension_present, - block_validate_exception, + invalid_qc_claim, "Block #${b} should not provide a QC block extension since its QC claim is the same as the previous block's", ("b", block_num) ); @@ -3155,14 +3155,14 @@ struct controller_impl { // new claimed QC must be stronger than previous if the claimed block number is the same EOS_ASSERT( qc_claim.is_last_qc_strong, - block_validate_exception, + invalid_qc_claim, "claimed QC (${s1}) must be stricter than previous block's (${s2}) if block number is the same. Block number: ${b}", ("s1", qc_claim.is_last_qc_strong)("s2", prev_qc_claim.is_last_qc_strong)("b", block_num) ); } // At this point, we are making a new claim in this block, so it better include a QC to justify this claim. EOS_ASSERT( qc_extension_present, - block_validate_exception, + invalid_qc_claim, "Block #${b} is making a new finality claim, but doesn't include a qc to justify this claim", ("b", block_num) ); const auto& qc_ext = std::get(block_exts.lower_bound(qc_ext_id)->second); @@ -3170,20 +3170,20 @@ struct controller_impl { // Check QC information in header extension and block extension match EOS_ASSERT( qc_proof.block_num == qc_claim.last_qc_block_num, - block_validate_exception, + invalid_qc_claim, "Block #${b}: Mismatch between qc.block_num (${n1}) in block extension and last_qc_block_num (${n2}) in header extension", ("n1", qc_proof.block_num)("n2", qc_claim.last_qc_block_num)("b", block_num) ); // Verify claimed strictness is the same as in proof EOS_ASSERT( qc_proof.qc.is_strong() == qc_claim.is_last_qc_strong, - block_validate_exception, + invalid_qc_claim, "QC is_strong (${s1}) in block extension does not match is_last_qc_strong (${s2}) in header extension. 
Block number: ${b}", ("s1", qc_proof.qc.is_strong())("s2", qc_claim.is_last_qc_strong)("b", block_num) ); // find the claimed block's block state on branch of id auto bsp = fork_db_fetch_bsp_by_num( prev.id, qc_claim.last_qc_block_num ); EOS_ASSERT( bsp, - block_validate_exception, + invalid_qc_claim, "Block state was not found in forkdb for last_qc_block_num ${q}. Block number: ${b}", ("q", qc_claim.last_qc_block_num)("b", block_num) ); @@ -4485,7 +4485,7 @@ void controller::set_proposed_finalizers( const finalizer_policy& fin_pol ) { } // called from net threads -bool controller::process_vote_message( const vote_message& vote ) { +vote_status controller::process_vote_message( const vote_message& vote ) { return my->process_vote_message( vote ); }; diff --git a/libraries/chain/hotstuff/hotstuff.cpp b/libraries/chain/hotstuff/hotstuff.cpp index e2f361ff1e..93642aebf7 100644 --- a/libraries/chain/hotstuff/hotstuff.cpp +++ b/libraries/chain/hotstuff/hotstuff.cpp @@ -20,18 +20,18 @@ inline std::vector bitset_to_vector(const hs_bitset& bs) { return r; } -bool pending_quorum_certificate::votes_t::add_vote(const std::vector& proposal_digest, size_t index, - const bls_public_key& pubkey, const bls_signature& new_sig) { +vote_status pending_quorum_certificate::votes_t::add_vote(const std::vector& proposal_digest, size_t index, + const bls_public_key& pubkey, const bls_signature& new_sig) { if (_bitset[index]) { - return false; // shouldn't be already present + return vote_status::duplicate; // shouldn't be already present } if (!fc::crypto::blslib::verify(pubkey, proposal_digest, new_sig)) { wlog( "signature from finalizer ${i} cannot be verified", ("i", index) ); - return false; + return vote_status::invalid_signature; } _bitset.set(index); _sig = fc::crypto::blslib::aggregate({_sig, new_sig}); // works even if _sig is default initialized (fp2::zero()) - return true; + return vote_status::success; } void pending_quorum_certificate::votes_t::reset(size_t num_finalizers) { 
@@ -59,11 +59,11 @@ bool pending_quorum_certificate::is_quorum_met() const { } // called by add_vote, already protected by mutex -bool pending_quorum_certificate::add_strong_vote(const std::vector& proposal_digest, size_t index, - const bls_public_key& pubkey, const bls_signature& sig, - uint64_t weight) { - if (!_strong_votes.add_vote(proposal_digest, index, pubkey, sig)) - return false; +vote_status pending_quorum_certificate::add_strong_vote(const std::vector& proposal_digest, size_t index, + const bls_public_key& pubkey, const bls_signature& sig, + uint64_t weight) { + if (auto s = _strong_votes.add_vote(proposal_digest, index, pubkey, sig); s != vote_status::success) + return s; _strong_sum += weight; switch (_state) { @@ -86,15 +86,15 @@ bool pending_quorum_certificate::add_strong_vote(const std::vector& pro // getting another strong vote...nothing to do break; } - return true; + return vote_status::success; } // called by add_vote, already protected by mutex -bool pending_quorum_certificate::add_weak_vote(const std::vector& proposal_digest, size_t index, - const bls_public_key& pubkey, const bls_signature& sig, - uint64_t weight) { - if (!_weak_votes.add_vote(proposal_digest, index, pubkey, sig)) - return false; +vote_status pending_quorum_certificate::add_weak_vote(const std::vector& proposal_digest, size_t index, + const bls_public_key& pubkey, const bls_signature& sig, + uint64_t weight) { + if (auto s = _weak_votes.add_vote(proposal_digest, index, pubkey, sig); s != vote_status::success) + return s; _weak_sum += weight; switch (_state) { @@ -121,17 +121,17 @@ bool pending_quorum_certificate::add_weak_vote(const std::vector& propo // getting another weak vote... 
nothing to do break; } - return true; + return vote_status::success; } // thread safe, -std::pair pending_quorum_certificate::add_vote(bool strong, const std::vector& proposal_digest, size_t index, - const bls_public_key& pubkey, const bls_signature& sig, - uint64_t weight) { +std::pair pending_quorum_certificate::add_vote(bool strong, const std::vector& proposal_digest, size_t index, + const bls_public_key& pubkey, const bls_signature& sig, + uint64_t weight) { std::lock_guard g(*_mtx); - bool valid = strong ? add_strong_vote(proposal_digest, index, pubkey, sig, weight) - : add_weak_vote(proposal_digest, index, pubkey, sig, weight); - return {valid, _state == state_t::strong}; + vote_status s = strong ? add_strong_vote(proposal_digest, index, pubkey, sig, weight) + : add_weak_vote(proposal_digest, index, pubkey, sig, weight); + return {s, _state == state_t::strong}; } // thread safe diff --git a/libraries/chain/hotstuff/test/finality_misc_tests.cpp b/libraries/chain/hotstuff/test/finality_misc_tests.cpp index 9f1726f07b..418d82116c 100644 --- a/libraries/chain/hotstuff/test/finality_misc_tests.cpp +++ b/libraries/chain/hotstuff/test/finality_misc_tests.cpp @@ -74,8 +74,8 @@ BOOST_AUTO_TEST_CASE(qc_state_transitions) try { // add duplicate weak vote // ----------------------- - bool ok = weak_vote(qc, digest, 0, weight); - BOOST_CHECK(!ok); // vote was a duplicate + auto ok = weak_vote(qc, digest, 0, weight); + BOOST_CHECK(ok != vote_status::success); // vote was a duplicate BOOST_CHECK_EQUAL(qc.state(), state_t::weak_achieved); BOOST_CHECK(qc.is_quorum_met()); diff --git a/libraries/chain/include/eosio/chain/block_state.hpp b/libraries/chain/include/eosio/chain/block_state.hpp index 4548c2804f..9492ca0591 100644 --- a/libraries/chain/include/eosio/chain/block_state.hpp +++ b/libraries/chain/include/eosio/chain/block_state.hpp @@ -39,7 +39,7 @@ struct block_state : public block_header_state { // block_header_state provi void set_trxs_metas(deque&& trxs_metas, bool 
keys_recovered); const deque& trxs_metas() const { return cached_trxs; } - std::pair> aggregate_vote(const vote_message& vote); // aggregate vote into pending_qc + std::pair> aggregate_vote(const vote_message& vote); // aggregate vote into pending_qc void verify_qc(const valid_quorum_certificate& qc) const; // verify given qc is valid with respect block_state using bhs_t = block_header_state; diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 6434849942..448ec80468 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -8,6 +8,7 @@ #include #include #include +#include #include @@ -326,7 +327,7 @@ namespace eosio::chain { // called by host function set_finalizers void set_proposed_finalizers( const finalizer_policy& fin_set ); // called from net threads - bool process_vote_message( const vote_message& msg ); + vote_status process_vote_message( const vote_message& msg ); bool light_validation_allowed() const; bool skip_auth_check()const; diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 22aa819574..f7a41b640d 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -255,6 +255,8 @@ namespace eosio { namespace chain { 3030012, "Invalid block extension" ) FC_DECLARE_DERIVED_EXCEPTION( ill_formed_additional_block_signatures_extension, block_validate_exception, 3030013, "Block includes an ill-formed additional block signature extension" ) + FC_DECLARE_DERIVED_EXCEPTION( invalid_qc_claim, block_validate_exception, + 3030014, "Block includes an invalid QC claim" ) FC_DECLARE_DERIVED_EXCEPTION( transaction_exception, chain_exception, 3040000, "Transaction exception" ) diff --git a/libraries/chain/include/eosio/chain/hotstuff/hotstuff.hpp b/libraries/chain/include/eosio/chain/hotstuff/hotstuff.hpp 
index a11e32653d..e2003fa313 100644 --- a/libraries/chain/include/eosio/chain/hotstuff/hotstuff.hpp +++ b/libraries/chain/include/eosio/chain/hotstuff/hotstuff.hpp @@ -92,6 +92,14 @@ namespace eosio::chain { invalid // invalid message (other reason) }; + enum class vote_status { + success, + duplicate, + unknown_public_key, + invalid_signature, + unknown_block + }; + using bls_public_key = fc::crypto::blslib::bls_public_key; using bls_signature = fc::crypto::blslib::bls_signature; using bls_private_key = fc::crypto::blslib::bls_private_key; @@ -151,8 +159,8 @@ namespace eosio::chain { void resize(size_t num_finalizers) { _bitset.resize(num_finalizers); } size_t count() const { return _bitset.count(); } - bool add_vote(const std::vector& proposal_digest, size_t index, const bls_public_key& pubkey, - const bls_signature& new_sig); + vote_status add_vote(const std::vector& proposal_digest, size_t index, const bls_public_key& pubkey, + const bls_signature& new_sig); void reset(size_t num_finalizers); }; @@ -165,12 +173,12 @@ namespace eosio::chain { bool is_quorum_met() const; // thread safe - std::pair add_vote(bool strong, - const std::vector&proposal_digest, - size_t index, - const bls_public_key&pubkey, - const bls_signature&sig, - uint64_t weight); + std::pair add_vote(bool strong, + const std::vector&proposal_digest, + size_t index, + const bls_public_key&pubkey, + const bls_signature&sig, + uint64_t weight); state_t state() const { std::lock_guard g(*_mtx); return _state; }; valid_quorum_certificate to_valid_quorum_certificate() const; @@ -198,18 +206,18 @@ namespace eosio::chain { votes_t _strong_votes; // called by add_vote, already protected by mutex - bool add_strong_vote(const std::vector& proposal_digest, - size_t index, - const bls_public_key& pubkey, - const bls_signature& sig, - uint64_t weight); + vote_status add_strong_vote(const std::vector& proposal_digest, + size_t index, + const bls_public_key& pubkey, + const bls_signature& sig, + uint64_t 
weight); // called by add_vote, already protected by mutex - bool add_weak_vote(const std::vector& proposal_digest, - size_t index, - const bls_public_key& pubkey, - const bls_signature& sig, - uint64_t weight); + vote_status add_weak_vote(const std::vector& proposal_digest, + size_t index, + const bls_public_key& pubkey, + const bls_signature& sig, + uint64_t weight); }; } //eosio::chain diff --git a/libraries/chain/snapshot_scheduler.cpp b/libraries/chain/snapshot_scheduler.cpp index 38191222ac..b666a9360f 100644 --- a/libraries/chain/snapshot_scheduler.cpp +++ b/libraries/chain/snapshot_scheduler.cpp @@ -219,7 +219,8 @@ void snapshot_scheduler::create_snapshot(next_function nex std::error_code ec; fs::rename(temp_path, pending_path, ec); EOS_ASSERT(!ec, snapshot_finalization_exception, - "Unable to promote temp snapshot to pending for block number ${bn}: [code: ${ec}] ${message}", + "Unable to promote temp snapshot ${t} to pending ${p} for block number ${bn}: [code: ${ec}] ${message}", + ("t", temp_path.generic_string())("p", pending_path.generic_string()) ("bn", head_block_num)("ec", ec.value())("message", ec.message())); _pending_snapshot_index.emplace(head_id, next, pending_path.generic_string(), snapshot_path.generic_string()); add_pending_snapshot_info(snapshot_information{head_id, head_block_num, head_block_time, chain_snapshot_header::current_version, pending_path.generic_string()}); diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 0d240b5311..656b636b49 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -252,7 +252,24 @@ namespace eosio { namespace testing { transaction_trace_ptr set_producers(const vector& producer_names); transaction_trace_ptr set_producer_schedule(const vector& schedule); transaction_trace_ptr set_producers_legacy(const vector& producer_names); - transaction_trace_ptr set_finalizers(const 
vector& finalier_names); + + // libtester uses 1 as weight of each of the finalizer, sets (2/3 finalizers + 1) + // as threshold, and makes all finalizers vote QC + transaction_trace_ptr set_finalizers(const vector& finalizer_names); + + // Finalizer policy input to set up a test: weights, threshold and local finalizers + // which participate voting. + struct finalizer_policy_input { + struct finalizer_info { + account_name name; + uint64_t weight; + }; + + std::vector finalizers; + uint64_t threshold {0}; + std::vector local_finalizers; + }; + transaction_trace_ptr set_finalizers(const finalizer_policy_input& input); void link_authority( account_name account, account_name code, permission_name req, action_name type = {} ); void unlink_authority( account_name account, account_name code, action_name type = {} ); diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 85b78eb95a..fb6e0488d3 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -1188,27 +1188,46 @@ namespace eosio { namespace testing { } transaction_trace_ptr base_tester::set_finalizers(const vector& finalizer_names) { - uint64_t threshold = finalizer_names.size() * 2 / 3 + 1; + auto num_finalizers = finalizer_names.size(); + std::vector finalizers_info; + finalizers_info.reserve(num_finalizers); + for (const auto& f: finalizer_names) { + finalizers_info.push_back({.name = f, .weight = 1}); + } + + finalizer_policy_input policy_input = { + .finalizers = finalizers_info, + .threshold = num_finalizers * 2 / 3 + 1, + .local_finalizers = finalizer_names + }; - chain::bls_pub_priv_key_map_t finalizer_keys; + return set_finalizers(policy_input); + } + + transaction_trace_ptr base_tester::set_finalizers(const finalizer_policy_input& input) { + chain::bls_pub_priv_key_map_t local_finalizer_keys; fc::variants finalizer_auths; - for (const auto& n: finalizer_names) { - auto [privkey, pubkey, pop] = get_bls_key( n ); - finalizer_keys[pubkey.to_string()] = 
privkey.to_string(); + for (const auto& f: input.finalizers) { + auto [privkey, pubkey, pop] = get_bls_key( f.name ); + + // if it is a local finalizer, set up public to private key mapping for voting + if( auto it = std::ranges::find_if(input.local_finalizers, [&](const auto& name) { return name == f.name; }); it != input.local_finalizers.end()) { + local_finalizer_keys[pubkey.to_string()] = privkey.to_string(); + }; + finalizer_auths.emplace_back( fc::mutable_variant_object() - ("description", n.to_string() + " description") - ("weight", (uint64_t)1) + ("description", f.name.to_string() + " description") + ("weight", f.weight) ("public_key", pubkey.to_string({})) ("pop", pop.to_string({}))); } - // configure finalizer keys on controller for signing votes - control->set_node_finalizer_keys(finalizer_keys); + control->set_node_finalizer_keys(local_finalizer_keys); fc::mutable_variant_object fin_policy_variant; - fin_policy_variant("threshold", threshold); + fin_policy_variant("threshold", input.threshold); fin_policy_variant("finalizers", std::move(finalizer_auths)); return push_action( config::system_account_name, "setfinalizer"_n, config::system_account_name, diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index ab30bf3716..5111ecb420 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -261,13 +261,17 @@ namespace eosio { bool verify_catchup( const connection_ptr& c, uint32_t num, const block_id_type& id ); // locks mutex public: + enum class closing_mode { + immediately, // closing connection immediately + handshake // sending handshake message + }; explicit sync_manager( uint32_t span, uint32_t sync_peer_limit, uint32_t min_blocks_distance ); static void send_handshakes(); bool syncing_from_peer() const { return sync_state == lib_catchup; } bool is_in_sync() const { return sync_state == in_sync; } void sync_reset_lib_num( const connection_ptr& conn, bool closing ); void 
sync_reassign_fetch( const connection_ptr& c, go_away_reason reason ); - void rejected_block( const connection_ptr& c, uint32_t blk_num ); + void rejected_block( const connection_ptr& c, uint32_t blk_num, closing_mode mode ); void sync_recv_block( const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num, bool blk_applied ); void recv_handshake( const connection_ptr& c, const handshake_message& msg, uint32_t nblk_combined_latency ); void sync_recv_notice( const connection_ptr& c, const notice_message& msg ); @@ -2437,18 +2441,22 @@ namespace eosio { } // called from connection strand - void sync_manager::rejected_block( const connection_ptr& c, uint32_t blk_num ) { + void sync_manager::rejected_block( const connection_ptr& c, uint32_t blk_num, closing_mode mode ) { c->block_status_monitor_.rejected(); fc::unique_lock g( sync_mtx ); sync_last_requested_num = 0; if (blk_num < sync_next_expected_num) { sync_next_expected_num = my_impl->get_chain_lib_num(); } - if( c->block_status_monitor_.max_events_violated()) { + if( mode == closing_mode::immediately || c->block_status_monitor_.max_events_violated()) { peer_wlog( c, "block ${bn} not accepted, closing connection", ("bn", blk_num) ); sync_source.reset(); g.unlock(); - c->close(); + if( mode == closing_mode::immediately ) { + c->close( false ); // do not reconnect + } else { + c->close(); + } } else { g.unlock(); peer_dlog(c, "rejected block ${bn}, sending handshake", ("bn", blk_num)); @@ -3691,8 +3699,22 @@ namespace eosio { ("bn", block_header::num_from_id(msg.proposal_id))("id", msg.proposal_id.str().substr(8,16)) ("t", msg.strong ? 
"strong" : "weak")("k", msg.finalizer_key.to_string().substr(8, 16))); controller& cc = my_impl->chain_plug->chain(); - if( cc.process_vote_message(msg) ) { - my_impl->bcast_vote_message(connection_id, msg); + + switch( cc.process_vote_message(msg) ) { + case vote_status::success: + my_impl->bcast_vote_message(connection_id, msg); + break; + case vote_status::unknown_public_key: + case vote_status::invalid_signature: // close peer immediately + close( false ); // do not reconnect after closing + break; + case vote_status::unknown_block: // track the failure + block_status_monitor_.rejected(); + break; + case vote_status::duplicate: // do nothing + break; + default: + assert(false); // should never happen } } @@ -3746,9 +3768,15 @@ namespace eosio { std::optional obt; bool exception = false; + sync_manager::closing_mode close_mode = sync_manager::closing_mode::handshake; try { // this may return null if block is not immediately ready to be processed obt = cc.create_block_handle( id, ptr ); + } catch( const invalid_qc_claim &ex) { + exception = true; + close_mode = sync_manager::closing_mode::immediately; + fc_wlog( logger, "invalid QC claim exception, connection ${cid}: #${n} ${id}...: ${m}", + ("cid", cid)("n", ptr->block_num())("id", id.str().substr(8,16))("m",ex.to_string())); } catch( const fc::exception& ex ) { exception = true; fc_ilog( logger, "bad block exception connection ${cid}: #${n} ${id}...: ${m}", @@ -3759,8 +3787,8 @@ namespace eosio { ("cid", cid)("n", ptr->block_num())("id", id.str().substr(8,16))); } if( exception ) { - c->strand.post( [c, id, blk_num=ptr->block_num()]() { - my_impl->sync_master->rejected_block( c, blk_num ); + c->strand.post( [c, id, blk_num=ptr->block_num(), close_mode]() { + my_impl->sync_master->rejected_block( c, blk_num, close_mode ); my_impl->dispatcher->rejected_block( id ); }); return; @@ -3873,7 +3901,7 @@ namespace eosio { } // reason==no_reason means accept_block() return false because we are producing, don't call 
rejected_block which sends handshake if( reason != no_reason ) { - sync_master->rejected_block( c, blk_num ); + sync_master->rejected_block( c, blk_num, sync_manager::closing_mode::handshake ); } dispatcher->rejected_block( blk_id ); }); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 23fceeb4fc..a0a9831c3d 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -48,6 +48,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_producer_watermark_test.py ${C configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cli_test.py ${CMAKE_CURRENT_BINARY_DIR}/cli_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ship_test.py ${CMAKE_CURRENT_BINARY_DIR}/ship_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ship_streamer_test.py ${CMAKE_CURRENT_BINARY_DIR}/ship_streamer_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/bridge_for_fork_test_shape.json ${CMAKE_CURRENT_BINARY_DIR}/bridge_for_fork_test_shape.json COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/large-lib-test.py ${CMAKE_CURRENT_BINARY_DIR}/large-lib-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/http_plugin_test.py ${CMAKE_CURRENT_BINARY_DIR}/http_plugin_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_high_latency_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_high_latency_test.py COPYONLY) @@ -138,9 +139,8 @@ set_property(TEST ship_if_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME ship_streamer_test COMMAND tests/ship_streamer_test.py -v --num-clients 10 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST ship_streamer_test PROPERTY LABELS long_running_tests) -# TODO investigate failure: https://github.com/AntelopeIO/leap/issues/2172 -#add_test(NAME ship_streamer_if_test COMMAND tests/ship_streamer_test.py -v --num-clients 10 --activate-if ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -#set_property(TEST ship_streamer_if_test PROPERTY LABELS long_running_tests) +add_test(NAME ship_streamer_if_test COMMAND 
tests/ship_streamer_test.py -v --num-clients 10 --activate-if ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST ship_streamer_if_test PROPERTY LABELS long_running_tests) add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests) @@ -172,14 +172,12 @@ add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactio set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME distributed-transactions-speculative-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 --speculative -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST distributed-transactions-speculative-test PROPERTY LABELS nonparallelizable_tests) -# requires https://github.com/AntelopeIO/leap/issues/2175 -#add_test(NAME distributed-transactions-if-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 --activate-if -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -#set_property(TEST distributed-transactions-if-test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME distributed-transactions-if-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 --activate-if -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST distributed-transactions-if-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST restart-scenarios-test-resync PROPERTY LABELS nonparallelizable_tests) -# requires https://github.com/AntelopeIO/leap/issues/2175 -#add_test(NAME restart-scenarios-if-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --activate-if ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -#set_property(TEST restart-scenarios-if-test-resync PROPERTY LABELS 
nonparallelizable_tests) +add_test(NAME restart-scenarios-if-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --activate-if ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST restart-scenarios-if-test-resync PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST restart-scenarios-test-hard_replay PROPERTY LABELS nonparallelizable_tests) # requires https://github.com/AntelopeIO/leap/issues/2141 @@ -314,9 +312,8 @@ set_property(TEST nodeos_producer_watermark_if_lr_test PROPERTY LABELS long_runn add_test(NAME nodeos_high_transaction_lr_test COMMAND tests/nodeos_high_transaction_test.py -p 4 -n 8 --num-transactions 10000 --max-transactions-per-second 500 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_high_transaction_lr_test PROPERTY LABELS long_running_tests) -# requires https://github.com/AntelopeIO/leap/issues/2175 -#add_test(NAME nodeos_high_transaction_if_lr_test COMMAND tests/nodeos_high_transaction_test.py --activate-if -p 4 -n 8 --num-transactions 10000 --max-transactions-per-second 500 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -#set_property(TEST nodeos_high_transaction_if_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_high_transaction_if_lr_test COMMAND tests/nodeos_high_transaction_test.py --activate-if -p 4 -n 8 --num-transactions 10000 --max-transactions-per-second 500 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_high_transaction_if_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_retry_transaction_lr_test COMMAND tests/nodeos_retry_transaction_test.py -v --num-transactions 100 --max-transactions-per-second 10 --total-accounts 5 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_retry_transaction_lr_test PROPERTY LABELS 
long_running_tests) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index c678ca4364..792ee02430 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -1,4 +1,3 @@ -import atexit import copy import subprocess import time @@ -76,7 +75,6 @@ def __init__(self, localCluster=True, host="localhost", port=8888, walletHost="l keepRunning: [True|False] If true, leave nodes running when Cluster is destroyed. Implies keepLogs. keepLogs: [True|False] If true, retain log files after cluster shuts down. """ - atexit.register(self.shutdown) self.accounts=[] self.nodes=[] self.unstartedNodes=[] @@ -999,9 +997,9 @@ def activateInstantFinality(self, launcher, biosFinalizer, pnodes): # call setfinalizer numFins = 0 for n in launcher.network.nodes.values(): - if n.keys[0].blspubkey is None: + if not n.keys or not n.keys[0].blspubkey: continue - if len(n.producers) == 0: + if not n.producers: continue if n.index == Node.biosNodeId and not biosFinalizer: continue @@ -1020,9 +1018,9 @@ def activateInstantFinality(self, launcher, biosFinalizer, pnodes): for n in launcher.network.nodes.values(): if n.index == Node.biosNodeId and not biosFinalizer: continue - if n.keys[0].blspubkey is None: + if not n.keys or not n.keys[0].blspubkey: continue - if len(n.producers) == 0: + if not n.producers: continue setFinStr += f' {{"description": "finalizer #{finNum}", ' setFinStr += f' "weight":1, ' diff --git a/tests/TestHarness/TestHelper.py b/tests/TestHarness/TestHelper.py index 47a62d1471..fe98e97c1b 100644 --- a/tests/TestHarness/TestHelper.py +++ b/tests/TestHarness/TestHelper.py @@ -187,3 +187,5 @@ def reportProductionAnalysis(thresholdMs): cluster.testFailed = not testSuccessful if walletMgr: walletMgr.testFailed = not testSuccessful + + cluster.shutdown() diff --git a/tests/bridge_for_fork_test_shape.json b/tests/bridge_for_fork_test_shape.json new file mode 100644 index 0000000000..a97b7235f3 --- /dev/null +++ 
b/tests/bridge_for_fork_test_shape.json @@ -0,0 +1,126 @@ +{ + "name": "testnet_", + "ssh_helper": { + "ssh_cmd": "/usr/bin/ssh", + "scp_cmd": "/usr/bin/scp", + "ssh_identity": "", + "ssh_args": "" + }, + "nodes": { + "bios":{ + "name": "bios", + "keys": [ + { + "privkey":"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3", + "pubkey":"EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + } + ], + "peers": [], + "producers": [ + "eosio" + ], + "dont_start": false + }, + "testnet_00":{ + "name": "testnet_00", + "keys": [ + { + "privkey":"5Jf4sTk7vwX1MYpLJ2eQFanVvKYXFqGBrCyANPukuP2BJ5WAAKZ", + "pubkey":"EOS58B33q9S7oNkgeFfcoW3VJYu4obfDiqn5RHGE2ige6jVjUhymR", + "blspubkey":"PUB_BLS_2QQ72DAhKOWKfnBF77AnYn3GqD0M+Yh/05tqKNhqEQ0K4ixhIZ0rKbO2UuonqGAV1KYPgLzIfRz6zMD4iWI3FhOGE+UZ4Le5cELQ3NjOBFagG51XqM8Q1lpUqNanhCoDyfFnLg==", + "blsprivkey":"PVT_BLS_XwmVWf21N/j+hYJfo5+VHN1BtMY2wmKdQ7unaX/rzk+EJ5PX", + "blspop":"SIG_BLS_jvAPOOkvw19wuEzIt1ot8tn6aLeP55XQtSIY2eP3DMcZvEcdmlWVqNI/M8VNKL8RiN2F7XrRZ6O5cPPh4f3B/XfHOyUd3UXG3p++9m0tu0jCojtWQd6wJmTIR1LQ6DUWAQwBOx8Rd70FoznDEqJS/RZBV03x9FpBDQH7VB6BYs9UztynlWrL8LZaRbi8WNwF9CDzUJJsmOmHMnZO5qcTuo/cmSgV1X03bITdQ4IGq06yExBPepIX9ZZu5XH4QCIBo/fIcg==" + } + ], + "peers": [ + "bios", + "testnet_01", + "testnet_02", + "testnet_04" + ], + "producers": [ + "defproducera" + ], + "dont_start": false + }, + "testnet_01":{ + "name": "testnet_01", + "keys": [ + { + "privkey":"5HviUPkTEtvF2B1nm8aZUnjma2TzgpKRjuXjwHyy3FME4xDbkZF", + "pubkey":"EOS5CbcTDgbks2ptTxvyCbT9HFbzX7PDHUY2wN4DDnVBhhQr2ZNDE", + "blspubkey":"PUB_BLS_g86vgFO5G0bcRuaEA95kNFxnsHyzVSOthKKN8MSJ2zLWj+WfCbIBIO73OxgzjVsZarSuMQrcbVu2MktqF6PGlPkPaSuJGnES3FQ0OAfebOMAsPeAd23Ge/3+cPl2OVgXSmHdhA==", + "blsprivkey":"PVT_BLS_AtgyGDKJdQWvCNyGJgyu9bWpMS7eQE07zB2nGTlhZ0nCX11C", + 
"blspop":"SIG_BLS_pzPEYt1zLPVbofA1YABSPb1gJdvUdUhREa+pQsj2eTSaEBEnb+w+AwO0cQLgYSYWNWRePIUUvj5MCWqlfIU5ulBL8tqlwdCqQ0o6W915axLq2l1qnbFK/XfN9dRxdJgWPdl57bCGmoii25gdyobgLUZaJzPfivE6iQ981IgGACAb5CRdVH5hPZq8Rab1O64OclwCT/8ho8TdcKoSQj0njbAfp9JZxv5EyuAkaNIQun9rn+vH++37n+nDeV6UgCUEzex3cQ==" + } + ], + "peers": [ + "bios", + "testnet_00", + "testnet_02", + "testnet_04" + ], + "producers": [ + "defproducerb" + ], + "dont_start": false + }, + "testnet_02":{ + "name": "testnet_02", + "keys": [ + { + "privkey":"5KkQbdxFHr8Pg1N3DEMDdU7emFgUTwQvh99FDJrodFhUbbsAtQT", + "pubkey":"EOS6Tkpf8kcDfa32WA9B4nTcEJ64ZdDMSNioDcaL6rzdMwnpzaWJB", + "blspubkey":"PUB_BLS_PerMKMuQdZ3N6NEOoQRdlB1BztNWAeHkmzqwcFwEQGEM8QMfv3mrrepX5yM4NKQHYDnfcPIQPpDt0gCi6afvpZgN0JHT4jUaNlbfsJKtbm5mOJcqggddamCKEz2lBC0OS2D5yw==", + "blsprivkey":"PVT_BLS_n4AshIQiCqCdIOC/lGkKauVOFE2KelMb3flVvodVsji15FHW", + "blspop":"SIG_BLS_oqOzQYpJRvQ88ExpJKmwgna29eXM5umPpLmjfHcdcUUKwS3NMWwvP1hLwLcj4XcU6CuM3RzfRo6PPE2sxrp2fUWpqP0bsuamcOOyF+V6TfJMYuDbepc1Jp9HUdli3X0QE6hL+umbO2PWE4KiCSn9tj9LRyXgc41IY7R/JeQCCQSNXMSWhebdB/KCapfxq8sYEzRhXcZik+bXUDC1AcLXaocvNV6o2nKHtJwQ7YyGXCvFXgMMcQ3PWFlQ8WErmxILOM3Z/w==" + } + ], + "peers": [ + "bios", + "testnet_01", + "testnet_00", + "testnet_04" + ], + "producers": [ + "defproducerc" + ], + "dont_start": false + }, + "testnet_03":{ + "name": "testnet_03", + "keys": [ + { + "privkey":"5JxTJJegQBpEL1p77TzkN1ompMB9gDwAfjM9chPzFCB4chxmwrE", + "pubkey":"EOS52ntDHqA2qj4xVo7KmxdezMRhvvBqpZBuKYJCsgihisxmywpAx", + "blspubkey":"PUB_BLS_6C3UlotUoDwruilh6gE+qlKsqY7VrmT6eT3aTr9fC0kZUkQRo13/xMh7MZerbBED2Rho72BLHIaWnT01LLsCFIZg9pSyHBFt3EcKa4p6OyvTkQAFxNb739EYcTVx2n8Gi0d+iw==", + "blsprivkey":"PVT_BLS_Tw2Lozr/Qw2/uf13xo6vduAWqzJnWu2o0/s9WalErmkq4RPV", + 
"blspop":"SIG_BLS_mrKA0CFFTP3udLsaWH67ilVf/5dcCHfzJ+P8i+dEuVg4y+td8uyghJqDxnPoitMEjjSqP12kmSZciDXqWD+uGU7nY1YeDK5Tvi7cvd1qSOuATjDuW+amc/5SKp73NLsYwqVFcIex4XF+Quu/NRfDCfLj9ZRPtmuNAUevi2iz0ExeOkQTjQhKksb9ihN+6w4Wk0vJEjt0KbbW2Ny46J+P7PbanH34X9iCV3dT+lqnyp9om0hxKJJIH2R6P5hC2d8Ry8FBAw==" + } + ], + "peers": [ + "bios", + "testnet_04" + ], + "producers": [ + "defproducerd" + ], + "dont_start": false + }, + "testnet_04":{ + "name": "testnet_04", + "keys": [ + ], + "peers": [ + "bios", + "testnet_00", + "testnet_01", + "testnet_02", + "testnet_03" + ], + "producers": [ + ], + "dont_start": false + } + } +} diff --git a/tests/ship_streamer_test.py b/tests/ship_streamer_test.py index 932b8f3015..e7f38371db 100755 --- a/tests/ship_streamer_test.py +++ b/tests/ship_streamer_test.py @@ -7,16 +7,16 @@ import signal import sys -from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys +from TestHarness import Cluster, TestHelper, Utils, WalletMgr from TestHarness.TestHelper import AppArgs ############################################################### # ship_streamer_test # -# This test sets up 2 producing nodes and one "bridge" node using test_control_api_plugin. -# One producing node has 3 of the elected producers and the other has 1 of the elected producers. -# All the producers are named in alphabetical order, so that the 3 producers, in the one production node, are -# scheduled first, followed by the 1 producer in the other producer node. Each producing node is only connected +# This test sets up 4 producing nodes and one "bridge" node using test_control_api_plugin. +# One side of bridge has 3 of the elected producers and the other has 1 of the elected producers. +# All the producers are named in alphabetical order, so that the 3 producers, in the one production side, are +# scheduled first, followed by the 1 producer in the other producer node. Each producing side is only connected # to the other producing node via the "bridge" node. 
# The bridge node has the test_control_api_plugin, that the test uses to kill # the "bridge" node to generate a fork. @@ -39,7 +39,7 @@ dumpErrorDetails=args.dump_error_details walletPort=TestHelper.DEFAULT_WALLET_PORT -totalProducerNodes=2 +totalProducerNodes=4 totalNonProducerNodes=1 totalNodes=totalProducerNodes+totalNonProducerNodes maxActiveProducers=21 @@ -66,112 +66,71 @@ def getLatestSnapshot(nodeId): # *** setup topogrophy *** - # "bridge" shape connects defprocera through defproducerc (3 in node0) to each other and defproduceru (1 in node1) - # and the only connection between those 2 groups is through the bridge node + # "bridge" shape connects defproducera (node0) defproducerb (node1) defproducerc (node2) to each other and defproducerd (node3) + # and the only connection between those 2 groups is through the bridge (node4) - shipNodeNum = 1 + shipNodeNum = 3 specificExtraNodeosArgs={} specificExtraNodeosArgs[shipNodeNum]="--plugin eosio::state_history_plugin --trace-history --chain-state-history --state-history-stride 200 --plugin eosio::net_api_plugin --plugin eosio::producer_api_plugin " # producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin " - if cluster.launch(topo="bridge", pnodes=totalProducerNodes, - totalNodes=totalNodes, totalProducers=totalProducers, activateIF=activateIF, biosFinalizer=False, + if cluster.launch(topo="./tests/bridge_for_fork_test_shape.json", pnodes=totalProducerNodes, loadSystemContract=False, + totalNodes=totalNodes, totalProducers=totalProducerNodes, activateIF=activateIF, biosFinalizer=False, specificExtraNodeosArgs=specificExtraNodeosArgs) is False: Utils.cmdError("launcher") - Utils.errorExit("Failed to stand up eos cluster.") + Utils.errorExit("Failed to stand up cluster.") # *** identify each node (producers and non-producing node) *** - #verify nodes are in 
sync and advancing + # verify nodes are in sync and advancing cluster.waitOnClusterSync(blockAdvancing=5) Print("Cluster in Sync") - prodNode = cluster.getNode(0) - prodNode0 = prodNode - prodNode1 = cluster.getNode(1) - nonProdNode = cluster.getNode(2) + prodNode0 = cluster.getNode(0) + prodNode3 = cluster.getNode(3) + nonProdNode = cluster.getNode(4) shipNode = cluster.getNode(shipNodeNum) - - accounts=createAccountKeys(6) - if accounts is None: - Utils.errorExit("FAILURE - create keys") - - accounts[0].name="testeraaaaaa" - accounts[1].name="tester111111" # needed for voting - accounts[2].name="tester222222" # needed for voting - accounts[3].name="tester333333" # needed for voting - accounts[4].name="tester444444" # needed for voting - accounts[5].name="tester555555" # needed for voting - - testWalletName="test" - - Print(f"Creating wallet {testWalletName}.") - testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4],accounts[5]]) - - for _, account in cluster.defProducerAccounts.items(): - walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True) - - for i in range(0, totalNodes): - node=cluster.getNode(i) - node.producers=Cluster.parseProducers(i) - for prod in node.producers: - prodName = cluster.defProducerAccounts[prod].name - if prodName == "defproducera" or prodName == "defproducerb" or prodName == "defproducerc" or prodName == "defproduceru": - Print(f"Register producer {prodName}") - trans=node.regproducer(cluster.defProducerAccounts[prod], "http://mysite.com", 0, waitForTransBlock=False, exitOnError=True) - - # create accounts via eosio as otherwise a bid is needed - transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) - for account in accounts: - Print(f"Create new account {account.name} via {cluster.eosioAccount.name} with private key: {account.activePrivateKey}") - trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, 
waitForTransBlock=False, stakeNet=10000, stakeCPU=10000, buyRAM=10000000, exitOnError=True) - nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) - for account in accounts: - Print(f"Transfer funds {transferAmount} from account {cluster.eosioAccount.name} to {account.name}") - trans=nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=False) - nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) - for account in accounts: - trans=nonProdNode.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=False, exitOnError=True) - nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) - - # *** vote using accounts *** - - cluster.waitOnClusterSync(blockAdvancing=3) + # cluster.waitOnClusterSync(blockAdvancing=3) start_block_num = shipNode.getBlockNum() - # vote a,b,c (node0) u (node1) - voteProducers=[] - voteProducers.append("defproducera") - voteProducers.append("defproducerb") - voteProducers.append("defproducerc") - voteProducers.append("defproduceru") - for account in accounts: - Print(f"Account {account.name} vote for producers={voteProducers}") - trans=prodNode.vote(account, voteProducers, exitOnError=True, waitForTransBlock=False) - #verify nodes are in sync and advancing cluster.waitOnClusterSync(blockAdvancing=3) Print("Shutdown unneeded bios node") cluster.biosNode.kill(signal.SIGTERM) + Print("Create a jumbo row") + contract = "jumborow" + contractDir = "unittests/contracts/%s" % (contract) + wasmFile = "%s.wasm" % (contract) + abiFile = "%s.abi" % (contract) + + nonProdNode.publishContract(cluster.defproducerbAccount, contractDir, wasmFile, abiFile) + jumbotxn = { + + "actions": [{"account": "defproducerb","name": "jumbotime", + "authorization": [{"actor": "defproducerb","permission": "active"}], + "data": "", + "compression": "none"}] + } + nonProdNode.pushTransaction(jumbotxn) + Print("Configure and launch txn generators") targetTpsPerGenerator = 
10 testTrxGenDurationSec=60*60 numTrxGenerators=2 - cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[accounts[0].name, accounts[1].name], - acctPrivKeysList=[accounts[0].activePrivateKey,accounts[1].activePrivateKey], nodeId=prodNode1.nodeId, + cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[cluster.defproduceraAccount.name, cluster.defproducerbAccount.name], + acctPrivKeysList=[cluster.defproduceraAccount.activePrivateKey,cluster.defproducerbAccount.activePrivateKey], nodeId=prodNode3.nodeId, tpsPerGenerator=targetTpsPerGenerator, numGenerators=numTrxGenerators, durationSec=testTrxGenDurationSec, waitToComplete=False) - status = cluster.waitForTrxGeneratorsSpinup(nodeId=prodNode1.nodeId, numGenerators=numTrxGenerators) + status = cluster.waitForTrxGeneratorsSpinup(nodeId=prodNode3.nodeId, numGenerators=numTrxGenerators) assert status is not None and status is not False, "ERROR: Failed to spinup Transaction Generators" prodNode0.waitForProducer("defproducerc") - prodNode0.waitForProducer("defproducera") - block_range = 450 + block_range = 250 end_block_num = start_block_num + block_range shipClient = "tests/ship_streamer" @@ -196,23 +155,31 @@ def getLatestSnapshot(nodeId): Print(f"Client {i} started, Ship node head is: {shipNode.getBlockNum()}") # Generate a fork - prodNode1Prod="defproduceru" + prodNode3Prod= "defproducerd" preKillBlockNum=nonProdNode.getBlockNum() preKillBlockProducer=nonProdNode.getBlockProducerByNum(preKillBlockNum) - forkAtProducer="defproducer" + chr(ord(preKillBlockProducer[-1])+2) + forkAtProducer="defproducerb" nonProdNode.killNodeOnProducer(producer=forkAtProducer, whereInSequence=1) Print(f"Current block producer {preKillBlockProducer} fork will be at producer {forkAtProducer}") - prodNode0.waitForProducer(forkAtProducer) - prodNode1.waitForProducer(prodNode1Prod) - if nonProdNode.verifyAlive(): # if on defproducera, need to wait again - 
prodNode0.waitForProducer(forkAtProducer) - prodNode1.waitForProducer(prodNode1Prod) + prodNode0.waitForProducer("defproducera") + prodNode3.waitForProducer(prodNode3Prod) + if nonProdNode.verifyAlive(): + prodNode0.waitForProducer("defproducera") + prodNode3.waitForProducer(prodNode3Prod) if nonProdNode.verifyAlive(): Utils.errorExit("Bridge did not shutdown") Print("Fork started") - forkProgress="defproducer" + chr(ord(forkAtProducer[-1])+3) - prodNode0.waitForProducer(forkProgress) # wait for fork to progress a bit + prodNode0.waitForProducer("defproducerc") # wait for fork to progress a bit + restore0BlockNum = prodNode0.getBlockNum() + restore1BlockNum = prodNode3.getBlockNum() + restoreBlockNum = max(int(restore0BlockNum), int(restore1BlockNum)) + restore0LIB = prodNode0.getIrreversibleBlockNum() + restore1LIB = prodNode3.getIrreversibleBlockNum() + restoreLIB = max(int(restore0LIB), int(restore1LIB)) + + if int(restoreBlockNum) > int(end_block_num): + Utils.errorExit(f"Did not stream long enough {end_block_num} to cover the fork {restoreBlockNum}, increase block_range {block_range}") Print("Restore fork") Print("Relaunching the non-producing bridge node to connect the producing nodes again") @@ -222,10 +189,11 @@ def getLatestSnapshot(nodeId): Utils.errorExit(f"Failure - (non-production) node {nonProdNode.nodeNum} should have restarted") nonProdNode.waitForProducer(forkAtProducer) - nonProdNode.waitForProducer(prodNode1Prod) + nonProdNode.waitForProducer(prodNode3Prod) + nonProdNode.waitForIrreversibleBlock(restoreLIB+1) afterForkBlockNum = nonProdNode.getBlockNum() - if int(afterForkBlockNum) < int(end_block_num): - Utils.errorExit(f"Did not stream long enough {end_block_num} to cover the fork {afterForkBlockNum}, increase block_range {block_range}") + + assert shipNode.findInLog(f"successfully switched fork to new head"), f"No fork found in log {shipNode}" Print(f"Stopping all {args.num_clients} clients") for index, (popen, _), (out, err), start in 
zip(range(len(clients)), clients, files, starts): @@ -255,51 +223,55 @@ def getLatestSnapshot(nodeId): Print("Shutdown bridge node") nonProdNode.kill(signal.SIGTERM) - Print("Test starting ship from snapshot") - Utils.rmNodeDataDir(shipNodeNum) - isRelaunchSuccess = shipNode.relaunch(chainArg=" --snapshot {}".format(getLatestSnapshot(shipNodeNum))) - assert isRelaunchSuccess, "relaunch from snapshot failed" - - afterSnapshotBlockNum = shipNode.getBlockNum() - - Print("Verify we can stream from ship after start from a snapshot with no incoming trxs") - start_block_num = afterSnapshotBlockNum - block_range = 0 - end_block_num = start_block_num + block_range - cmd = f"{shipClient} --start-block-num {start_block_num} --end-block-num {end_block_num} --fetch-block --fetch-traces --fetch-deltas" - if Utils.Debug: Utils.Print(f"cmd: {cmd}") - clients = [] - files = [] - starts = [] - for i in range(0, args.num_clients): - start = time.perf_counter() - outFile = open(f"{shipClientFilePrefix}{i}_snapshot.out", "w") - errFile = open(f"{shipClientFilePrefix}{i}_snapshot.err", "w") - Print(f"Start client {i}") - popen=Utils.delayedCheckOutput(cmd, stdout=outFile, stderr=errFile) - starts.append(time.perf_counter()) - clients.append((popen, cmd)) - files.append((outFile, errFile)) - Print(f"Client {i} started, Ship node head is: {shipNode.getBlockNum()}") - - Print(f"Stopping all {args.num_clients} clients") - for index, (popen, _), (out, err), start in zip(range(len(clients)), clients, files, starts): - popen.wait() - Print(f"Stopped client {index}. 
Ran for {time.perf_counter() - start:.3f} seconds.") - out.close() - err.close() - outFile = open(f"{shipClientFilePrefix}{index}_snapshot.out", "r") - data = json.load(outFile) - block_num = start_block_num - for i in data: - # fork can cause block numbers to be repeated - this_block_num = i['get_blocks_result_v0']['this_block']['block_num'] - if this_block_num < block_num: - block_num = this_block_num - assert block_num == this_block_num, f"{block_num} != {this_block_num}" - assert isinstance(i['get_blocks_result_v0']['deltas'], str) # verify deltas in result - block_num += 1 - assert block_num-1 == end_block_num, f"{block_num-1} != {end_block_num}" + ## + ## Following requires https://github.com/AntelopeIO/leap/issues/1558 + ## + if not activateIF: + Print("Test starting ship from snapshot") + Utils.rmNodeDataDir(shipNodeNum) + isRelaunchSuccess = shipNode.relaunch(chainArg=" --snapshot {}".format(getLatestSnapshot(shipNodeNum))) + assert isRelaunchSuccess, "relaunch from snapshot failed" + + afterSnapshotBlockNum = shipNode.getBlockNum() + + Print("Verify we can stream from ship after start from a snapshot with no incoming trxs") + start_block_num = afterSnapshotBlockNum + block_range = 0 + end_block_num = start_block_num + block_range + cmd = f"{shipClient} --start-block-num {start_block_num} --end-block-num {end_block_num} --fetch-block --fetch-traces --fetch-deltas" + if Utils.Debug: Utils.Print(f"cmd: {cmd}") + clients = [] + files = [] + starts = [] + for i in range(0, args.num_clients): + start = time.perf_counter() + outFile = open(f"{shipClientFilePrefix}{i}_snapshot.out", "w") + errFile = open(f"{shipClientFilePrefix}{i}_snapshot.err", "w") + Print(f"Start client {i}") + popen=Utils.delayedCheckOutput(cmd, stdout=outFile, stderr=errFile) + starts.append(time.perf_counter()) + clients.append((popen, cmd)) + files.append((outFile, errFile)) + Print(f"Client {i} started, Ship node head is: {shipNode.getBlockNum()}") + + Print(f"Stopping all 
{args.num_clients} clients") + for index, (popen, _), (out, err), start in zip(range(len(clients)), clients, files, starts): + popen.wait() + Print(f"Stopped client {index}. Ran for {time.perf_counter() - start:.3f} seconds.") + out.close() + err.close() + outFile = open(f"{shipClientFilePrefix}{index}_snapshot.out", "r") + data = json.load(outFile) + block_num = start_block_num + for i in data: + # fork can cause block numbers to be repeated + this_block_num = i['get_blocks_result_v0']['this_block']['block_num'] + if this_block_num < block_num: + block_num = this_block_num + assert block_num == this_block_num, f"{block_num} != {this_block_num}" + assert isinstance(i['get_blocks_result_v0']['deltas'], str) # verify deltas in result + block_num += 1 + assert block_num-1 == end_block_num, f"{block_num-1} != {end_block_num}" testSuccessful = True finally: diff --git a/tests/trace_plugin_test.py b/tests/trace_plugin_test.py index baa4472112..60093b48c2 100755 --- a/tests/trace_plugin_test.py +++ b/tests/trace_plugin_test.py @@ -117,6 +117,7 @@ def setUpClass(self): @classmethod def tearDownClass(self): TraceApiPluginTest.cluster.testFailed = not testSuccessful + TraceApiPluginTest.cluster.shutdown() if __name__ == "__main__": unittest.main() diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index f070981b1e..889225715f 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -3917,4 +3917,118 @@ BOOST_AUTO_TEST_CASE(set_finalizer_test) { try { BOOST_CHECK_GT(lib, lib_after_transition); } FC_LOG_AND_RETHROW() } +void test_finality_transition(const vector& accounts, const base_tester::finalizer_policy_input& input, bool lib_advancing_expected) { + validating_tester t; + + uint32_t lib = 0; + t.control->irreversible_block.connect([&](const block_signal_params& t) { + const auto& [ block, id ] = t; + lib = block->block_num(); + }); + + t.produce_block(); + + // Create finalizer accounts + t.create_accounts(accounts); + t.produce_block(); + + // 
activate hotstuff + t.set_finalizers(input); + auto block = t.produce_block(); // this block contains the header extension for the instant finality + + std::optional ext = block->extract_header_extension(instant_finality_extension::extension_id()); + BOOST_TEST(!!ext); + std::optional fin_policy = std::get(*ext).new_finalizer_policy; + BOOST_TEST(!!fin_policy); + BOOST_TEST(fin_policy->finalizers.size() == accounts.size()); + BOOST_TEST(fin_policy->generation == 1); + + block = t.produce_block(); // hotstuff now active + BOOST_TEST(block->confirmed == 0); + auto fb = t.control->fetch_block_by_id(block->calculate_id()); + BOOST_REQUIRE(!!fb); + BOOST_TEST(fb == block); + ext = fb->extract_header_extension(instant_finality_extension::extension_id()); + BOOST_REQUIRE(ext); + + auto lib_after_transition = lib; + + t.produce_blocks(4); + if( lib_advancing_expected ) { + BOOST_CHECK_GT(lib, lib_after_transition); + } else { + BOOST_CHECK_EQUAL(lib, lib_after_transition); + } +} + +BOOST_AUTO_TEST_CASE(threshold_equal_to_half_weight_sum_test) { try { + vector account_names = { + "alice"_n, "bob"_n, "carol"_n + }; + + // threshold set to half of the weight sum of finalizers + base_tester::finalizer_policy_input policy_input = { + .finalizers = { {.name = "alice"_n, .weight = 1}, + {.name = "bob"_n, .weight = 2}, + {.name = "carol"_n, .weight = 3} }, + .threshold = 3, + .local_finalizers = {"alice"_n, "bob"_n} + }; + + // threshold must be greater than half of the sum of the weights + BOOST_REQUIRE_THROW( test_finality_transition(account_names, policy_input, false), eosio_assert_message_exception ); + +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE(votes_equal_to_threshold_test) { try { + vector account_names = { + "alice"_n, "bob"_n, "carol"_n + }; + + base_tester::finalizer_policy_input policy_input = { + .finalizers = { {.name = "alice"_n, .weight = 1}, + {.name = "bob"_n, .weight = 3}, + {.name = "carol"_n, .weight = 5} }, + .threshold = 5, + .local_finalizers = 
{"carol"_n} + }; + + // Carol votes with weight 5 and threshold 5 + test_finality_transition(account_names, policy_input, true); // lib_advancing_expected +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE(votes_greater_than_threshold_test) { try { + vector account_names = { + "alice"_n, "bob"_n, "carol"_n + }; + + base_tester::finalizer_policy_input policy_input = { + .finalizers = { {.name = "alice"_n, .weight = 1}, + {.name = "bob"_n, .weight = 4}, + {.name = "carol"_n, .weight = 2} }, + .threshold = 4, + .local_finalizers = {"alice"_n, "bob"_n} + }; + + // alice and bob vote with weight 5 and threshold 4 + test_finality_transition(account_names, policy_input, true); // lib_advancing_expected +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE(votes_less_than_threshold_test) { try { + vector account_names = { + "alice"_n, "bob"_n, "carol"_n + }; + + base_tester::finalizer_policy_input policy_input = { + .finalizers = { {.name = "alice"_n, .weight = 1}, + {.name = "bob"_n, .weight = 3}, + {.name = "carol"_n, .weight = 10} }, + .threshold = 8, + .local_finalizers = {"alice"_n, "bob"_n} + }; + + // alice and bob vote with weight 4 but threshold 8. LIB cannot advance + test_finality_transition(account_names, policy_input, false); // not expecting lib advancing +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/block_state_tests.cpp b/unittests/block_state_tests.cpp index eb1295e41a..dfe074afa4 100644 --- a/unittests/block_state_tests.cpp +++ b/unittests/block_state_tests.cpp @@ -49,7 +49,7 @@ BOOST_AUTO_TEST_CASE(aggregate_vote_test) try { bool strong = (i % 2 == 0); // alternate strong and weak auto sig = strong ? 
private_key[i].sign(strong_digest_data) : private_key[i].sign(weak_digest_data); vote_message vote{ block_id, strong, public_key[i], sig }; - BOOST_REQUIRE(bsp->aggregate_vote(vote).first); + BOOST_REQUIRE(bsp->aggregate_vote(vote).first == vote_status::success); } } @@ -60,7 +60,7 @@ BOOST_AUTO_TEST_CASE(aggregate_vote_test) try { bsp->pending_qc = pending_quorum_certificate{ num_finalizers, 1, bsp->active_finalizer_policy->max_weak_sum_before_weak_final() }; vote_message vote {block_id, true, public_key[0], private_key[1].sign(strong_digest_data) }; - BOOST_REQUIRE(!bsp->aggregate_vote(vote).first); + BOOST_REQUIRE(bsp->aggregate_vote(vote).first != vote_status::success); } { // duplicate votes @@ -70,8 +70,9 @@ BOOST_AUTO_TEST_CASE(aggregate_vote_test) try { bsp->pending_qc = pending_quorum_certificate{ num_finalizers, 1, bsp->active_finalizer_policy->max_weak_sum_before_weak_final() }; vote_message vote {block_id, true, public_key[0], private_key[0].sign(strong_digest_data) }; - BOOST_REQUIRE(bsp->aggregate_vote(vote).first); - BOOST_REQUIRE(!bsp->aggregate_vote(vote).first); + + BOOST_REQUIRE(bsp->aggregate_vote(vote).first == vote_status::success); // first time succeeds + BOOST_REQUIRE(bsp->aggregate_vote(vote).first != vote_status::success); // second time failed due to duplicate voting } { // public key does not exit in finalizer set @@ -84,7 +85,7 @@ BOOST_AUTO_TEST_CASE(aggregate_vote_test) try { bls_public_key new_public_key{ new_private_key.get_public_key() }; vote_message vote {block_id, true, new_public_key, private_key[0].sign(strong_digest_data) }; - BOOST_REQUIRE(!bsp->aggregate_vote(vote).first); + BOOST_REQUIRE(bsp->aggregate_vote(vote).first != vote_status::success); } } FC_LOG_AND_RETHROW(); @@ -129,7 +130,7 @@ void do_quorum_test(const std::vector& weights, if( to_vote[i] ) { auto sig = strong ? 
private_key[i].sign(strong_digest_data) : private_key[i].sign(weak_digest_data); vote_message vote{ block_id, strong, public_key[i], sig }; - BOOST_REQUIRE(bsp->aggregate_vote(vote).first); + BOOST_REQUIRE(bsp->aggregate_vote(vote).first == vote_status::success); } } diff --git a/unittests/contracts/CMakeLists.txt b/unittests/contracts/CMakeLists.txt index 9293c7cc13..60b1f6cc60 100644 --- a/unittests/contracts/CMakeLists.txt +++ b/unittests/contracts/CMakeLists.txt @@ -27,3 +27,4 @@ add_subdirectory(eosio.system) add_subdirectory(eosio.token) add_subdirectory(eosio.wrap) add_subdirectory(eosio.mechanics) +add_subdirectory(jumborow) \ No newline at end of file diff --git a/unittests/contracts/jumborow/CMakeLists.txt b/unittests/contracts/jumborow/CMakeLists.txt new file mode 100644 index 0000000000..f0a14a49ea --- /dev/null +++ b/unittests/contracts/jumborow/CMakeLists.txt @@ -0,0 +1,2 @@ +configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/jumborow.wasm ${CMAKE_CURRENT_BINARY_DIR}/jumborow.wasm COPYONLY ) +configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/jumborow.abi ${CMAKE_CURRENT_BINARY_DIR}/jumborow.abi COPYONLY ) diff --git a/unittests/contracts/jumborow/jumborow.abi b/unittests/contracts/jumborow/jumborow.abi new file mode 100644 index 0000000000..c8f7eaac7c --- /dev/null +++ b/unittests/contracts/jumborow/jumborow.abi @@ -0,0 +1,23 @@ +{ + "version": "eosio::abi/1.0", + "types": [], + "structs": [{ + "name": "jumbo", + "base": "", + "fields": [] + } + ], + "actions": [{ + "name": "jumbotime", + "type": "jumbo", + "ricardian_contract": "" + } + ], + "tables": [], + "ricardian_clauses": [], + "error_messages": [], + "abi_extensions": [], + "variants": [], + "action_results": [] +} + diff --git a/unittests/contracts/jumborow/jumborow.wasm b/unittests/contracts/jumborow/jumborow.wasm new file mode 100644 index 0000000000..050ccce0bb Binary files /dev/null and b/unittests/contracts/jumborow/jumborow.wasm differ diff --git a/unittests/contracts/jumborow/jumborow.wast 
b/unittests/contracts/jumborow/jumborow.wast new file mode 100644 index 0000000000..d9f0838e58 --- /dev/null +++ b/unittests/contracts/jumborow/jumborow.wast @@ -0,0 +1,8 @@ +(module + (import "env" "db_store_i64" (func $db_store_i64 (param i64 i64 i64 i64 i32 i32) (result i32))) + (memory $0 528) + (export "apply" (func $apply)) + (func $apply (param $receiver i64) (param $account i64) (param $action_name i64) + (drop (call $db_store_i64 (local.get $receiver) (local.get $receiver) (local.get $receiver) (i64.const 0) (i32.const 1) (i32.const 34603007))) + ) +) diff --git a/unittests/contracts/test_wasts.hpp b/unittests/contracts/test_wasts.hpp index 1eab70405e..94f5a9f249 100644 --- a/unittests/contracts/test_wasts.hpp +++ b/unittests/contracts/test_wasts.hpp @@ -990,4 +990,15 @@ static const char negative_memory_grow_trap_wast[] = R"=====( ) ) ) +)====="; + +static const char set_jumbo_row_wast[] = R"=====( +(module + (import "env" "db_store_i64" (func $db_store_i64 (param i64 i64 i64 i64 i32 i32) (result i32))) + (memory $0 528) + (export "apply" (func $apply)) + (func $apply (param $receiver i64) (param $account i64) (param $action_name i64) + (drop (call $db_store_i64 (get_local $receiver) (get_local $receiver) (get_local $receiver) (i64.const 0) (i32.const 1) (i32.const 34603007))) + ) +) )====="; \ No newline at end of file diff --git a/unittests/producer_schedule_if_tests.cpp b/unittests/producer_schedule_if_tests.cpp index a0d54f1c4b..9d84ded4af 100644 --- a/unittests/producer_schedule_if_tests.cpp +++ b/unittests/producer_schedule_if_tests.cpp @@ -19,9 +19,6 @@ inline account_name get_expected_producer(const vector& sche } // anonymous namespace -#warning TODO Enable test, currently SEGFAULTing https://github.com/AntelopeIO/leap/issues/2175 -#if 0 - BOOST_FIXTURE_TEST_CASE( verify_producer_schedule_after_instant_finality_activation, validating_tester ) try { // Utility function to ensure that producer schedule work as expected @@ -110,249 +107,88 @@ 
BOOST_FIXTURE_TEST_CASE( verify_producer_schedule_after_instant_finality_activat } FC_LOG_AND_RETHROW() -/** TODO: Enable tests after instant_finality LIB is working +bool compare_schedules( const vector& a, const producer_authority_schedule& b ) { + return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); +}; -BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, validating_tester ) try { +BOOST_FIXTURE_TEST_CASE( proposer_policy_progression_test, validating_tester ) try { create_accounts( {"alice"_n,"bob"_n,"carol"_n} ); + while (control->head_block_num() < 3) { produce_block(); } // activate instant_finality set_finalizers({"alice"_n,"bob"_n,"carol"_n}); - auto block = produce_block(); // this block contains the header extension of the finalizer set + produce_block(); // this block contains the header extension of the finalizer set - auto compare_schedules = [&]( const vector& a, const producer_authority_schedule& b ) { - return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); - }; + // current proposer schedule stays the same as the one prior to IF transition + vector prev_sch = { + producer_authority{"eosio"_n, block_signing_authority_v0{1, {{get_public_key("eosio"_n, "active"), 1}}}}}; + BOOST_CHECK_EQUAL( true, compare_schedules( prev_sch, control->active_producers() ) ); - auto res = set_producers( {"alice"_n,"bob"_n} ); + // set a new proposer policy sch1 + set_producers( {"alice"_n} ); vector sch1 = { - producer_authority{"alice"_n, block_signing_authority_v0{1, {{get_public_key("alice"_n, "active"), 1}}}}, - producer_authority{"bob"_n, block_signing_authority_v0{1, {{get_public_key("bob"_n, "active"), 1}}}} + producer_authority{"alice"_n, block_signing_authority_v0{1, {{get_public_key("alice"_n, "active"), 1}}}} }; - //wdump((fc::json::to_pretty_string(res))); - wlog("set producer schedule to [alice,bob]"); - BOOST_REQUIRE_EQUAL( true, control->proposed_producers().has_value() ); - BOOST_CHECK_EQUAL( 
true, compare_schedules( sch1, *control->proposed_producers() ) ); - BOOST_CHECK_EQUAL( control->pending_producers().version, 0u ); - produce_block(); // Starts new block which promotes the proposed schedule to pending - BOOST_CHECK_EQUAL( control->pending_producers().version, 1u ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->pending_producers() ) ); - BOOST_CHECK_EQUAL( control->active_producers().version, 0u ); - produce_block(); - produce_block(); // Starts new block which promotes the pending schedule to active - BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->active_producers() ) ); - produce_blocks(6); - res = set_producers( {"alice"_n,"bob"_n,"carol"_n} ); + // start a round of production + produce_blocks(config::producer_repetitions); + + // sch1 cannot become active before one round of production + BOOST_CHECK_EQUAL( true, compare_schedules( prev_sch, control->active_producers() ) ); + + // set another policy to have multiple pending different active time policies + set_producers( {"bob"_n,"carol"_n} ); vector sch2 = { - producer_authority{"alice"_n, block_signing_authority_v0{1, {{get_public_key("alice"_n, "active"),1}}}}, - producer_authority{"bob"_n, block_signing_authority_v0{1, {{get_public_key("bob"_n, "active"),1}}}}, - producer_authority{"carol"_n, block_signing_authority_v0{1, {{get_public_key("carol"_n, "active"),1}}}} + producer_authority{"bob"_n, block_signing_authority_v0{ 1, {{get_public_key("bob"_n, "active"),1}}}}, + producer_authority{"carol"_n, block_signing_authority_v0{ 1, {{get_public_key("carol"_n, "active"),1}}}} }; - wlog("set producer schedule to [alice,bob,carol]"); - BOOST_REQUIRE_EQUAL( true, control->proposed_producers().has_value() ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch2, *control->proposed_producers() ) ); - produce_block(); - produce_blocks(23); // Alice produces the last block of her first round. 
- // Bob's first block (which advances LIB to Alice's last block) is started but not finalized. - BOOST_REQUIRE_EQUAL( control->head_block_producer(), "alice"_n ); - BOOST_REQUIRE_EQUAL( control->pending_block_producer(), "bob"_n ); - BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); + // another round + produce_blocks(config::producer_repetitions); - produce_blocks(12); // Bob produces his first 11 blocks + // sch1 must become active no later than 2 rounds but sch2 cannot become active yet BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); - produce_blocks(12); // Bob produces his 12th block. - // Alice's first block of the second round is started but not finalized (which advances LIB to Bob's last block). - BOOST_REQUIRE_EQUAL( control->head_block_producer(), "alice"_n ); - BOOST_REQUIRE_EQUAL( control->pending_block_producer(), "bob"_n ); - BOOST_CHECK_EQUAL( control->active_producers().version, 2u ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->active_producers() ) ); - - produce_block(); // Alice produces the first block of her second round which has changed the active schedule. + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->active_producers() ) ); - // The next block will be produced according to the new schedule - produce_block(); - BOOST_CHECK_EQUAL( control->head_block_producer(), "carol"_n ); // And that next block happens to be produced by Carol. 
+ produce_blocks(config::producer_repetitions); - BOOST_REQUIRE_EQUAL( validate(), true ); + // sch2 becomes active + BOOST_CHECK_EQUAL( control->active_producers().version, 2u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->active_producers() ) ); } FC_LOG_AND_RETHROW() -BOOST_AUTO_TEST_CASE( producer_watermark_test ) try { - tester c; - c.create_accounts( {"alice"_n,"bob"_n,"carol"_n} ); - c.produce_block(); - - // activate instant_finality - c.set_finalizers({"alice"_n,"bob"_n,"carol"_n}); - auto block = c.produce_block(); // this block contains the header extension of the finalizer set - - auto compare_schedules = [&]( const vector& a, const producer_authority_schedule& b ) { - return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); - }; +BOOST_FIXTURE_TEST_CASE( proposer_policy_misc_tests, validating_tester ) try { + create_accounts( {"alice"_n,"bob"_n} ); - auto res = c.set_producers( {"alice"_n,"bob"_n,"carol"_n} ); - vector sch1 = { - producer_authority{"alice"_n, block_signing_authority_v0{ 1, {{c.get_public_key("alice"_n, "active"),1}}}}, - producer_authority{"bob"_n, block_signing_authority_v0{ 1, {{c.get_public_key("bob"_n, "active"),1}}}}, - producer_authority{"carol"_n, block_signing_authority_v0{ 1, {{c.get_public_key("carol"_n, "active"),1}}}} - }; - wlog("set producer schedule to [alice,bob,carol]"); - BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().has_value() ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *c.control->proposed_producers() ) ); - BOOST_CHECK_EQUAL( c.control->pending_producers().version, 0u ); - c.produce_block(); // Starts new block which promotes the proposed schedule to pending - BOOST_CHECK_EQUAL( c.control->pending_producers().version, 1u ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch1, c.control->pending_producers() ) ); - BOOST_CHECK_EQUAL( c.control->active_producers().version, 0u ); - c.produce_block(); - c.produce_block(); // Starts new block which promotes 
the pending schedule to active - BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 1u ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch1, c.control->active_producers() ) ); - - produce_until_transition( c, "carol"_n, "alice"_n ); - c.produce_block(); - produce_until_transition( c, "carol"_n, "alice"_n ); - - res = c.set_producers( {"alice"_n,"bob"_n} ); - vector sch2 = { - producer_authority{"alice"_n, block_signing_authority_v0{ 1, {{c.get_public_key("alice"_n, "active"),1}}}}, - producer_authority{"bob"_n, block_signing_authority_v0{ 1, {{c.get_public_key("bob"_n, "active"),1}}}} - }; - wlog("set producer schedule to [alice,bob]"); - BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().has_value() ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch2, *c.control->proposed_producers() ) ); - - produce_until_transition( c, "bob"_n, "carol"_n ); - produce_until_transition( c, "alice"_n, "bob"_n ); - BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); - BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); - - produce_until_transition( c, "carol"_n, "alice"_n ); - BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); - BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); - - produce_until_transition( c, "bob"_n, "carol"_n ); - BOOST_CHECK_EQUAL( c.control->pending_block_producer(), "carol"_n ); - BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 2u ); - - auto carol_last_produced_block_num = c.control->head_block_num() + 1; - wdump((carol_last_produced_block_num)); - - c.produce_block(); - BOOST_CHECK( c.control->pending_block_producer() == "alice"_n ); - - res = c.set_producers( {"alice"_n,"bob"_n,"carol"_n} ); - wlog("set producer schedule to [alice,bob,carol]"); - BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().has_value() ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *c.control->proposed_producers() ) ); - - produce_until_transition( c, "bob"_n, "alice"_n ); - - auto 
bob_last_produced_block_num = c.control->head_block_num(); - wdump((bob_last_produced_block_num)); - - produce_until_transition( c, "alice"_n, "bob"_n ); - - auto alice_last_produced_block_num = c.control->head_block_num(); - wdump((alice_last_produced_block_num)); - - { - wdump((c.control->head_block_state()->producer_to_last_produced)); - const auto& last_produced = c.control->head_block_state()->producer_to_last_produced; - auto alice_itr = last_produced.find( "alice"_n ); - BOOST_REQUIRE( alice_itr != last_produced.end() ); - BOOST_CHECK_EQUAL( alice_itr->second, alice_last_produced_block_num ); - auto bob_itr = last_produced.find( "bob"_n ); - BOOST_REQUIRE( bob_itr != last_produced.end() ); - BOOST_CHECK_EQUAL( bob_itr->second, bob_last_produced_block_num ); - auto carol_itr = last_produced.find( "carol"_n ); - BOOST_REQUIRE( carol_itr != last_produced.end() ); - BOOST_CHECK_EQUAL( carol_itr->second, carol_last_produced_block_num ); + while (control->head_block_num() < 3) { + produce_block(); } - BOOST_CHECK_EQUAL( c.control->pending_producers().version, 3u ); - BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 2u ); - - produce_until_transition( c, "bob"_n, "alice"_n ); - BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 3u ); - - produce_until_transition( c, "alice"_n, "bob"_n ); - c.produce_blocks(11); - BOOST_CHECK_EQUAL( c.control->pending_block_producer(), "bob"_n ); - c.finish_block(); - - auto carol_block_num = c.control->head_block_num() + 1; - auto carol_block_time = c.control->head_block_time() + fc::milliseconds(config::block_interval_ms); - auto confirmed = carol_block_num - carol_last_produced_block_num - 1; - - c.control->start_block( carol_block_time, confirmed, {}, controller::block_status::incomplete ); - BOOST_CHECK_EQUAL( c.control->pending_block_producer(), "carol"_n ); - c.produce_block(); - auto h = c.control->head_block_header(); - - BOOST_CHECK_EQUAL( h.producer, "carol"_n ); - BOOST_CHECK_EQUAL( h.confirmed, 
confirmed ); - - produce_until_transition( c, "carol"_n, "alice"_n ); - -} FC_LOG_AND_RETHROW() - -BOOST_FIXTURE_TEST_CASE( producer_one_of_n_test, validating_tester ) try { - create_accounts( {"alice"_n,"bob"_n} ); - produce_block(); - // activate instant_finality set_finalizers({"alice"_n,"bob"_n}); - auto block = produce_block(); // this block contains the header extension of the finalizer set - - vector sch1 = { - producer_authority{"alice"_n, block_signing_authority_v0{1, {{get_public_key("alice"_n, "bs1"), 1}, {get_public_key("alice"_n, "bs2"), 1}}}}, - producer_authority{"bob"_n, block_signing_authority_v0{1, {{get_public_key("bob"_n, "bs1"), 1}, {get_public_key("bob"_n, "bs2"), 1}}}} - }; + produce_block(); // this block contains the header extension of the finalizer set - auto res = set_producer_schedule( sch1 ); - block_signing_private_keys.emplace(get_public_key("alice"_n, "bs1"), get_private_key("alice"_n, "bs1")); - block_signing_private_keys.emplace(get_public_key("bob"_n, "bs1"), get_private_key("bob"_n, "bs1")); + { // set multiple policies in the same block. 
The last one will be chosen + set_producers( {"alice"_n} ); + set_producers( {"bob"_n} ); - BOOST_REQUIRE(produce_until_blocks_from(*this, {"alice"_n, "bob"_n}, 100)); + produce_blocks(2 * config::producer_repetitions); - BOOST_REQUIRE_EQUAL( validate(), true ); -} FC_LOG_AND_RETHROW() - -BOOST_FIXTURE_TEST_CASE( producer_m_of_n_test, validating_tester ) try { - create_accounts( {"alice"_n,"bob"_n} ); - produce_block(); - - // activate instant_finality - set_finalizers({"alice"_n,"bob"_n}); - auto block = produce_block(); // this block contains the header extension of the finalizer set - - vector sch1 = { - producer_authority{"alice"_n, block_signing_authority_v0{2, {{get_public_key("alice"_n, "bs1"), 1}, {get_public_key("alice"_n, "bs2"), 1}}}}, - producer_authority{"bob"_n, block_signing_authority_v0{2, {{get_public_key("bob"_n, "bs1"), 1}, {get_public_key("bob"_n, "bs2"), 1}}}} + vector sch = { + producer_authority{"bob"_n, block_signing_authority_v0{1, {{get_public_key("bob"_n, "active"), 1}}}} }; + BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch, control->active_producers() ) ); + } - auto res = set_producer_schedule( sch1 ); - block_signing_private_keys.emplace(get_public_key("alice"_n, "bs1"), get_private_key("alice"_n, "bs1")); - block_signing_private_keys.emplace(get_public_key("alice"_n, "bs2"), get_private_key("alice"_n, "bs2")); - block_signing_private_keys.emplace(get_public_key("bob"_n, "bs1"), get_private_key("bob"_n, "bs1")); - block_signing_private_keys.emplace(get_public_key("bob"_n, "bs2"), get_private_key("bob"_n, "bs2")); - - BOOST_REQUIRE(produce_until_blocks_from(*this, {"alice"_n, "bob"_n}, 100)); - - BOOST_REQUIRE_EQUAL( validate(), true ); -} FC_LOG_AND_RETHROW() - -**/ -#endif + { // unknown account in proposer policy + BOOST_CHECK_THROW( set_producers({"carol"_n}), wasm_execution_error ); + } -BOOST_FIXTURE_TEST_CASE( tmp_placeholder, validating_tester ) try { - // avoid: 
Test setup error: no test cases matching filter or all test cases were disabled } FC_LOG_AND_RETHROW() BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp index 19f8daed91..5b41df5c64 100644 --- a/unittests/snapshot_tests.cpp +++ b/unittests/snapshot_tests.cpp @@ -11,6 +11,7 @@ #include #include +#include "test_wasts.hpp" using namespace eosio; using namespace testing; @@ -656,4 +657,37 @@ BOOST_AUTO_TEST_CASE(json_snapshot_validity_test) remove(json_snap_path); } +BOOST_AUTO_TEST_CASE_TEMPLATE(jumbo_row, SNAPSHOT_SUITE, snapshot_suites) +{ + fc::temp_directory tempdir; + auto config = tester::default_config(tempdir); + config.first.state_size = 64*1024*1024; + tester chain(config.first, config.second); + chain.execute_setup_policy(setup_policy::full); + + chain.create_accounts({"jumbo"_n}); + chain.set_code("jumbo"_n, set_jumbo_row_wast); + chain.produce_blocks(1); + + signed_transaction trx; + action act; + act.account = "jumbo"_n; + act.name = "jumbo"_n; + act.authorization = vector{{"jumbo"_n,config::active_name}}; + trx.actions.push_back(act); + + chain.set_transaction_headers(trx); + trx.sign(tester::get_private_key("jumbo"_n, "active"), chain.control->get_chain_id()); + chain.push_transaction(trx); + chain.produce_blocks(1); + + chain.control->abort_block(); + + auto writer = SNAPSHOT_SUITE::get_writer(); + chain.control->write_snapshot(writer); + auto snapshot = SNAPSHOT_SUITE::finalize(writer); + + snapshotted_tester sst(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), 0); +} + BOOST_AUTO_TEST_SUITE_END()