Merge remote-tracking branch 'origin/release/5.0' into GH-1690-perf-5.0
heifner committed Oct 25, 2023
2 parents bfe2a4c + 81a9d5c commit c59625d
Showing 4 changed files with 122 additions and 89 deletions.
72 changes: 48 additions & 24 deletions .github/workflows/build.yaml
@@ -39,7 +39,9 @@ jobs:
contents: read
with:
runs-on: '["self-hosted", "enf-x86-beefy"]'
platform-files: .cicd/platforms
platform-files: |
.cicd/platforms
tools/reproducible.Dockerfile:builder
build-base:
name: Run Build Workflow
@@ -77,13 +79,13 @@ jobs:
echo eos-system-contracts-ref=${{inputs.override-eos-system-contracts}} >> $GITHUB_OUTPUT
fi
dev-package:
name: Build leap-dev package
package:
name: Build deb packages
needs: [platform-cache, build-base]
strategy:
fail-fast: false
matrix:
platform: [ubuntu20, ubuntu22]
platform: [ubuntu20, ubuntu22, reproducible]
runs-on: ubuntu-latest
container: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.platform].image}}
steps:
@@ -94,41 +96,55 @@
uses: actions/download-artifact@v3
with:
name: ${{matrix.platform}}-build
- name: Build dev package
- name: Build packages
run: |
zstdcat build.tar.zst | tar x
cd build
cpack
../tools/tweak-deb.sh leap_*.deb
- name: Install dev package
if: matrix.platform != 'reproducible'
run: |
apt-get update && apt-get upgrade -y
apt-get install -y ./build/leap_*.deb ./build/leap-dev*.deb
- name: Test using TestHarness
if: matrix.platform != 'reproducible'
run: |
python3 -c "from TestHarness import Cluster"
- name: Upload dev package
uses: actions/upload-artifact@v3
if: matrix.platform != 'reproducible'
with:
name: leap-dev-${{matrix.platform}}-amd64
path: build/leap-dev*.deb
- name: Upload leap package
uses: actions/upload-artifact@v3
if: matrix.platform == 'reproducible'
with:
name: leap-deb-amd64
path: build/leap_*.deb

tests:
name: Tests
name: Tests (${{matrix.cfg.name}})
needs: [platform-cache, build-base]
strategy:
fail-fast: false
matrix:
platform: [ubuntu20, ubuntu22]
include:
- cfg: {name: 'ubuntu20', base: 'ubuntu20', builddir: 'ubuntu20'}
- cfg: {name: 'ubuntu22', base: 'ubuntu22', builddir: 'ubuntu22'}
- cfg: {name: 'ubuntu20repro', base: 'ubuntu20', builddir: 'reproducible'}
- cfg: {name: 'ubuntu22repro', base: 'ubuntu22', builddir: 'reproducible'}
runs-on: ["self-hosted", "enf-x86-hightier"]
container:
image: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.platform].image}}
image: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.cfg.base].image}}
options: --security-opt seccomp=unconfined
steps:
- uses: actions/checkout@v3
- name: Download builddir
uses: actions/download-artifact@v3
with:
name: ${{matrix.platform}}-build
name: ${{matrix.cfg.builddir}}-build
- name: Run Parallel Tests
run: |
# https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs
@@ -140,66 +156,74 @@ jobs:
run: awk 'BEGIN {err = 1} /bmi2/ && /adx/ {err = 0} END {exit err}' /proc/cpuinfo

np-tests:
name: NP Tests
name: NP Tests (${{matrix.cfg.name}})
needs: [platform-cache, build-base]
strategy:
fail-fast: false
matrix:
platform: [ubuntu20, ubuntu22]
include:
- cfg: {name: 'ubuntu20', base: 'ubuntu20', builddir: 'ubuntu20'}
- cfg: {name: 'ubuntu22', base: 'ubuntu22', builddir: 'ubuntu22'}
- cfg: {name: 'ubuntu20repro', base: 'ubuntu20', builddir: 'reproducible'}
- cfg: {name: 'ubuntu22repro', base: 'ubuntu22', builddir: 'reproducible'}
runs-on: ["self-hosted", "enf-x86-midtier"]
steps:
- uses: actions/checkout@v3
- name: Download builddir
uses: actions/download-artifact@v3
with:
name: ${{matrix.platform}}-build
name: ${{matrix.cfg.builddir}}-build
- name: Run tests in parallel containers
uses: ./.github/actions/parallel-ctest-containers
with:
container: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.platform].image}}
container: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.cfg.base].image}}
error-log-paths: '["build/etc", "build/var", "build/leap-ignition-wd", "build/TestLogs"]'
log-tarball-prefix: ${{matrix.platform}}
log-tarball-prefix: ${{matrix.cfg.name}}
tests-label: nonparallelizable_tests
test-timeout: 420
- name: Upload logs from failed tests
uses: actions/upload-artifact@v3
if: failure()
with:
name: ${{matrix.platform}}-np-logs
name: ${{matrix.cfg.name}}-np-logs
path: '*-logs.tar.gz'

lr-tests:
name: LR Tests
name: LR Tests (${{matrix.cfg.name}})
needs: [platform-cache, build-base]
strategy:
fail-fast: false
matrix:
platform: [ubuntu20, ubuntu22]
include:
- cfg: {name: 'ubuntu20', base: 'ubuntu20', builddir: 'ubuntu20'}
- cfg: {name: 'ubuntu22', base: 'ubuntu22', builddir: 'ubuntu22'}
- cfg: {name: 'ubuntu20repro', base: 'ubuntu20', builddir: 'reproducible'}
- cfg: {name: 'ubuntu22repro', base: 'ubuntu22', builddir: 'reproducible'}
runs-on: ["self-hosted", "enf-x86-lowtier"]
steps:
- uses: actions/checkout@v3
- name: Download builddir
uses: actions/download-artifact@v3
with:
name: ${{matrix.platform}}-build
name: ${{matrix.cfg.builddir}}-build
- name: Run tests in parallel containers
uses: ./.github/actions/parallel-ctest-containers
with:
container: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.platform].image}}
container: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.cfg.base].image}}
error-log-paths: '["build/etc", "build/var", "build/leap-ignition-wd", "build/TestLogs"]'
log-tarball-prefix: ${{matrix.platform}}
log-tarball-prefix: ${{matrix.cfg.name}}
tests-label: long_running_tests
test-timeout: 1800
- name: Upload logs from failed tests
uses: actions/upload-artifact@v3
if: failure()
with:
name: ${{matrix.platform}}-lr-logs
name: ${{matrix.cfg.name}}-lr-logs
path: '*-logs.tar.gz'

libtester-tests:
name: libtester tests
needs: [platform-cache, build-base, v, dev-package]
needs: [platform-cache, build-base, v, package]
strategy:
fail-fast: false
matrix:
@@ -290,9 +314,9 @@ jobs:
all-passing:
name: All Required Tests Passed
needs: [dev-package, tests, np-tests, libtester-tests]
needs: [tests, np-tests, libtester-tests]
if: always()
runs-on: ubuntu-latest
steps:
- if: needs.dev-package.result != 'success' || needs.tests.result != 'success' || needs.np-tests.result != 'success' || needs.libtester-tests.result != 'success'
- if: needs.tests.result != 'success' || needs.np-tests.result != 'success' || needs.libtester-tests.result != 'success'
run: false
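
For reference, the non-reproducible path of the renamed package job boils down to the shell sequence below. This is a minimal local sketch, assuming a leap source checkout, a build.tar.zst artifact downloaded from the build-base job, and a Debian-based environment with zstd, cpack, and apt available:

zstdcat build.tar.zst | tar x                      # unpack the build directory produced by build-base
cd build && cpack && cd ..                         # produce leap_*.deb and leap-dev*.deb
./tools/tweak-deb.sh build/leap_*.deb              # same post-processing the workflow applies
apt-get update && apt-get install -y ./build/leap_*.deb ./build/leap-dev*.deb
python3 -c "from TestHarness import Cluster"       # smoke-test that the dev package is importable

For the reproducible platform the workflow stops after cpack and tweak-deb.sh, skips the install and TestHarness steps, and uploads only build/leap_*.deb.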
3 changes: 1 addition & 2 deletions tests/test_read_only_trx.cpp
@@ -118,6 +118,7 @@ void test_trxs_common(std::vector<const char*>& specific_args, bool test_disable
BOOST_CHECK(!"app threw exception see logged error");
} );
fc::scoped_exit<std::function<void()>> on_except = [&](){
app->quit();
if (app_thread.joinable())
app_thread.join();
};
@@ -166,8 +167,6 @@ void test_trxs_common(std::vector<const char*>& specific_args, bool test_disable
while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){
std::this_thread::sleep_for( 100ms );
}

app->quit();
}

BOOST_CHECK_EQUAL( trace_with_except, 0u ); // should not have any traces with except in it
128 changes: 68 additions & 60 deletions tests/test_snapshot_scheduler.cpp
@@ -60,6 +60,9 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) {
std::promise<std::tuple<producer_plugin*, chain_plugin*>> plugin_promise;
std::future<std::tuple<producer_plugin*, chain_plugin*>> plugin_fut = plugin_promise.get_future();

std::promise<void> at_block_20_promise;
std::future<void> at_block_20_fut = at_block_20_promise.get_future();

std::thread app_thread([&]() {
try {
fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug);
@@ -68,54 +71,55 @@
"-p", "eosio", "-e"};
app->initialize<chain_plugin, producer_plugin>(argv.size(), (char**) &argv[0]);
app->startup();
plugin_promise.set_value(
{app->find_plugin<producer_plugin>(), app->find_plugin<chain_plugin>()});

producer_plugin* prod_plug = app->find_plugin<producer_plugin>();
chain_plugin* chain_plug = app->find_plugin<chain_plugin>();
plugin_promise.set_value({prod_plug, chain_plug});

auto bs = chain_plug->chain().block_start.connect([&prod_plug, &at_block_20_promise](uint32_t bn) {
if(bn == 20u)
at_block_20_promise.set_value();
// catching pending snapshot
if (!prod_plug->get_snapshot_requests().snapshot_requests.empty()) {
const auto& snapshot_requests = prod_plug->get_snapshot_requests().snapshot_requests;

auto validate_snapshot_request = [&](uint32_t sid, uint32_t block_num, uint32_t spacing = 0, bool fuzzy_start = false) {
auto it = find_if(snapshot_requests.begin(), snapshot_requests.end(), [sid](const snapshot_scheduler::snapshot_schedule_information& obj) {return obj.snapshot_request_id == sid;});
if (it != snapshot_requests.end()) {
auto& pending = it->pending_snapshots;
if (pending.size()==1u) {
// pending snapshot block number
auto pbn = pending.begin()->head_block_num;

// first pending snapshot
auto ps_start = (spacing != 0) ? (spacing + (pbn%spacing)) : pbn;

if (!fuzzy_start) {
BOOST_CHECK_EQUAL(block_num, ps_start);
}
else {
int diff = block_num - ps_start;
BOOST_CHECK(std::abs(diff) <= 5); // accept +/- 5 blocks if start block not specified
}
}
return true;
}
return false;
};

BOOST_REQUIRE(validate_snapshot_request(0, 9, 8)); // snapshot #0 should have pending snapshot at block #9 (8 + 1) and it never expires
BOOST_REQUIRE(validate_snapshot_request(4, 12, 10, true)); // snapshot #4 should have pending snapshot at block # at the moment of scheduling (2) plus 10 = 12
BOOST_REQUIRE(validate_snapshot_request(5, 10, 10)); // snapshot #5 should have pending snapshot at block #10, #20 etc
}
});

app->exec();
return;
} FC_LOG_AND_DROP()
BOOST_CHECK(!"app threw exception see logged error");
});

auto [prod_plug, chain_plug] = plugin_fut.get();
std::deque<block_state_ptr> all_blocks;
std::promise<void> empty_blocks_promise;
std::future<void> empty_blocks_fut = empty_blocks_promise.get_future();
auto pp = app->find_plugin<producer_plugin>();

auto bs = chain_plug->chain().block_start.connect([&pp](uint32_t bn) {
// catching pending snapshot
if (!pp->get_snapshot_requests().snapshot_requests.empty()) {
const auto& snapshot_requests = pp->get_snapshot_requests().snapshot_requests;

auto validate_snapshot_request = [&](uint32_t sid, uint32_t block_num, uint32_t spacing = 0, bool fuzzy_start = false) {
auto it = find_if(snapshot_requests.begin(), snapshot_requests.end(), [sid](const snapshot_scheduler::snapshot_schedule_information& obj) {return obj.snapshot_request_id == sid;});
if (it != snapshot_requests.end()) {
auto& pending = it->pending_snapshots;
if (pending.size()==1u) {
// pending snapshot block number
auto pbn = pending.begin()->head_block_num;

// first pending snapshot
auto ps_start = (spacing != 0) ? (spacing + (pbn%spacing)) : pbn;

if (!fuzzy_start) {
BOOST_CHECK_EQUAL(block_num, ps_start);
}
else {
int diff = block_num - ps_start;
BOOST_CHECK(std::abs(diff) <= 5); // accept +/- 5 blocks if start block not specified
}
}
return true;
}
return false;
};

BOOST_REQUIRE(validate_snapshot_request(0, 9, 8)); // snapshot #0 should have pending snapshot at block #9 (8 + 1) and it never expires
BOOST_REQUIRE(validate_snapshot_request(4, 12, 10, true)); // snapshot #4 should have pending snapshot at block # at the moment of scheduling (2) plus 10 = 12
BOOST_REQUIRE(validate_snapshot_request(5, 10, 10)); // snapshot #5 should have pending snapshot at block #10, #20 etc
}
});

snapshot_request_params sri1 = {.block_spacing = 8, .start_block_num = 1, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 1"};
snapshot_request_params sri2 = {.block_spacing = 5000, .start_block_num = 100000, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 2 that wont happen in test"};
@@ -124,31 +128,35 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) {
snapshot_request_params sri5 = {.block_spacing = 10, .snapshot_description = "Recurring every 10 blocks snapshot starting now"};
snapshot_request_params sri6 = {.block_spacing = 10, .start_block_num = 0, .snapshot_description = "Recurring every 10 blocks snapshot starting from 0"};

pp->schedule_snapshot(sri1);
pp->schedule_snapshot(sri2);
pp->schedule_snapshot(sri3);
pp->schedule_snapshot(sri4);
pp->schedule_snapshot(sri5);
pp->schedule_snapshot(sri6);
app->post(appbase::priority::medium_low, [&]() {
prod_plug->schedule_snapshot(sri1);
prod_plug->schedule_snapshot(sri2);
prod_plug->schedule_snapshot(sri3);
prod_plug->schedule_snapshot(sri4);
prod_plug->schedule_snapshot(sri5);
prod_plug->schedule_snapshot(sri6);

// all six snapshot requests should be present now
BOOST_CHECK_EQUAL(6u, pp->get_snapshot_requests().snapshot_requests.size());
// all six snapshot requests should be present now
BOOST_CHECK_EQUAL(6u, prod_plug->get_snapshot_requests().snapshot_requests.size());
});

empty_blocks_fut.wait_for(std::chrono::seconds(10));
at_block_20_fut.get();

// two of the snapshots are done here and requests, corresponding to them should be deleted
BOOST_CHECK_EQUAL(4u, pp->get_snapshot_requests().snapshot_requests.size());
app->post(appbase::priority::medium_low, [&]() {
// two of the snapshots are done here and requests, corresponding to them should be deleted
BOOST_CHECK_EQUAL(4u, prod_plug->get_snapshot_requests().snapshot_requests.size());

// check whether no pending snapshots present for a snapshot with id 0
const auto& snapshot_requests = pp->get_snapshot_requests().snapshot_requests;
auto it = find_if(snapshot_requests.begin(), snapshot_requests.end(),[](const snapshot_scheduler::snapshot_schedule_information& obj) {return obj.snapshot_request_id == 0;});
// check whether no pending snapshots present for a snapshot with id 0
const auto& snapshot_requests = prod_plug->get_snapshot_requests().snapshot_requests;
auto it = find_if(snapshot_requests.begin(), snapshot_requests.end(),[](const snapshot_scheduler::snapshot_schedule_information& obj) {return obj.snapshot_request_id == 0;});

// snapshot request with id = 0 should be found and should not have any pending snapshots
BOOST_REQUIRE(it != snapshot_requests.end());
BOOST_CHECK(!it->pending_snapshots.size());
// snapshot request with id = 0 should be found and should not have any pending snapshots
BOOST_REQUIRE(it != snapshot_requests.end());
BOOST_CHECK(!it->pending_snapshots.size());

// quit app
app->quit();
// quit app
app->quit();
});
app_thread.join();

// lets check whether schedule can be read back after restart
8 changes: 5 additions & 3 deletions tools/reproducible.Dockerfile
@@ -97,10 +97,12 @@ FROM builder AS build

ARG LEAP_BUILD_JOBS

COPY / /src
RUN cmake -S src -B build -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Release -GNinja && \
# Yuck: This places the source at the same location as leap's CI (build.yaml, build_base.yaml). Unfortunately this location only matches
# when build.yaml etc are being run from a repository named leap.
COPY / /__w/leap/leap
RUN cmake -S /__w/leap/leap -B build -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Release -GNinja && \
cmake --build build -t package -- ${LEAP_BUILD_JOBS:+-j$LEAP_BUILD_JOBS} && \
src/tools/tweak-deb.sh build/leap_*.deb
/__w/leap/leap/tools/tweak-deb.sh build/leap_*.deb

FROM scratch AS exporter
COPY --from=build /build/*.deb /build/*.tar.* /
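
For completeness, a hedged sketch of driving this Dockerfile by hand: the builder stage is what the workflow's new platform-files entry (tools/reproducible.Dockerfile:builder) points at, and the scratch exporter stage above carries the finished packages. The flags assume Docker BuildKit's --target/--output behavior; the image tag and output directory below are illustrative:

# build only the toolchain stage referenced by the new platform-files entry
docker build --target builder -t leap-repro-builder -f tools/reproducible.Dockerfile .
# run the full reproducible build; the scratch exporter stage yields the .deb and .tar.* files
# (LEAP_BUILD_JOBS can be passed with --build-arg to cap build parallelism)
docker build --target exporter --output type=local,dest=./repro-out -f tools/reproducible.Dockerfile .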
