From 7c4e7230b0df35dd63645971bd96e0258d3fb0f0 Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 1 Dec 2023 02:50:18 -0600 Subject: [PATCH] ledger-tool: Run rustfmt with format_strings = true (#34284) Long string literals can cause rustfmt to fail, which leaves entire functions unformatted. There are several instances of this in ledger-tool, so format these files with wrapped strings so that rustfmt can format those functions again. Note that this PR was created by adding format_strings = true to rustfmt.toml; however, this change does NOT persist that rule, as it would reformat the entire repo. --- ledger-tool/src/bigtable.rs | 176 ++-- ledger-tool/src/ledger_utils.rs | 22 +- ledger-tool/src/main.rs | 1629 ++++++++++++++++--------------- ledger-tool/src/program.rs | 4 +- 4 files changed, 978 insertions(+), 853 deletions(-) diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs index 6de86f4e0b8e0a..ed98f2b0e5bbf4 100644 --- a/ledger-tool/src/bigtable.rs +++ b/ledger-tool/src/bigtable.rs @@ -471,7 +474,10 @@ async fn copy(args: CopyArgs) -> Result<(), Box<dyn std::error::Error>> { debug!("worker {}: received slot {}", i, slot); if !args.force { - match destination_bigtable_clone.confirmed_block_exists(slot).await { + match destination_bigtable_clone + .confirmed_block_exists(slot) + .await + { Ok(exist) => { if exist { skip_slots_clone.lock().unwrap().push(slot); @@ -479,7 +482,11 @@ } } Err(err) => { - error!("confirmed_block_exists() failed from the destination Bigtable, slot: {}, err: {}", slot, err); + error!( + "confirmed_block_exists() failed from the destination \ + Bigtable, slot: {}, err: {}", + slot, err + ); failed_slots_clone.lock().unwrap().push(slot); continue; } @@ -499,33 +506,44 @@ } } Err(err) => { - error!("failed to get a confirmed block from the source Bigtable, slot: {}, err: {}", slot, err); + error!( + "failed to get a confirmed block from the source Bigtable, \ + slot: {}, err: {}", + slot, err + ); failed_slots_clone.lock().unwrap().push(slot); continue; } }; } else { let confirmed_block = - match source_bigtable_clone.get_confirmed_block(slot).await { - Ok(block) => match VersionedConfirmedBlock::try_from(block) { - Ok(block) => block, + match source_bigtable_clone.get_confirmed_block(slot).await { + Ok(block) => match VersionedConfirmedBlock::try_from(block) { + Ok(block) => block, + Err(err) => { + error!( + "failed to convert confirmed block to versioned \ + confirmed block, slot: {}, err: {}", + slot, err + ); + failed_slots_clone.lock().unwrap().push(slot); + continue; + } + }, + Err(solana_storage_bigtable::Error::BlockNotFound(slot)) => { + debug!("block not found, slot: {}", slot); + block_not_found_slots_clone.lock().unwrap().push(slot); + continue; + } Err(err) => { - error!("failed to convert confirmed block to versioned confirmed block, slot: {}, err: {}", slot, err); + error!( + "failed to get confirmed block, slot: {}, err: {}", + slot, err + ); failed_slots_clone.lock().unwrap().push(slot); continue; } - }, - Err(solana_storage_bigtable::Error::BlockNotFound(slot)) => { - debug!("block not found, slot: {}", slot); - block_not_found_slots_clone.lock().unwrap().push(slot); - continue; - } - Err(err) => { - error!("failed to get confirmed block, slot: {}, err: {}", slot, err); - failed_slots_clone.lock().unwrap().push(slot); - continue; - } - }; + }; match destination_bigtable_clone .upload_confirmed_block(slot, confirmed_block) @@ -627,7 
+645,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("INSTANCE_NAME") .default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME) - .help("Name of the target Bigtable instance") + .help("Name of the target Bigtable instance"), ) .arg( Arg::with_name("rpc_bigtable_app_profile_id") @@ -636,7 +654,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("APP_PROFILE_ID") .default_value(solana_storage_bigtable::DEFAULT_APP_PROFILE_ID) - .help("Bigtable application profile id to use in requests") + .help("Bigtable application profile id to use in requests"), ) .subcommand( SubCommand::with_name("upload") @@ -666,9 +684,9 @@ impl BigTableSubCommand for App<'_, '_> { .long("force") .takes_value(false) .help( - "Force reupload of any blocks already present in BigTable instance\ - Note: reupload will *not* delete any data from the tx-by-addr table;\ - Use with care.", + "Force reupload of any blocks already present in BigTable \ + instance. Note: reupload will *not* delete any data from the \ + tx-by-addr table; Use with care.", ), ), ) @@ -676,24 +694,25 @@ impl BigTableSubCommand for App<'_, '_> { SubCommand::with_name("delete-slots") .about("Delete ledger information from BigTable") .arg( - Arg::with_name("slots") - .index(1) - .value_name("SLOTS") - .takes_value(true) - .multiple(true) - .required(true) - .help("Slots to delete"), - ) - .arg( - Arg::with_name("force") - .long("force") - .takes_value(false) - .help( - "Deletions are only performed when the force flag is enabled. \ - If force is not enabled, show stats about what ledger data \ - will be deleted in a real deletion. "), - ), + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .takes_value(true) + .multiple(true) + .required(true) + .help("Slots to delete"), ) + .arg( + Arg::with_name("force") + .long("force") + .takes_value(false) + .help( + "Deletions are only performed when the force flag is enabled. \ + If force is not enabled, show stats about what ledger data \ + will be deleted in a real deletion. 
", + ), + ), + ) .subcommand( SubCommand::with_name("first-available-block") .about("Get the first available block in the storage"), @@ -726,8 +745,10 @@ impl BigTableSubCommand for App<'_, '_> { ) .subcommand( SubCommand::with_name("compare-blocks") - .about("Find the missing confirmed blocks of an owned bigtable for a given range \ - by comparing to a reference bigtable") + .about( + "Find the missing confirmed blocks of an owned bigtable for a given \ + range by comparing to a reference bigtable", + ) .arg( Arg::with_name("starting_slot") .validator(is_slot) @@ -763,7 +784,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("INSTANCE_NAME") .default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME) - .help("Name of the reference Bigtable instance to compare to") + .help("Name of the reference Bigtable instance to compare to"), ) .arg( Arg::with_name("reference_app_profile_id") @@ -771,7 +792,9 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("APP_PROFILE_ID") .default_value(solana_storage_bigtable::DEFAULT_APP_PROFILE_ID) - .help("Reference Bigtable application profile id to use in requests") + .help( + "Reference Bigtable application profile id to use in requests", + ), ), ) .subcommand( @@ -798,7 +821,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .index(1) .required(true), - ) + ), ) .subcommand( SubCommand::with_name("confirm") @@ -816,8 +839,8 @@ impl BigTableSubCommand for App<'_, '_> { .subcommand( SubCommand::with_name("transaction-history") .about( - "Show historical transactions affecting the given address \ - from newest to oldest", + "Show historical transactions affecting the given address from newest \ + to oldest", ) .arg( Arg::with_name("address") @@ -846,8 +869,8 @@ impl BigTableSubCommand for App<'_, '_> { .default_value("1000") .help( "Number of transaction signatures to query at once. \ - Smaller: more responsive/lower throughput. \ - Larger: less responsive/higher throughput", + Smaller: more responsive/lower throughput. 
\ + Larger: less responsive/higher throughput", ), ) .arg( @@ -881,7 +904,8 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .conflicts_with("emulated_source") .help( - "Source Bigtable credential filepath (credential may be readonly)", + "Source Bigtable credential filepath (credential may be \ + readonly)", ), ) .arg( @@ -890,9 +914,7 @@ impl BigTableSubCommand for App<'_, '_> { .value_name("EMULATED_SOURCE") .takes_value(true) .conflicts_with("source_credential_path") - .help( - "Source Bigtable emulated source", - ), + .help("Source Bigtable emulated source"), ) .arg( Arg::with_name("source_instance_name") @@ -900,7 +922,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("SOURCE_INSTANCE_NAME") .default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME) - .help("Source Bigtable instance name") + .help("Source Bigtable instance name"), ) .arg( Arg::with_name("source_app_profile_id") @@ -908,7 +930,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("SOURCE_APP_PROFILE_ID") .default_value(solana_storage_bigtable::DEFAULT_APP_PROFILE_ID) - .help("Source Bigtable app profile id") + .help("Source Bigtable app profile id"), ) .arg( Arg::with_name("destination_credential_path") @@ -917,7 +939,8 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .conflicts_with("emulated_destination") .help( - "Destination Bigtable credential filepath (credential must have Bigtable write permissions)", + "Destination Bigtable credential filepath (credential must \ + have Bigtable write permissions)", ), ) .arg( @@ -926,9 +949,7 @@ impl BigTableSubCommand for App<'_, '_> { .value_name("EMULATED_DESTINATION") .takes_value(true) .conflicts_with("destination_credential_path") - .help( - "Destination Bigtable emulated destination", - ), + .help("Destination Bigtable emulated destination"), ) .arg( Arg::with_name("destination_instance_name") @@ -936,7 +957,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("DESTINATION_INSTANCE_NAME") .default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME) - .help("Destination Bigtable instance name") + .help("Destination Bigtable instance name"), ) .arg( Arg::with_name("destination_app_profile_id") @@ -944,7 +965,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("DESTINATION_APP_PROFILE_ID") .default_value(solana_storage_bigtable::DEFAULT_APP_PROFILE_ID) - .help("Destination Bigtable app profile id") + .help("Destination Bigtable app profile id"), ) .arg( Arg::with_name("starting_slot") @@ -953,9 +974,7 @@ impl BigTableSubCommand for App<'_, '_> { .value_name("START_SLOT") .takes_value(true) .required(true) - .help( - "Start copying at this slot", - ), + .help("Start copying at this slot (inclusive)"), ) .arg( Arg::with_name("ending_slot") @@ -963,26 +982,25 @@ impl BigTableSubCommand for App<'_, '_> { .validator(is_slot) .value_name("END_SLOT") .takes_value(true) - .help("Stop copying at this slot (inclusive, START_SLOT ..= END_SLOT)"), + .help("Stop copying at this slot (inclusive)"), ) .arg( Arg::with_name("force") - .long("force") - .value_name("FORCE") - .takes_value(false) - .help( - "Force copy of blocks already present in destination Bigtable instance", - ), + .long("force") + .value_name("FORCE") + .takes_value(false) + .help( + "Force copy of blocks already present in destination Bigtable \ + instance", + ), ) .arg( Arg::with_name("dry_run") - .long("dry-run") - .value_name("DRY_RUN") - .takes_value(false) - .help( - "Dry run. 
It won't upload any blocks", - ), - ) + .long("dry-run") + .value_name("DRY_RUN") + .takes_value(false) + .help("Dry run. It won't upload any blocks"), + ), ), ) } diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index 8063402298482f..292aee2e1ee391 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -107,8 +107,8 @@ pub fn load_and_process_ledger( }) }; - let start_slot_msg = "The starting slot will be the latest snapshot slot, or genesis if \ - the --no-snapshot flag is specified or if no snapshots are found."; + let start_slot_msg = "The starting slot will be the latest snapshot slot, or genesis if the \ + --no-snapshot flag is specified or if no snapshots are found."; match process_options.halt_at_slot { // Skip the following checks for sentinel values of Some(0) and None. // For Some(0), no slots will be be replayed after starting_slot. @@ -118,7 +118,7 @@ pub fn load_and_process_ledger( if halt_slot < starting_slot { eprintln!( "Unable to process blockstore from starting slot {starting_slot} to \ - {halt_slot}; the ending slot is less than the starting slot. {start_slot_msg}" + {halt_slot}; the ending slot is less than the starting slot. {start_slot_msg}" ); exit(1); } @@ -126,8 +126,8 @@ pub fn load_and_process_ledger( if !blockstore.slot_range_connected(starting_slot, halt_slot) { eprintln!( "Unable to process blockstore from starting slot {starting_slot} to \ - {halt_slot}; the blockstore does not contain a replayable chain between these \ - slots. {start_slot_msg}" + {halt_slot}; the blockstore does not contain a replayable chain between \ + these slots. {start_slot_msg}" ); exit(1); } @@ -367,8 +367,8 @@ pub fn open_blockstore( let shred_storage_type = get_shred_storage_type( ledger_path, &format!( - "Shred storage type cannot be inferred for ledger at {ledger_path:?}, \ - using default RocksLevel", + "Shred storage type cannot be inferred for ledger at {ledger_path:?}, using default \ + RocksLevel", ), ); @@ -401,13 +401,13 @@ pub fn open_blockstore( if missing_blockstore && is_secondary { eprintln!( - "Failed to open blockstore at {ledger_path:?}, it \ - is missing at least one critical file: {err:?}" + "Failed to open blockstore at {ledger_path:?}, it is missing at least one \ + critical file: {err:?}" ); } else if missing_column && is_secondary { eprintln!( - "Failed to open blockstore at {ledger_path:?}, it \ - does not have all necessary columns: {err:?}" + "Failed to open blockstore at {ledger_path:?}, it does not have all necessary \ + columns: {err:?}" ); } else { eprintln!("Failed to open blockstore at {ledger_path:?}: {err:?}"); diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 053656990fcf36..ac8404edd53dbb 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -244,7 +244,8 @@ fn output_slot( println!(" {meta:?} is_full: {is_full}"); } else { println!( - " num_shreds: {}, parent_slot: {:?}, next_slots: {:?}, num_entries: {}, is_full: {}", + " num_shreds: {}, parent_slot: {:?}, next_slots: {:?}, num_entries: {}, \ + is_full: {}", num_shreds, meta.parent_slot, meta.next_slots, @@ -880,7 +881,8 @@ fn print_blockstore_file_metadata( for file in live_files { if sst_file_name.is_none() || file.name.eq(sst_file_name.as_ref().unwrap()) { println!( - "[{}] cf_name: {}, level: {}, start_slot: {:?}, end_slot: {:?}, size: {}, num_entries: {}", + "[{}] cf_name: {}, level: {}, start_slot: {:?}, end_slot: {:?}, size: {}, \ + num_entries: {}", file.name, file.column_family_name, 
file.level, @@ -942,7 +944,8 @@ fn compute_slot_cost(blockstore: &Blockstore, slot: Slot) -> Result<(), String> let result = cost_tracker.try_add(&tx_cost); if result.is_err() { println!( - "Slot: {slot}, CostModel rejected transaction {transaction:?}, reason {result:?}", + "Slot: {slot}, CostModel rejected transaction {transaction:?}, reason \ + {result:?}", ); } for (program_id, _instruction) in transaction.message().program_instructions_iter() @@ -953,7 +956,8 @@ fn compute_slot_cost(blockstore: &Blockstore, slot: Slot) -> Result<(), String> } println!( - "Slot: {slot}, Entries: {num_entries}, Transactions: {num_transactions}, Programs {num_programs}", + "Slot: {slot}, Entries: {num_entries}, Transactions: {num_transactions}, Programs \ + {num_programs}", ); println!(" Programs: {program_ids:?}"); @@ -1011,7 +1015,7 @@ fn get_latest_optimistic_slots( if hash_and_timestamp_opt.is_none() { warn!( "Slot {slot} is an ancestor of latest optimistically confirmed slot \ - {latest_slot}, but was not marked as optimistically confirmed in blockstore." + {latest_slot}, but was not marked as optimistically confirmed in blockstore." ); } (slot, hash_and_timestamp_opt, contains_nonvote_tx) @@ -1117,16 +1121,22 @@ fn main() { .value_name("MEGABYTES") .validator(is_parsable::<usize>) .takes_value(true) - .help("How much memory the accounts index can consume. If this is exceeded, some account index entries will be stored on disk."); + .help( + "How much memory the accounts index can consume. If this is exceeded, some account \ + index entries will be stored on disk.", + ); let disable_disk_index = Arg::with_name("disable_accounts_disk_index") .long("disable-accounts-disk-index") - .help("Disable the disk-based accounts index. It is enabled by default. The entire accounts index will be kept in memory.") + .help( + "Disable the disk-based accounts index. It is enabled by default. The entire accounts \ + index will be kept in memory.", + ) .conflicts_with("accounts_index_memory_limit_mb"); let accountsdb_skip_shrink = Arg::with_name("accounts_db_skip_shrink") .long("accounts-db-skip-shrink") .help( - "Enables faster starting of ledger-tool by skipping shrink. \ - This option is for use during testing.", + "Enables faster starting of ledger-tool by skipping shrink. This option is for use \ + during testing.", ); let accountsdb_verify_refcounts = Arg::with_name("accounts_db_verify_refcounts") .long("accounts-db-verify-refcounts") @@ -1134,12 +1144,14 @@ fn main() { "Debug option to scan all AppendVecs and verify account index refcounts prior to clean", ) .hidden(hidden_unless_forced()); - let accounts_db_test_skip_rewrites_but_include_in_bank_hash = Arg::with_name("accounts_db_test_skip_rewrites") - .long("accounts-db-test-skip-rewrites") - .help( - "Debug option to skip rewrites for rent-exempt accounts but still add them in bank delta hash calculation", - ) - .hidden(hidden_unless_forced()); + let accounts_db_test_skip_rewrites_but_include_in_bank_hash = + Arg::with_name("accounts_db_test_skip_rewrites") + .long("accounts-db-test-skip-rewrites") + .help( + "Debug option to skip rewrites for rent-exempt accounts but still add them in \ + bank delta hash calculation", + ) + .hidden(hidden_unless_forced()); let account_paths_arg = Arg::with_name("account_paths") .long("accounts") .value_name("PATHS") @@ -1156,9 +1168,8 @@ fn main() { .takes_value(true) .multiple(true) .help( - "Persistent accounts-index location. \ - May be specified multiple times. 
\ - [default: [ledger]/accounts_index]", + "Persistent accounts-index location. May be specified multiple times. [default: \ + [ledger]/accounts_index]", ); let accounts_db_test_hash_calculation_arg = Arg::with_name("accounts_db_test_hash_calculation") .long("accounts-db-test-hash-calculation") @@ -1187,19 +1198,26 @@ fn main() { ) .hidden(hidden_unless_forced()); let halt_at_slot_store_hash_raw_data = Arg::with_name("halt_at_slot_store_hash_raw_data") - .long("halt-at-slot-store-hash-raw-data") - .help("After halting at slot, run an accounts hash calculation and store the raw hash data for debugging.") - .hidden(hidden_unless_forced()); + .long("halt-at-slot-store-hash-raw-data") + .help( + "After halting at slot, run an accounts hash calculation and store the raw hash data \ + for debugging.", + ) + .hidden(hidden_unless_forced()); let verify_index_arg = Arg::with_name("verify_accounts_index") .long("verify-accounts-index") .takes_value(false) .help("For debugging and tests on accounts index."); - let limit_load_slot_count_from_snapshot_arg = Arg::with_name("limit_load_slot_count_from_snapshot") - .long("limit-load-slot-count-from-snapshot") - .value_name("SLOT") - .validator(is_slot) - .takes_value(true) - .help("For debugging and profiling with large snapshots, artificially limit how many slots are loaded from a snapshot."); + let limit_load_slot_count_from_snapshot_arg = + Arg::with_name("limit_load_slot_count_from_snapshot") + .long("limit-load-slot-count-from-snapshot") + .value_name("SLOT") + .validator(is_slot) + .takes_value(true) + .help( + "For debugging and profiling with large snapshots, artificially limit how many \ + slots are loaded from a snapshot.", + ); let hard_forks_arg = Arg::with_name("hard_forks") .long("hard-fork") .value_name("SLOT") @@ -1223,9 +1241,8 @@ fn main() { .value_name("NUM_HASHES|\"sleep\"") .takes_value(true) .help( - "How many PoH hashes to roll before emitting the next tick. \ - If \"sleep\", for development \ - sleep for the target tick duration instead of hashing", + "How many PoH hashes to roll before emitting the next tick. 
If \"sleep\", for \ + development sleep for the target tick duration instead of hashing", ); let snapshot_version_arg = Arg::with_name("snapshot_version") .long("snapshot-version") @@ -1252,30 +1269,32 @@ fn main() { let default_max_full_snapshot_archives_to_retain = &DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN.to_string(); - let maximum_full_snapshot_archives_to_retain = Arg::with_name( - "maximum_full_snapshots_to_retain", - ) - .long("maximum-full-snapshots-to-retain") - .alias("maximum-snapshots-to-retain") - .value_name("NUMBER") - .takes_value(true) - .default_value(default_max_full_snapshot_archives_to_retain) - .validator(validate_maximum_full_snapshot_archives_to_retain) - .help( - "The maximum number of full snapshot archives to hold on to when purging older snapshots.", - ); + let maximum_full_snapshot_archives_to_retain = + Arg::with_name("maximum_full_snapshots_to_retain") + .long("maximum-full-snapshots-to-retain") + .alias("maximum-snapshots-to-retain") + .value_name("NUMBER") + .takes_value(true) + .default_value(default_max_full_snapshot_archives_to_retain) + .validator(validate_maximum_full_snapshot_archives_to_retain) + .help( + "The maximum number of full snapshot archives to hold on to when purging older \ + snapshots.", + ); let default_max_incremental_snapshot_archives_to_retain = &DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN.to_string(); - let maximum_incremental_snapshot_archives_to_retain = Arg::with_name( - "maximum_incremental_snapshots_to_retain", - ) - .long("maximum-incremental-snapshots-to-retain") - .value_name("NUMBER") - .takes_value(true) - .default_value(default_max_incremental_snapshot_archives_to_retain) - .validator(validate_maximum_incremental_snapshot_archives_to_retain) - .help("The maximum number of incremental snapshot archives to hold on to when purging older snapshots."); + let maximum_incremental_snapshot_archives_to_retain = + Arg::with_name("maximum_incremental_snapshots_to_retain") + .long("maximum-incremental-snapshots-to-retain") + .value_name("NUMBER") + .takes_value(true) + .default_value(default_max_incremental_snapshot_archives_to_retain) + .validator(validate_maximum_incremental_snapshot_archives_to_retain) + .help( + "The maximum number of incremental snapshot archives to hold on to when purging \ + older snapshots.", + ); let geyser_plugin_args = Arg::with_name("geyser_plugin_config") .long("geyser-plugin-config") @@ -1328,27 +1347,30 @@ fn main() { "tolerate_corrupted_tail_records", "absolute_consistency", "point_in_time", - "skip_any_corrupted_record"]) - .help( - "Mode to recovery the ledger db write ahead log" - ), + "skip_any_corrupted_record", + ]) + .help("Mode to recovery the ledger db write ahead log"), ) .arg( Arg::with_name("force_update_to_open") .long("force-update-to-open") .takes_value(false) .global(true) - .help("Allow commands that would otherwise not alter the \ - blockstore to make necessary updates in order to open it"), + .help( + "Allow commands that would otherwise not alter the blockstore to make \ + necessary updates in order to open it", + ), ) .arg( Arg::with_name("ignore_ulimit_nofile_error") .long("ignore-ulimit-nofile-error") .value_name("FORMAT") .global(true) - .help("Allow opening the blockstore to succeed even if the desired open file \ - descriptor limit cannot be configured. 
Use with caution as some commands may \ - run fine with a reduced file descriptor limit while others will not"), + .help( + "Allow opening the blockstore to succeed even if the desired open file \ + descriptor limit cannot be configured. Use with caution as some commands may \ + run fine with a reduced file descriptor limit while others will not", + ), ) .arg( Arg::with_name("snapshot_archive_path") @@ -1383,8 +1405,10 @@ fn main() { .global(true) .takes_value(true) .possible_values(&["json", "json-compact"]) - .help("Return information in specified output format, \ - currently only available for bigtable and program subcommands"), + .help( + "Return information in specified output format, currently only available for \ + bigtable and program subcommands", + ), ) .arg( Arg::with_name("verbose") @@ -1398,702 +1422,756 @@ fn main() { .bigtable_subcommand() .subcommand( SubCommand::with_name("print") - .about("Print the ledger") - .arg(&starting_slot_arg) - .arg(&allow_dead_slots_arg) - .arg(&ending_slot_arg) - .arg( - Arg::with_name("num_slots") - .long("num-slots") - .value_name("SLOT") - .validator(is_slot) - .takes_value(true) - .help("Number of slots to print"), - ) - .arg( - Arg::with_name("only_rooted") - .long("only-rooted") - .takes_value(false) - .help("Only print root slots"), - ) + .about("Print the ledger") + .arg(&starting_slot_arg) + .arg(&allow_dead_slots_arg) + .arg(&ending_slot_arg) + .arg( + Arg::with_name("num_slots") + .long("num-slots") + .value_name("SLOT") + .validator(is_slot) + .takes_value(true) + .help("Number of slots to print"), + ) + .arg( + Arg::with_name("only_rooted") + .long("only-rooted") + .takes_value(false) + .help("Only print root slots"), + ), ) .subcommand( SubCommand::with_name("copy") - .about("Copy the ledger") - .arg(&starting_slot_arg) - .arg(&ending_slot_arg) - .arg( - Arg::with_name("target_db") - .long("target-db") - .value_name("DIR") - .takes_value(true) - .help("Target db"), - ) + .about("Copy the ledger") + .arg(&starting_slot_arg) + .arg(&ending_slot_arg) + .arg( + Arg::with_name("target_db") + .long("target-db") + .value_name("DIR") + .takes_value(true) + .help("Target db"), + ), ) .subcommand( SubCommand::with_name("slot") - .about("Print the contents of one or more slots") - .arg( - Arg::with_name("slots") - .index(1) - .value_name("SLOTS") - .validator(is_slot) - .takes_value(true) - .multiple(true) - .required(true) - .help("Slots to print"), - ) - .arg(&allow_dead_slots_arg) + .about("Print the contents of one or more slots") + .arg( + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .validator(is_slot) + .takes_value(true) + .multiple(true) + .required(true) + .help("Slots to print"), + ) + .arg(&allow_dead_slots_arg), ) .subcommand( SubCommand::with_name("dead-slots") - .arg(&starting_slot_arg) - .about("Print all the dead slots in the ledger") + .arg(&starting_slot_arg) + .about("Print all the dead slots in the ledger"), ) .subcommand( SubCommand::with_name("duplicate-slots") - .arg(&starting_slot_arg) - .about("Print all the duplicate slots in the ledger") + .arg(&starting_slot_arg) + .about("Print all the duplicate slots in the ledger"), ) .subcommand( SubCommand::with_name("set-dead-slot") - .about("Mark one or more slots dead") - .arg( - Arg::with_name("slots") - .index(1) - .value_name("SLOTS") - .validator(is_slot) - .takes_value(true) - .multiple(true) - .required(true) - .help("Slots to mark dead"), - ) + .about("Mark one or more slots dead") + .arg( + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") 
+ .validator(is_slot) + .takes_value(true) + .multiple(true) + .required(true) + .help("Slots to mark dead"), + ), ) .subcommand( SubCommand::with_name("remove-dead-slot") - .about("Remove the dead flag for a slot") - .arg( - Arg::with_name("slots") - .index(1) - .value_name("SLOTS") - .validator(is_slot) - .takes_value(true) - .multiple(true) - .required(true) - .help("Slots to mark as not dead"), - ) + .about("Remove the dead flag for a slot") + .arg( + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .validator(is_slot) + .takes_value(true) + .multiple(true) + .required(true) + .help("Slots to mark as not dead"), + ), ) .subcommand( SubCommand::with_name("genesis") - .about("Prints the ledger's genesis config") - .arg(&max_genesis_archive_unpacked_size_arg) - .arg( - Arg::with_name("accounts") - .long("accounts") - .takes_value(false) - .help("Print the ledger's genesis accounts"), - ) - .arg( - Arg::with_name("no_account_data") - .long("no-account-data") - .takes_value(false) - .requires("accounts") - .help("Do not print account data when printing account contents."), - ) - .arg(&accounts_data_encoding_arg) + .about("Prints the ledger's genesis config") + .arg(&max_genesis_archive_unpacked_size_arg) + .arg( + Arg::with_name("accounts") + .long("accounts") + .takes_value(false) + .help("Print the ledger's genesis accounts"), + ) + .arg( + Arg::with_name("no_account_data") + .long("no-account-data") + .takes_value(false) + .requires("accounts") + .help("Do not print account data when printing account contents."), + ) + .arg(&accounts_data_encoding_arg), ) .subcommand( SubCommand::with_name("genesis-hash") - .about("Prints the ledger's genesis hash") - .arg(&max_genesis_archive_unpacked_size_arg) + .about("Prints the ledger's genesis hash") + .arg(&max_genesis_archive_unpacked_size_arg), ) .subcommand( SubCommand::with_name("parse_full_frozen") - .about("Parses log for information about critical events about \ - ancestors of the given `ending_slot`") - .arg(&starting_slot_arg) - .arg(&ending_slot_arg) - .arg( - Arg::with_name("log_path") - .long("log-path") - .value_name("PATH") - .takes_value(true) - .help("path to log file to parse"), - ) + .about( + "Parses log for information about critical events about ancestors of the \ + given `ending_slot`", + ) + .arg(&starting_slot_arg) + .arg(&ending_slot_arg) + .arg( + Arg::with_name("log_path") + .long("log-path") + .value_name("PATH") + .takes_value(true) + .help("path to log file to parse"), + ), ) .subcommand( SubCommand::with_name("modify-genesis") - .about("Modifies genesis parameters") - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&hashes_per_tick) - .arg( - Arg::with_name("cluster_type") - .long("cluster-type") - .possible_values(&ClusterType::STRINGS) - .takes_value(true) - .help( - "Selects the features that will be enabled for the cluster" - ), - ) - .arg( - Arg::with_name("output_directory") - .index(1) - .value_name("DIR") - .takes_value(true) - .help("Output directory for the modified genesis config"), - ) + .about("Modifies genesis parameters") + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&hashes_per_tick) + .arg( + Arg::with_name("cluster_type") + .long("cluster-type") + .possible_values(&ClusterType::STRINGS) + .takes_value(true) + .help("Selects the features that will be enabled for the cluster"), + ) + .arg( + Arg::with_name("output_directory") + .index(1) + .value_name("DIR") + .takes_value(true) + .help("Output directory for the modified genesis config"), + ), ) .subcommand( 
SubCommand::with_name("shred-version") - .about("Prints the ledger's shred hash") - .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .about("Prints the ledger's shred hash") + .arg(&hard_forks_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash), ) .subcommand( SubCommand::with_name("shred-meta") - .about("Prints raw shred metadata") - .arg(&starting_slot_arg) - .arg(&ending_slot_arg) + .about("Prints raw shred metadata") + .arg(&starting_slot_arg) + .arg(&ending_slot_arg), ) .subcommand( SubCommand::with_name("bank-hash") - .about("Prints the hash of the working bank after reading the ledger") - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&halt_at_slot_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .about("Prints the hash of the working bank after reading the ledger") + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&halt_at_slot_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash), ) .subcommand( SubCommand::with_name("bounds") - .about( - "Print lowest and highest non-empty slots. \ - Note that there may be empty slots within the bounds", - ) - .arg( - Arg::with_name("all") - .long("all") - .takes_value(false) - .required(false) - .help("Additionally print all the non-empty slots within the bounds"), - ) + .about( + "Print lowest and highest non-empty slots. 
Note that there may be empty slots \ + within the bounds", + ) + .arg( + Arg::with_name("all") + .long("all") + .takes_value(false) + .required(false) + .help("Additionally print all the non-empty slots within the bounds"), + ), ) .subcommand( SubCommand::with_name("json") - .about("Print the ledger in JSON format") - .arg(&starting_slot_arg) - .arg(&allow_dead_slots_arg) + .about("Print the ledger in JSON format") + .arg(&starting_slot_arg) + .arg(&allow_dead_slots_arg), ) .subcommand( SubCommand::with_name("verify") - .about("Verify the ledger") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_path_arg) - .arg(&halt_at_slot_arg) - .arg(&limit_load_slot_count_from_snapshot_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_skip_shrink) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&verify_index_arg) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&ancient_append_vecs) - .arg(&halt_at_slot_store_hash_raw_data) - .arg(&hard_forks_arg) - .arg(&accounts_db_test_hash_calculation_arg) - .arg(&no_os_memory_stats_reporting_arg) - .arg(&allow_dead_slots_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&debug_key_arg) - .arg(&geyser_plugin_args) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("skip_poh_verify") - .long("skip-poh-verify") - .takes_value(false) - .help( - "Deprecated, please use --skip-verification.\n\ - Skip ledger PoH and transaction verification." - ), - ) - .arg( - Arg::with_name("skip_verification") - .long("skip-verification") - .takes_value(false) - .help("Skip ledger PoH and transaction verification."), - ) - .arg( - Arg::with_name("enable_rpc_transaction_history") - .long("enable-rpc-transaction-history") - .takes_value(false) - .help("Store transaction info for processed slots into local ledger"), - ) - .arg( - Arg::with_name("run_final_hash_calc") - .long("run-final-accounts-hash-calculation") - .takes_value(false) - .help("After 'verify' completes, run a final accounts hash calculation. Final hash calculation could race with accounts background service tasks and assert."), - ) - .arg( - Arg::with_name("partitioned_epoch_rewards_compare_calculation") - .long("partitioned-epoch-rewards-compare-calculation") - .takes_value(false) - .help("Do normal epoch rewards distribution, but also calculate rewards using the partitioned rewards code path and compare the resulting vote and stake accounts") - .hidden(hidden_unless_forced()) - ) - .arg( - Arg::with_name("partitioned_epoch_rewards_force_enable_single_slot") - .long("partitioned-epoch-rewards-force-enable-single-slot") - .takes_value(false) - .help("Force the partitioned rewards distribution, but distribute all rewards in the first slot in the epoch. This should match consensus with the normal rewards distribution.") - .conflicts_with("partitioned_epoch_rewards_compare_calculation") - .hidden(hidden_unless_forced()) - ) - .arg( - Arg::with_name("print_accounts_stats") - .long("print-accounts-stats") - .takes_value(false) - .help("After verifying the ledger, print some information about the account stores"), - ) - .arg( - Arg::with_name("write_bank_file") - .long("write-bank-file") - .takes_value(false) - .help("After verifying the ledger, write a file that contains the information \ - that went into computing the completed bank's bank hash. 
The file will be \ - written within <LEDGER_DIR>/bank_hash_details/"), - ) - ).subcommand( + .about("Verify the ledger") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_path_arg) + .arg(&halt_at_slot_arg) + .arg(&limit_load_slot_count_from_snapshot_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_skip_shrink) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&verify_index_arg) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&ancient_append_vecs) + .arg(&halt_at_slot_store_hash_raw_data) + .arg(&hard_forks_arg) + .arg(&accounts_db_test_hash_calculation_arg) + .arg(&no_os_memory_stats_reporting_arg) + .arg(&allow_dead_slots_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&debug_key_arg) + .arg(&geyser_plugin_args) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("skip_poh_verify") + .long("skip-poh-verify") + .takes_value(false) + .help( + "Deprecated, please use --skip-verification. Skip ledger PoH and \ + transaction verification.", + ), + ) + .arg( + Arg::with_name("skip_verification") + .long("skip-verification") + .takes_value(false) + .help("Skip ledger PoH and transaction verification."), + ) + .arg( + Arg::with_name("enable_rpc_transaction_history") + .long("enable-rpc-transaction-history") + .takes_value(false) + .help("Store transaction info for processed slots into local ledger"), + ) + .arg( + Arg::with_name("run_final_hash_calc") + .long("run-final-accounts-hash-calculation") + .takes_value(false) + .help( + "After 'verify' completes, run a final accounts hash calculation. \ + Final hash calculation could race with accounts background service \ + tasks and assert.", + ), + ) + .arg( + Arg::with_name("partitioned_epoch_rewards_compare_calculation") + .long("partitioned-epoch-rewards-compare-calculation") + .takes_value(false) + .help( + "Do normal epoch rewards distribution, but also calculate rewards \ + using the partitioned rewards code path and compare the resulting \ + vote and stake accounts", + ) + .hidden(hidden_unless_forced()), + ) + .arg( + Arg::with_name("partitioned_epoch_rewards_force_enable_single_slot") + .long("partitioned-epoch-rewards-force-enable-single-slot") + .takes_value(false) + .help( + "Force the partitioned rewards distribution, but distribute all \ + rewards in the first slot in the epoch. This should match consensus \ + with the normal rewards distribution.", + ) + .conflicts_with("partitioned_epoch_rewards_compare_calculation") + .hidden(hidden_unless_forced()), + ) + .arg( + Arg::with_name("print_accounts_stats") + .long("print-accounts-stats") + .takes_value(false) + .help( + "After verifying the ledger, print some information about the account \ + stores", + ), + ) + .arg( + Arg::with_name("write_bank_file") + .long("write-bank-file") + .takes_value(false) + .help( + "After verifying the ledger, write a file that contains the \ + information that went into computing the completed bank's bank hash. 
\ + The file will be written within <LEDGER_DIR>/bank_hash_details/", + ), + ), + ) + .subcommand( SubCommand::with_name("graph") - .about("Create a Graphviz rendering of the ledger") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&halt_at_slot_arg) - .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("include_all_votes") - .long("include-all-votes") - .help("Include all votes in the graph"), - ) - .arg( - Arg::with_name("graph_filename") - .index(1) - .value_name("FILENAME") - .takes_value(true) - .help("Output file"), - ) - .arg( - Arg::with_name("vote_account_mode") - .long("vote-account-mode") - .takes_value(true) - .value_name("MODE") - .default_value(default_graph_vote_account_mode.as_ref()) - .possible_values(GraphVoteAccountMode::ALL_MODE_STRINGS) - .help("Specify if and how to graph vote accounts. Enabling will incur significant rendering overhead, especially `with-history`") - ) - ).subcommand( + .about("Create a Graphviz rendering of the ledger") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&halt_at_slot_arg) + .arg(&hard_forks_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("include_all_votes") + .long("include-all-votes") + .help("Include all votes in the graph"), + ) + .arg( + Arg::with_name("graph_filename") + .index(1) + .value_name("FILENAME") + .takes_value(true) + .help("Output file"), + ) + .arg( + Arg::with_name("vote_account_mode") + .long("vote-account-mode") + .takes_value(true) + .value_name("MODE") + .default_value(default_graph_vote_account_mode.as_ref()) + .possible_values(GraphVoteAccountMode::ALL_MODE_STRINGS) + .help( + "Specify if and how to graph vote accounts. 
Enabling will incur \ + significant rendering overhead, especially `with-history`", + ), + ), + ) + .subcommand( SubCommand::with_name("create-snapshot") - .about("Create a new ledger snapshot") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&accountsdb_skip_shrink) - .arg(&ancient_append_vecs) - .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&snapshot_version_arg) - .arg(&maximum_full_snapshot_archives_to_retain) - .arg(&maximum_incremental_snapshot_archives_to_retain) - .arg(&geyser_plugin_args) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("snapshot_slot") - .index(1) - .value_name("SLOT") - .validator(|value| { - if value.parse::<Slot>().is_ok() - || value == "ROOT" - { - Ok(()) - } else { - Err(format!( - "Unable to parse as a number or the keyword ROOT, provided: {value}" - )) - } - }) - .takes_value(true) - .help("Slot at which to create the snapshot; accepts keyword ROOT for the highest root"), - ) - .arg( - Arg::with_name("output_directory") - .index(2) - .value_name("DIR") - .takes_value(true) - .help("Output directory for the snapshot [default: --snapshot-archive-path if present else --ledger directory]"), - ) - .arg( - Arg::with_name("warp_slot") - .required(false) - .long("warp-slot") - .takes_value(true) - .value_name("WARP_SLOT") - .validator(is_slot) - .help("After loading the snapshot slot warp the ledger to WARP_SLOT, \ - which could be a slot in a galaxy far far away"), - ) - .arg( - Arg::with_name("faucet_lamports") - .short("t") - .long("faucet-lamports") - .value_name("LAMPORTS") - .takes_value(true) - .requires("faucet_pubkey") - .help("Number of lamports to assign to the faucet"), - ) - .arg( - Arg::with_name("faucet_pubkey") - .short("m") - .long("faucet-pubkey") - .value_name("PUBKEY") - .takes_value(true) - .validator(is_pubkey_or_keypair) - .requires("faucet_lamports") - .help("Path to file containing the faucet's pubkey"), - ) - .arg( - Arg::with_name("bootstrap_validator") - .short("b") - .long("bootstrap-validator") - .value_name("IDENTITY_PUBKEY VOTE_PUBKEY STAKE_PUBKEY") - .takes_value(true) - .validator(is_pubkey_or_keypair) - .number_of_values(3) - .multiple(true) - .help("The bootstrap validator's identity, vote and stake pubkeys"), - ) - .arg( - Arg::with_name("bootstrap_stake_authorized_pubkey") - .long("bootstrap-stake-authorized-pubkey") - .value_name("BOOTSTRAP STAKE AUTHORIZED PUBKEY") - .takes_value(true) - .validator(is_pubkey_or_keypair) - .help( - "Path to file containing the pubkey authorized to manage the bootstrap \ - validator's stake [default: --bootstrap-validator IDENTITY_PUBKEY]", - ), - ) - .arg( - Arg::with_name("bootstrap_validator_lamports") - .long("bootstrap-validator-lamports") - .value_name("LAMPORTS") - .takes_value(true) - .default_value(default_bootstrap_validator_lamports) - .help("Number of lamports to assign to the bootstrap validator"), - ) - .arg( - Arg::with_name("bootstrap_validator_stake_lamports") - .long("bootstrap-validator-stake-lamports") - .value_name("LAMPORTS") - .takes_value(true) - .default_value(default_bootstrap_validator_stake_lamports) - .help("Number of lamports to assign to the bootstrap validator's stake account"), - ) - .arg( - Arg::with_name("rent_burn_percentage") - 
.long("rent-burn-percentage") - .value_name("NUMBER") - .takes_value(true) - .help("Adjust percentage of collected rent to burn") - .validator(is_valid_percentage), - ) - .arg(&hashes_per_tick) - .arg( - Arg::with_name("accounts_to_remove") - .required(false) - .long("remove-account") - .takes_value(true) - .value_name("PUBKEY") - .validator(is_pubkey) - .multiple(true) - .help("List of accounts to remove while creating the snapshot"), - ) - .arg( - Arg::with_name("feature_gates_to_deactivate") - .required(false) - .long("deactivate-feature-gate") - .takes_value(true) - .value_name("PUBKEY") - .validator(is_pubkey) - .multiple(true) - .help("List of feature gates to deactivate while creating the snapshot") - ) - .arg( - Arg::with_name("vote_accounts_to_destake") - .required(false) - .long("destake-vote-account") - .takes_value(true) - .value_name("PUBKEY") - .validator(is_pubkey) - .multiple(true) - .help("List of validator vote accounts to destake") - ) - .arg( - Arg::with_name("remove_stake_accounts") - .required(false) - .long("remove-stake-accounts") - .takes_value(false) - .help("Remove all existing stake accounts from the new snapshot") - ) - .arg( - Arg::with_name("incremental") - .long("incremental") - .takes_value(false) - .help("Create an incremental snapshot instead of a full snapshot. This requires \ - that the ledger is loaded from a full snapshot, which will be used as the \ - base for the incremental snapshot.") - .conflicts_with("no_snapshot") - ) - .arg( - Arg::with_name("minimized") - .long("minimized") - .takes_value(false) - .help("Create a minimized snapshot instead of a full snapshot. This snapshot \ - will only include information needed to replay the ledger from the \ - snapshot slot to the ending slot.") - .conflicts_with("incremental") - .requires("ending_slot") - ) - .arg( - Arg::with_name("ending_slot") - .long("ending-slot") - .takes_value(true) - .value_name("ENDING_SLOT") - .help("Ending slot for minimized snapshot creation") - ) - .arg( - Arg::with_name("snapshot_archive_format") - .long("snapshot-archive-format") - .possible_values(SUPPORTED_ARCHIVE_COMPRESSION) - .default_value(DEFAULT_ARCHIVE_COMPRESSION) - .value_name("ARCHIVE_TYPE") - .takes_value(true) - .help("Snapshot archive format to use.") - .conflicts_with("no_snapshot") - ) - ).subcommand( + .about("Create a new ledger snapshot") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&accountsdb_skip_shrink) + .arg(&ancient_append_vecs) + .arg(&hard_forks_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&snapshot_version_arg) + .arg(&maximum_full_snapshot_archives_to_retain) + .arg(&maximum_incremental_snapshot_archives_to_retain) + .arg(&geyser_plugin_args) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("snapshot_slot") + .index(1) + .value_name("SLOT") + .validator(|value| { + if value.parse::<Slot>().is_ok() || value == "ROOT" { + Ok(()) + } else { + Err(format!( + "Unable to parse as a number or the keyword ROOT, provided: \ + {value}" + )) + } + }) + .takes_value(true) + .help( + "Slot at which to create the snapshot; accepts keyword ROOT for the \ + highest root", + ), + ) + .arg( + Arg::with_name("output_directory") + .index(2) + .value_name("DIR") + .takes_value(true) + .help( + "Output 
directory for the snapshot \ + [default: --snapshot-archive-path if present else --ledger directory]", + ), + ) + .arg( + Arg::with_name("warp_slot") + .required(false) + .long("warp-slot") + .takes_value(true) + .value_name("WARP_SLOT") + .validator(is_slot) + .help( + "After loading the snapshot slot warp the ledger to WARP_SLOT, which \ + could be a slot in a galaxy far far away", + ), + ) + .arg( + Arg::with_name("faucet_lamports") + .short("t") + .long("faucet-lamports") + .value_name("LAMPORTS") + .takes_value(true) + .requires("faucet_pubkey") + .help("Number of lamports to assign to the faucet"), + ) + .arg( + Arg::with_name("faucet_pubkey") + .short("m") + .long("faucet-pubkey") + .value_name("PUBKEY") + .takes_value(true) + .validator(is_pubkey_or_keypair) + .requires("faucet_lamports") + .help("Path to file containing the faucet's pubkey"), + ) + .arg( + Arg::with_name("bootstrap_validator") + .short("b") + .long("bootstrap-validator") + .value_name("IDENTITY_PUBKEY VOTE_PUBKEY STAKE_PUBKEY") + .takes_value(true) + .validator(is_pubkey_or_keypair) + .number_of_values(3) + .multiple(true) + .help("The bootstrap validator's identity, vote and stake pubkeys"), + ) + .arg( + Arg::with_name("bootstrap_stake_authorized_pubkey") + .long("bootstrap-stake-authorized-pubkey") + .value_name("BOOTSTRAP STAKE AUTHORIZED PUBKEY") + .takes_value(true) + .validator(is_pubkey_or_keypair) + .help( + "Path to file containing the pubkey authorized to manage the \ + bootstrap validator's stake + [default: --bootstrap-validator IDENTITY_PUBKEY]", + ), + ) + .arg( + Arg::with_name("bootstrap_validator_lamports") + .long("bootstrap-validator-lamports") + .value_name("LAMPORTS") + .takes_value(true) + .default_value(default_bootstrap_validator_lamports) + .help("Number of lamports to assign to the bootstrap validator"), + ) + .arg( + Arg::with_name("bootstrap_validator_stake_lamports") + .long("bootstrap-validator-stake-lamports") + .value_name("LAMPORTS") + .takes_value(true) + .default_value(default_bootstrap_validator_stake_lamports) + .help( + "Number of lamports to assign to the bootstrap validator's stake \ + account", + ), + ) + .arg( + Arg::with_name("rent_burn_percentage") + .long("rent-burn-percentage") + .value_name("NUMBER") + .takes_value(true) + .help("Adjust percentage of collected rent to burn") + .validator(is_valid_percentage), + ) + .arg(&hashes_per_tick) + .arg( + Arg::with_name("accounts_to_remove") + .required(false) + .long("remove-account") + .takes_value(true) + .value_name("PUBKEY") + .validator(is_pubkey) + .multiple(true) + .help("List of accounts to remove while creating the snapshot"), + ) + .arg( + Arg::with_name("feature_gates_to_deactivate") + .required(false) + .long("deactivate-feature-gate") + .takes_value(true) + .value_name("PUBKEY") + .validator(is_pubkey) + .multiple(true) + .help("List of feature gates to deactivate while creating the snapshot"), + ) + .arg( + Arg::with_name("vote_accounts_to_destake") + .required(false) + .long("destake-vote-account") + .takes_value(true) + .value_name("PUBKEY") + .validator(is_pubkey) + .multiple(true) + .help("List of validator vote accounts to destake"), + ) + .arg( + Arg::with_name("remove_stake_accounts") + .required(false) + .long("remove-stake-accounts") + .takes_value(false) + .help("Remove all existing stake accounts from the new snapshot"), + ) + .arg( + Arg::with_name("incremental") + .long("incremental") + .takes_value(false) + .help( + "Create an incremental snapshot instead of a full snapshot. 
This \ + requires that the ledger is loaded from a full snapshot, which will \ + be used as the base for the incremental snapshot.", + ) + .conflicts_with("no_snapshot"), + ) + .arg( + Arg::with_name("minimized") + .long("minimized") + .takes_value(false) + .help( + "Create a minimized snapshot instead of a full snapshot. This \ + snapshot will only include information needed to replay the ledger \ + from the snapshot slot to the ending slot.", + ) + .conflicts_with("incremental") + .requires("ending_slot"), + ) + .arg( + Arg::with_name("ending_slot") + .long("ending-slot") + .takes_value(true) + .value_name("ENDING_SLOT") + .help("Ending slot for minimized snapshot creation"), + ) + .arg( + Arg::with_name("snapshot_archive_format") + .long("snapshot-archive-format") + .possible_values(SUPPORTED_ARCHIVE_COMPRESSION) + .default_value(DEFAULT_ARCHIVE_COMPRESSION) + .value_name("ARCHIVE_TYPE") + .takes_value(true) + .help("Snapshot archive format to use.") + .conflicts_with("no_snapshot"), + ), + ) + .subcommand( SubCommand::with_name("accounts") - .about("Print account stats and contents after processing the ledger") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&halt_at_slot_arg) - .arg(&hard_forks_arg) - .arg(&geyser_plugin_args) - .arg(&accounts_data_encoding_arg) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("include_sysvars") - .long("include-sysvars") - .takes_value(false) - .help("Include sysvars too"), - ) - .arg( - Arg::with_name("no_account_contents") - .long("no-account-contents") - .takes_value(false) - .help("Do not print contents of each account, which is very slow with lots of accounts."), - ) - .arg(Arg::with_name("no_account_data") - .long("no-account-data") - .takes_value(false) - .help("Do not print account data when printing account contents."), - ) - .arg(&max_genesis_archive_unpacked_size_arg) - ).subcommand( + .about("Print account stats and contents after processing the ledger") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&halt_at_slot_arg) + .arg(&hard_forks_arg) + .arg(&geyser_plugin_args) + .arg(&accounts_data_encoding_arg) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("include_sysvars") + .long("include-sysvars") + .takes_value(false) + .help("Include sysvars too"), + ) + .arg( + Arg::with_name("no_account_contents") + .long("no-account-contents") + .takes_value(false) + .help( + "Do not print contents of each account, which is very slow with lots \ + of accounts.", + ), + ) + .arg( + Arg::with_name("no_account_data") + .long("no-account-data") + .takes_value(false) + .help("Do not print account data when printing account contents."), + ) + .arg(&max_genesis_archive_unpacked_size_arg), + ) + .subcommand( SubCommand::with_name("capitalization") - .about("Print capitalization (aka, total supply) while checksumming it") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - 
.arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&halt_at_slot_arg) - .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&geyser_plugin_args) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("warp_epoch") - .required(false) - .long("warp-epoch") - .takes_value(true) - .value_name("WARP_EPOCH") - .help("After loading the snapshot warp the ledger to WARP_EPOCH, \ - which could be an epoch in a galaxy far far away"), - ) - .arg( - Arg::with_name("inflation") - .required(false) - .long("inflation") - .takes_value(true) - .possible_values(&["pico", "full", "none"]) - .help("Overwrite inflation when warping"), - ) - .arg( - Arg::with_name("enable_credits_auto_rewind") - .required(false) - .long("enable-credits-auto-rewind") - .takes_value(false) - .help("Enable credits auto rewind"), - ) - .arg( - Arg::with_name("recalculate_capitalization") - .required(false) - .long("recalculate-capitalization") - .takes_value(false) - .help("Recalculate capitalization before warping; circumvents \ - bank's out-of-sync capitalization"), - ) - .arg( - Arg::with_name("csv_filename") - .long("csv-filename") - .value_name("FILENAME") - .takes_value(true) - .help("Output file in the csv format"), - ) - ).subcommand( + .about("Print capitalization (aka, total supply) while checksumming it") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&halt_at_slot_arg) + .arg(&hard_forks_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&geyser_plugin_args) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("warp_epoch") + .required(false) + .long("warp-epoch") + .takes_value(true) + .value_name("WARP_EPOCH") + .help( + "After loading the snapshot warp the ledger to WARP_EPOCH, which \ + could be an epoch in a galaxy far far away", + ), + ) + .arg( + Arg::with_name("inflation") + .required(false) + .long("inflation") + .takes_value(true) + .possible_values(&["pico", "full", "none"]) + .help("Overwrite inflation when warping"), + ) + .arg( + Arg::with_name("enable_credits_auto_rewind") + .required(false) + .long("enable-credits-auto-rewind") + .takes_value(false) + .help("Enable credits auto rewind"), + ) + .arg( + Arg::with_name("recalculate_capitalization") + .required(false) + .long("recalculate-capitalization") + .takes_value(false) + .help( + "Recalculate capitalization before warping; circumvents bank's \ + out-of-sync capitalization", + ), + ) + .arg( + Arg::with_name("csv_filename") + .long("csv-filename") + .value_name("FILENAME") + .takes_value(true) + .help("Output file in the csv format"), + ), + ) + .subcommand( SubCommand::with_name("purge") - .about("Delete a range of slots from the ledger") - .arg( - Arg::with_name("start_slot") - .index(1) - .value_name("SLOT") - .takes_value(true) - .required(true) - .help("Start slot to purge from (inclusive)"), - ) - .arg( - Arg::with_name("end_slot") - .index(2) - .value_name("SLOT") - .help("Ending slot to stop purging (inclusive) \ - [default: the highest slot in the ledger]"), - ) - .arg( - Arg::with_name("batch_size") - .long("batch-size") - .value_name("NUM") - 
.takes_value(true) - .default_value("1000") - .help("Removes at most BATCH_SIZE slots while purging in loop"), - ) - .arg( - Arg::with_name("no_compaction") - .long("no-compaction") - .required(false) - .takes_value(false) - .help("--no-compaction is deprecated, ledger compaction \ - after purge is disabled by default") - .conflicts_with("enable_compaction") - .hidden(hidden_unless_forced()) - ) - .arg( - Arg::with_name("enable_compaction") - .long("enable-compaction") - .required(false) - .takes_value(false) - .help("Perform ledger compaction after purge. Compaction \ - will optimize storage space, but may take a long \ - time to complete.") - .conflicts_with("no_compaction") - ) - .arg( - Arg::with_name("dead_slots_only") - .long("dead-slots-only") - .required(false) - .takes_value(false) - .help("Limit purging to dead slots only") - ) + .about("Delete a range of slots from the ledger") + .arg( + Arg::with_name("start_slot") + .index(1) + .value_name("SLOT") + .takes_value(true) + .required(true) + .help("Start slot to purge from (inclusive)"), + ) + .arg(Arg::with_name("end_slot").index(2).value_name("SLOT").help( + "Ending slot to stop purging (inclusive) \ + [default: the highest slot in the ledger]", + )) + .arg( + Arg::with_name("batch_size") + .long("batch-size") + .value_name("NUM") + .takes_value(true) + .default_value("1000") + .help("Removes at most BATCH_SIZE slots while purging in loop"), + ) + .arg( + Arg::with_name("no_compaction") + .long("no-compaction") + .required(false) + .takes_value(false) + .help( + "--no-compaction is deprecated, ledger compaction after purge is \ + disabled by default", + ) + .conflicts_with("enable_compaction") + .hidden(hidden_unless_forced()), + ) + .arg( + Arg::with_name("enable_compaction") + .long("enable-compaction") + .required(false) + .takes_value(false) + .help( + "Perform ledger compaction after purge. Compaction will optimize \ + storage space, but may take a long time to complete.", + ) + .conflicts_with("no_compaction"), + ) + .arg( + Arg::with_name("dead_slots_only") + .long("dead-slots-only") + .required(false) + .takes_value(false) + .help("Limit purging to dead slots only"), + ), ) .subcommand( SubCommand::with_name("list-roots") - .about("Output up to last root hashes and their \ - heights starting at the given block height") - .arg( - Arg::with_name("max_height") - .long("max-height") - .value_name("NUM") - .takes_value(true) - .help("Maximum block height") - ) - .arg( - Arg::with_name("start_root") - .long("start-root") - .value_name("NUM") - .takes_value(true) - .help("First root to start searching from") - ) - .arg( - Arg::with_name("slot_list") - .long("slot-list") - .value_name("FILENAME") - .required(false) - .takes_value(true) - .help("The location of the output YAML file. 
A list of \ - rollback slot heights and hashes will be written to the file") - ) - .arg( - Arg::with_name("num_roots") - .long("num-roots") - .value_name("NUM") - .takes_value(true) - .default_value(DEFAULT_ROOT_COUNT) - .required(false) - .help("Number of roots in the output"), - ) + .about( + "Output up to last root hashes and their heights starting at the \ + given block height", + ) + .arg( + Arg::with_name("max_height") + .long("max-height") + .value_name("NUM") + .takes_value(true) + .help("Maximum block height"), + ) + .arg( + Arg::with_name("start_root") + .long("start-root") + .value_name("NUM") + .takes_value(true) + .help("First root to start searching from"), + ) + .arg( + Arg::with_name("slot_list") + .long("slot-list") + .value_name("FILENAME") + .required(false) + .takes_value(true) + .help( + "The location of the output YAML file. A list of rollback slot \ + heights and hashes will be written to the file", + ), + ) + .arg( + Arg::with_name("num_roots") + .long("num-roots") + .value_name("NUM") + .takes_value(true) + .default_value(DEFAULT_ROOT_COUNT) + .required(false) + .help("Number of roots in the output"), + ), ) .subcommand( SubCommand::with_name("latest-optimistic-slots") - .about("Output up to the most recent optimistic \ - slots with their hashes and timestamps.") + .about( + "Output up to the most recent optimistic slots with their hashes \ + and timestamps.", + ) .arg( Arg::with_name("num_slots") .long("num-slots") @@ -2108,25 +2186,27 @@ fn main() { .long("exclude-vote-only-slots") .required(false) .help("Exclude slots that contain only votes from output"), - ) + ), ) .subcommand( SubCommand::with_name("repair-roots") - .about("Traverses the AncestorIterator backward from a last known root \ - to restore missing roots to the Root column") + .about( + "Traverses the AncestorIterator backward from a last known root to restore \ + missing roots to the Root column", + ) .arg( Arg::with_name("start_root") .long("before") .value_name("NUM") .takes_value(true) - .help("Recent root after the range to repair") + .help("Recent root after the range to repair"), ) .arg( Arg::with_name("end_root") .long("until") .value_name("NUM") .takes_value(true) - .help("Earliest slot to check for root repair") + .help("Earliest slot to check for root repair"), ) .arg( Arg::with_name("max_slots") @@ -2135,40 +2215,47 @@ fn main() { .takes_value(true) .default_value(DEFAULT_MAX_SLOTS_ROOT_REPAIR) .required(true) - .help("Override the maximum number of slots to check for root repair") - ) - ) - .subcommand( - SubCommand::with_name("analyze-storage") - .about("Output statistics in JSON format about \ - all column families in the ledger rocksdb") + .help("Override the maximum number of slots to check for root repair"), + ), ) + .subcommand(SubCommand::with_name("analyze-storage").about( + "Output statistics in JSON format about all column families in the ledger rocksdb", + )) .subcommand( SubCommand::with_name("compute-slot-cost") - .about("runs cost_model over the block at the given slots, \ - computes how expensive a block was based on cost_model") - .arg( - Arg::with_name("slots") - .index(1) - .value_name("SLOTS") - .validator(is_slot) - .multiple(true) - .takes_value(true) - .help("Slots that their blocks are computed for cost, default to all slots in ledger"), - ) + .about( + "runs cost_model over the block at the given slots, computes how expensive a \ + block was based on cost_model", + ) + .arg( + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .validator(is_slot) + 
.multiple(true) + .takes_value(true) + .help( + "Slots that their blocks are computed for cost, default to all slots \ + in ledger", + ), + ), ) .subcommand( SubCommand::with_name("print-file-metadata") - .about("Print the metadata of the specified ledger-store file. \ - If no file name is specified, it will print the metadata of all ledger files.") - .arg( - Arg::with_name("file_name") - .long("file-name") - .takes_value(true) - .value_name("SST_FILE_NAME") - .help("The ledger file name (e.g. 011080.sst.) \ - If no file name is specified, it will print the metadata of all ledger files.") - ) + .about( + "Print the metadata of the specified ledger-store file. If no file name is \ + specified, it will print the metadata of all ledger files.", + ) + .arg( + Arg::with_name("file_name") + .long("file-name") + .takes_value(true) + .value_name("SST_FILE_NAME") + .help( + "The ledger file name (e.g. 011080.sst.) If no file name is \ + specified, it will print the metadata of all ledger files.", + ), + ), ) .program_subcommand() .get_matches(); @@ -2242,11 +2329,10 @@ fn main() { let _ = get_shred_storage_type( &target_db, &format!( - "No --target-db ledger at {:?} was detected, default \ - compaction (RocksLevel) will be used. Fifo compaction \ - can be enabled for a new ledger by manually creating \ - {BLOCKSTORE_DIRECTORY_ROCKS_FIFO} directory within \ - the specified --target_db directory.", + "No --target-db ledger at {:?} was detected, default compaction \ + (RocksLevel) will be used. Fifo compaction can be enabled for a new \ + ledger by manually creating {BLOCKSTORE_DIRECTORY_ROCKS_FIFO} directory \ + within the specified --target_db directory.", &target_db ), ); @@ -2803,8 +2889,8 @@ fn main() { let minimum_stake_lamports = rent.minimum_balance(StakeStateV2::size_of()); if bootstrap_validator_stake_lamports < minimum_stake_lamports { eprintln!( - "Error: insufficient --bootstrap-validator-stake-lamports. \ - Minimum amount is {minimum_stake_lamports}" + "Error: insufficient --bootstrap-validator-stake-lamports. Minimum amount \ + is {minimum_stake_lamports}" ); exit(1); } @@ -2884,7 +2970,8 @@ fn main() { .is_none() { eprintln!( - "Error: snapshot slot {snapshot_slot} does not exist in blockstore or is not full.", + "Error: snapshot slot {snapshot_slot} does not exist in blockstore or is \ + not full.", ); exit(1); } @@ -2894,7 +2981,8 @@ fn main() { let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); if ending_slot <= snapshot_slot { eprintln!( - "Error: ending_slot ({ending_slot}) must be greater than snapshot_slot ({snapshot_slot})" + "Error: ending_slot ({ending_slot}) must be greater than \ + snapshot_slot ({snapshot_slot})" ); exit(1); } @@ -3003,7 +3091,8 @@ fn main() { for address in feature_gates_to_deactivate { let mut account = bank.get_account(&address).unwrap_or_else(|| { eprintln!( - "Error: Feature-gate account does not exist, unable to deactivate it: {address}" + "Error: Feature-gate account does not exist, unable to \ + deactivate it: {address}" ); exit(1); }); @@ -3133,7 +3222,8 @@ fn main() { if let Some(warp_slot) = warp_slot { if warp_slot < minimum_warp_slot { eprintln!( - "Error: --warp-slot too close. Must be >= {minimum_warp_slot}" + "Error: --warp-slot too close. 
Must be >= {minimum_warp_slot}"
+                            "Error: --warp-slot too close. Must be >= \
+                             {minimum_warp_slot}"
                         );
                         exit(1);
                     }
@@ -3186,13 +3276,17 @@ fn main() {

                     if is_incremental {
                         if starting_snapshot_hashes.is_none() {
-                            eprintln!("Unable to create incremental snapshot without a base full snapshot");
+                            eprintln!(
+                                "Unable to create incremental snapshot without a base full \
+                                 snapshot"
+                            );
                             exit(1);
                         }
                         let full_snapshot_slot = starting_snapshot_hashes.unwrap().full.0 .0;
                         if bank.slot() <= full_snapshot_slot {
                             eprintln!(
-                                "Unable to create incremental snapshot: Slot must be greater than full snapshot slot. slot: {}, full snapshot slot: {}",
+                                "Unable to create incremental snapshot: Slot must be greater \
+                                 than full snapshot slot. slot: {}, full snapshot slot: {}",
                                 bank.slot(),
                                 full_snapshot_slot,
                             );
@@ -3217,7 +3311,8 @@ fn main() {
                             });

                         println!(
-                            "Successfully created incremental snapshot for slot {}, hash {}, base slot: {}: {}",
+                            "Successfully created incremental snapshot for slot {}, hash {}, \
+                             base slot: {}: {}",
                             bank.slot(),
                             bank.hash(),
                             full_snapshot_slot,
@@ -3252,12 +3347,23 @@ fn main() {
                             let ending_epoch = bank.epoch_schedule().get_epoch(ending_slot.unwrap());

                             if starting_epoch != ending_epoch {
-                                warn!("Minimized snapshot range crosses epoch boundary ({} to {}). Bank hashes after {} will not match replays from a full snapshot",
-                                    starting_epoch, ending_epoch, bank.epoch_schedule().get_last_slot_in_epoch(starting_epoch));
+                                warn!(
+                                    "Minimized snapshot range crosses epoch boundary ({} to \
+                                     {}). Bank hashes after {} will not match replays from a \
+                                     full snapshot",
+                                    starting_epoch,
+                                    ending_epoch,
+                                    bank.epoch_schedule()
+                                        .get_last_slot_in_epoch(starting_epoch)
+                                );
                             }

                             if minimize_snapshot_possibly_incomplete {
-                                warn!("Minimized snapshot may be incomplete due to missing accounts from CPI'd address lookup table extensions. This may lead to mismatched bank hashes while replaying.");
+                                warn!(
+                                    "Minimized snapshot may be incomplete due to missing \
+                                     accounts from CPI'd address lookup table extensions. \
+                                     This may lead to mismatched bank hashes while replaying."
+                                );
                             }
                         }
                     }
@@ -3510,8 +3616,9 @@ fn main() {
                         let old_cap = base_bank.set_capitalization();
                         let new_cap = base_bank.capitalization();
                         warn!(
-                            "Skewing capitalization a bit to enable credits_auto_rewind as \
-                             requested: increasing {} from {} to {}",
+                            "Skewing capitalization a bit to enable \
+                             credits_auto_rewind as requested: increasing {} from {} \
+                             to {}",
                             feature_account_balance, old_cap, new_cap,
                         );
                         assert_eq!(
@@ -3945,13 +4052,14 @@ fn main() {
                 exit(1);
            }
            info!(
-                "Purging data from slots {} to {} ({} slots) (do compaction: {}) (dead slot only: {})",
-                start_slot,
-                end_slot,
-                end_slot - start_slot,
-                perform_compaction,
-                dead_slots_only,
-            );
+                "Purging data from slots {} to {} ({} slots) (do compaction: {}) (dead slot \
+                 only: {})",
+                start_slot,
+                end_slot,
+                end_slot - start_slot,
+                perform_compaction,
+                dead_slots_only,
+            );
            let purge_from_blockstore = |start_slot, end_slot| {
                blockstore.purge_from_next_slots(start_slot, end_slot);
                if perform_compaction {
@@ -4084,9 +4192,8 @@ fn main() {
        let num_slots = start_root - end_root - 1; // Adjust by one since start_root need not be checked
        if arg_matches.is_present("end_root") && num_slots > max_slots {
            eprintln!(
-                "Requested range {num_slots} too large, max {max_slots}. \
-                 Either adjust `--until` value, or pass a larger `--repair-limit` \
-                 to override the limit",
+                "Requested range {num_slots} too large, max {max_slots}. 
Either adjust \ + `--until` value, or pass a larger `--repair-limit` to override the limit", ); exit(1); } diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index bc44841f818ba0..616dcabbc431f5 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -143,8 +143,8 @@ impl ProgramSubCommand for App<'_, '_> { fn program_subcommand(self) -> Self { let program_arg = Arg::with_name("PROGRAM") .help( - "Program file to use. This is either an ELF shared-object file to be executed, \ - or an assembly file to be assembled and executed.", + "Program file to use. This is either an ELF shared-object file to be executed, or \ + an assembly file to be assembled and executed.", ) .required(true) .index(1);
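
A note on the wrapped-string style used in every hunk above: inside a Rust
string literal, a `\` at the end of a line consumes the newline and all
leading whitespace on the following line, so the reformatted literals are
byte-for-byte identical to the originals at runtime. Below is a minimal,
self-contained sketch of that behavior; the message and variable are invented
for illustration and are not taken from this patch:

fn main() {
    let slot = 12345;
    // The trailing `\` swallows the newline and the next line's leading
    // spaces, so this wrapped literal produces exactly the same string as
    // the unwrapped one-line form would.
    let msg = format!(
        "this deliberately long example message would not fit within the \
         configured line width as a single line, slot: {}",
        slot
    );
    assert_eq!(
        msg,
        "this deliberately long example message would not fit within the \
         configured line width as a single line, slot: 12345"
    );
    println!("{msg}");
}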