From 0382832d0b6fb22db8068d0a80c84e752c8411bd Mon Sep 17 00:00:00 2001 From: Lone G Date: Thu, 17 Oct 2024 11:37:13 +0800 Subject: [PATCH] feat: add rpc crate Signed-off-by: Lone G --- .gitignore | 2 + Cargo.lock | 224 +- Cargo.toml | 32 +- rpc/Cargo.toml | 64 + rpc/src/account_resolver.rs | 15 + rpc/src/error.rs | 12 + rpc/src/filter.rs | 13 + rpc/src/jsonrpc/cache.rs | 59 + rpc/src/jsonrpc/core.rs | 7564 ++++++++++++++++++++++++++++++ rpc/src/jsonrpc/mod.rs | 3 + rpc/src/jsonrpc/service.rs | 506 ++ rpc/src/lib.rs | 21 + rpc/src/parsed_token_accounts.rs | 127 + rpc/src/service.rs | 153 + 14 files changed, 8730 insertions(+), 65 deletions(-) create mode 100644 rpc/Cargo.toml create mode 100644 rpc/src/account_resolver.rs create mode 100644 rpc/src/error.rs create mode 100644 rpc/src/filter.rs create mode 100644 rpc/src/jsonrpc/cache.rs create mode 100644 rpc/src/jsonrpc/core.rs create mode 100644 rpc/src/jsonrpc/mod.rs create mode 100644 rpc/src/jsonrpc/service.rs create mode 100644 rpc/src/lib.rs create mode 100644 rpc/src/parsed_token_accounts.rs create mode 100644 rpc/src/service.rs diff --git a/.gitignore b/.gitignore index 189c290..41d0f2c 100644 --- a/.gitignore +++ b/.gitignore @@ -22,3 +22,5 @@ target/ *.swp bak/* + +rpc/farf diff --git a/Cargo.lock b/Cargo.lock index 95e9a56..1d6f220 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1306,6 +1306,20 @@ dependencies = [ "rayon", ] +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core 0.9.10", +] + [[package]] name = "data-encoding" version = "2.6.0" @@ -1755,9 +1769,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1770,9 +1784,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1780,15 +1794,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1798,15 +1812,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", @@ -1815,21 +1829,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1923,7 +1937,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.30", + "futures 0.3.31", "log", "reqwest", "serde", @@ -1961,7 +1975,7 @@ dependencies = [ "indexmap 2.5.0", "slab", "tokio", - "tokio-util 0.7.11", + "tokio-util 0.7.12", "tracing", ] @@ -2175,7 +2189,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.30", + "futures 0.3.31", "headers", "http", "hyper", @@ -2311,6 +2325,64 @@ dependencies = [ "tokio", ] +[[package]] +name = "igloo-rpc" +version = "0.1.0" +dependencies = [ + "anyhow", + "base64 0.22.1", + "bincode", + "bs58", + "crossbeam-channel", + "dashmap 6.1.0", + "futures 0.3.31", + "igloo-storage", + "itertools 0.13.0", + "jsonrpc-core", + "jsonrpc-core-client", + "jsonrpc-derive", + "jsonrpc-http-server", + "jsonrpc-pubsub", + "libc", + "log", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "soketto 0.8.0", + "solana-account-decoder", + "solana-accounts-db", + "solana-client", + "solana-core", + "solana-entry", + "solana-faucet", + "solana-inline-spl", + "solana-ledger", + "solana-measure", + "solana-metrics", + "solana-perf", + "solana-program", + "solana-rayon-threadlimit", + "solana-rpc", + "solana-rpc-client-api", + "solana-runtime", + "solana-sdk 2.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "solana-stake-program", + "solana-storage-bigtable", + "solana-streamer", + "solana-transaction-status", + "solana-version", + "spl-pod", + "spl-token", + "spl-token-2022", + "stream-cancel", + "tempfile", + "thiserror", + "tokio", + "tokio-util 0.7.12", +] + [[package]] name = "igloo-storage" version = "0.1.0" @@ -2494,6 +2566,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" @@ -2536,7 +2617,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.30", + "futures 0.3.31", "jsonrpc-core", "jsonrpc-pubsub", "log", @@ -2551,7 +2632,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "futures-executor", "futures-util", "log", @@ -2566,7 +2647,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "jsonrpc-client-transports", ] @@ -2588,7 +2669,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -2604,7 +2685,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "jsonrpc-core", "lazy_static", "log", @@ -2620,7 +2701,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.30", + "futures 0.3.31", "globset", "jsonrpc-core", "lazy_static", @@ -2673,9 +2754,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libloading" @@ -3915,9 +3996,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", @@ -3927,9 +4008,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", @@ -3938,9 +4019,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" @@ -3979,7 +4060,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls", - "tokio-util 0.7.11", + "tokio-util 0.7.12", "tower-service", "url 2.5.2", "wasm-bindgen", @@ -4266,9 +4347,9 
@@ dependencies = [ [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -4284,9 +4365,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -4295,9 +4376,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", @@ -4520,13 +4601,28 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.30", + "futures 0.3.31", "httparse", "log", "rand 0.8.5", "sha-1", ] +[[package]] +name = "soketto" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures 0.3.31", + "httparse", + "log", + "rand 0.8.5", + "sha1", +] + [[package]] name = "solana-account-decoder" version = "2.0.7" @@ -4565,7 +4661,7 @@ dependencies = [ "bytemuck_derive", "bzip2", "crossbeam-channel", - "dashmap", + "dashmap 5.5.3", "index_list", "indexmap 2.5.0", "itertools 0.12.1", @@ -4714,8 +4810,8 @@ checksum = "35e9f44bd660baa87ddfa9d7b6c7b65091a9dd1feb6ebb40e40cff8c27e1631a" dependencies = [ "async-trait", "bincode", - "dashmap", - "futures 0.3.30", + "dashmap 5.5.3", + "futures 0.3.31", "futures-util", "indexmap 2.5.0", "indicatif", @@ -4807,9 +4903,9 @@ dependencies = [ "bytes", "chrono", "crossbeam-channel", - "dashmap", + "dashmap 5.5.3", "etcd-client", - "futures 0.3.30", + "futures 0.3.31", "histogram", "itertools 0.12.1", "lazy_static", @@ -5052,10 +5148,10 @@ dependencies = [ "chrono", "chrono-humanize", "crossbeam-channel", - "dashmap", + "dashmap 5.5.3", "eager", "fs_extra", - "futures 0.3.30", + "futures 0.3.31", "itertools 0.12.1", "lazy-lru", "lazy_static", @@ -5361,7 +5457,7 @@ checksum = "b36731edca01a316f722ab14f11b09864d88ca292c4703a8882312890525f3e3" dependencies = [ "async-mutex", "async-trait", - "futures 0.3.30", + "futures 0.3.31", "itertools 0.12.1", "lazy_static", "log", @@ -5418,7 +5514,7 @@ dependencies = [ "bincode", "bs58", "crossbeam-channel", - "dashmap", + "dashmap 5.5.3", "itertools 0.12.1", "jsonrpc-core", "jsonrpc-core-client", @@ -5432,7 +5528,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "soketto", + "soketto 0.7.1", "solana-account-decoder", "solana-accounts-db", "solana-client", @@ -5464,7 +5560,7 @@ dependencies = [ "stream-cancel", "thiserror", "tokio", - "tokio-util 0.7.11", + "tokio-util 0.7.12", ] [[package]] @@ -5547,7 +5643,7 @@ dependencies = [ "byteorder", "bzip2", "crossbeam-channel", - "dashmap", + "dashmap 5.5.3", "dir-diff", "flate2", "fnv", @@ -5785,7 +5881,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.30", + "futures 0.3.31", 
"goauth", "http", "hyper", @@ -5833,7 +5929,7 @@ dependencies = [ "async-channel", "bytes", "crossbeam-channel", - "dashmap", + "dashmap 5.5.3", "futures-util", "histogram", "indexmap 2.5.0", @@ -5990,7 +6086,7 @@ dependencies = [ "bincode", "bytes", "crossbeam-channel", - "futures 0.3.30", + "futures 0.3.31", "itertools 0.12.1", "log", "lru", @@ -6062,7 +6158,7 @@ checksum = "6e2ff783873c0783f083adc1a195fc88e2603a48e2e76628b3a6661a8ccf2c58" dependencies = [ "assert_matches", "crossbeam-channel", - "dashmap", + "dashmap 5.5.3", "derivative", "log", "qualifier_attr", @@ -6328,9 +6424,9 @@ dependencies = [ [[package]] name = "spl-pod" -version = "0.3.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c704c88fc457fa649ba3aabe195c79d885c3f26709efaddc453c8de352c90b87" +checksum = "e6166a591d93af33afd75bbd8573c5fd95fb1213f1bf254f0508c89fdb5ee156" dependencies = [ "borsh 1.5.1", "bytemuck", @@ -6922,9 +7018,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -7018,7 +7114,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.11", + "tokio-util 0.7.12", "tower-layer", "tower-service", "tracing", diff --git a/Cargo.toml b/Cargo.toml index fcba656..828bdf6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ members = [ "svm/cli", "svm/executor", "verifier", + "rpc", ] resolver = "2" @@ -37,6 +38,26 @@ rand = "0.8.5" crossbeam-channel = "0.5.13" tempfile = "3.3.0" assert_matches = "1.5.0" +async-trait = "0.1.80" +bs58 = "0.5.1" +base64 = "0.22.1" +dashmap = "6.1.0" +futures = "0.3.31" +itertools = "0.13.0" +jsonrpc-core = "18.0.0" +jsonrpc-core-client = "18.0.0" +jsonrpc-http-server = "18.0.0" +jsonrpc-derive = "18.0.0" +jsonrpc-pubsub = "18.0.0" +libc = "0.2.159" +rayon = "1.10.0" +regex = "1.11.0" +serde = "1.0.210" +serde_derive = "1.0.210" +serde_json = "1.0.128" +soketto = "0.8.0" +stream-cancel = "0.8.2" +tokio-util = "0.7.12" solana-bpf-loader-program = { version = "=2.0.7" } solana-compute-budget = { version = "=2.0.7" } @@ -62,13 +83,22 @@ solana-metrics = { version = "=2.0.7" } solana-client = { version = "=2.0.7" } solana-connection-cache = { version = "=2.0.7" } solana-send-transaction-service = { version = "=2.0.7" } +solana-faucet = { version = "=2.0.7" } +solana-inline-spl = { version = "=2.0.7" } +solana-perf = { version = "=2.0.7" } +solana-program = { version = "=2.0.7" } +solana-rayon-threadlimit = { version = "=2.0.7" } +solana-rpc-client-api = { version = "=2.0.7" } +solana-storage-bigtable = { version = "=2.0.7" } +solana-version = { version = "=2.0.7" } spl-token = "=6.0.0" spl-token-2022 = "=4.0.0" -async-trait = "0.1.80" +spl-pod = "=0.3.0" igloo-interface = { path = "interface" } igloo-storage = { path = "storage" } igloo-verifier = { path = "verifier" } +igloo-rpc = { path = "rpc" } svm-executor = { path = "svm/executor" } [patch.crates-io] diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml new file mode 100644 index 0000000..9f92751 --- /dev/null +++ b/rpc/Cargo.toml @@ -0,0 +1,64 @@ +[package] +name = "igloo-rpc" +version.workspace = true +authors.workspace = true +repository.workspace = true +homepage.workspace = true +license.workspace = true +edition.workspace = true + +[dependencies] +bs58 = { 
workspace = true } +libc = { workspace = true } +regex = { workspace = true } +serde_derive = { workspace = true } +stream-cancel = { workspace = true } +solana-account-decoder = { workspace = true } +solana-accounts-db = { workspace = true } +solana-core = { workspace = true } +solana-entry = { workspace = true } +solana-faucet = { workspace = true } +solana-inline-spl = { workspace = true } +solana-metrics = { workspace = true } +solana-perf = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } +solana-ledger = { workspace = true } +solana-sdk = { workspace = true } +solana-storage-bigtable = { workspace = true } +solana-streamer = { workspace = true } +solana-transaction-status = { workspace = true } +solana-rpc-client-api = { workspace = true } +solana-version = { workspace = true } +solana-program = { workspace = true } +solana-measure = { workspace = true } +solana-rayon-threadlimit = { workspace = true } +solana-stake-program = { workspace = true } +igloo-storage = { workspace = true } +soketto = { workspace = true } +spl-token = { workspace = true } +spl-token-2022 = { workspace = true } +tokio = { workspace = true, features = ["full"] } +tokio-util = { workspace = true, features = ["codec", "compat"] } +thiserror = { workspace = true } +jsonrpc-http-server = { workspace = true } +jsonrpc-core = { workspace = true } +jsonrpc-core-client = { workspace = true } +jsonrpc-derive = { workspace = true } +jsonrpc-pubsub = { workspace = true } +log = { workspace = true } +base64 = { workspace = true } +bincode = { workspace = true } +crossbeam-channel = { workspace = true } +futures = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +anyhow = { workspace = true } +tempfile = { workspace = true } +itertools = { workspace = true } +rayon = { workspace = true } +dashmap = { workspace = true } + +[dev-dependencies] +solana-rpc = { workspace = true } +solana-client = { workspace = true } +spl-pod = { workspace = true } \ No newline at end of file diff --git a/rpc/src/account_resolver.rs b/rpc/src/account_resolver.rs new file mode 100644 index 0000000..44d232a --- /dev/null +++ b/rpc/src/account_resolver.rs @@ -0,0 +1,15 @@ +use { + solana_runtime::bank::Bank, + solana_sdk::{account::AccountSharedData, pubkey::Pubkey}, + std::collections::HashMap, +}; + +pub(crate) fn get_account_from_overwrites_or_bank( + pubkey: &Pubkey, + bank: &Bank, + overwrite_accounts: Option<&HashMap>, +) -> Option { + overwrite_accounts + .and_then(|accounts| accounts.get(pubkey).cloned()) + .or_else(|| bank.get_account(pubkey)) +} diff --git a/rpc/src/error.rs b/rpc/src/error.rs new file mode 100644 index 0000000..b8060a4 --- /dev/null +++ b/rpc/src/error.rs @@ -0,0 +1,12 @@ +use thiserror::Error; + +pub type Result = std::result::Result; + +#[derive(Debug, Error)] +pub enum Error { + #[error("Init json rpc error: {0}")] + InitJsonRpc(String), + + #[error(transparent)] + IglooStorage(#[from] igloo_storage::Error), +} diff --git a/rpc/src/filter.rs b/rpc/src/filter.rs new file mode 100644 index 0000000..81cebd2 --- /dev/null +++ b/rpc/src/filter.rs @@ -0,0 +1,13 @@ +use { + solana_inline_spl::{token::GenericTokenAccount, token_2022::Account}, + solana_rpc_client_api::filter::RpcFilterType, + solana_sdk::account::{AccountSharedData, ReadableAccount}, +}; + +pub fn filter_allows(filter: &RpcFilterType, account: &AccountSharedData) -> bool { + match filter { + RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, + 
RpcFilterType::Memcmp(compare) => compare.bytes_match(account.data()), + RpcFilterType::TokenAccountState => Account::valid_account_data(account.data()), + } +} diff --git a/rpc/src/jsonrpc/cache.rs b/rpc/src/jsonrpc/cache.rs new file mode 100644 index 0000000..bcff475 --- /dev/null +++ b/rpc/src/jsonrpc/cache.rs @@ -0,0 +1,59 @@ +use { + solana_rpc_client_api::{config::RpcLargestAccountsFilter, response::RpcAccountBalance}, + std::{ + collections::HashMap, + time::{Duration, SystemTime}, + }, +}; + +#[derive(Debug, Clone)] +pub struct LargestAccountsCache { + duration: u64, + cache: HashMap, LargestAccountsCacheValue>, +} + +#[derive(Debug, Clone)] +struct LargestAccountsCacheValue { + accounts: Vec, + slot: u64, + cached_time: SystemTime, +} + +impl LargestAccountsCache { + pub(crate) fn new(duration: u64) -> Self { + Self { + duration, + cache: HashMap::new(), + } + } + + pub(crate) fn get_largest_accounts( + &self, + filter: &Option, + ) -> Option<(u64, Vec)> { + self.cache.get(filter).and_then(|value| { + if let Ok(elapsed) = value.cached_time.elapsed() { + if elapsed < Duration::from_secs(self.duration) { + return Some((value.slot, value.accounts.clone())); + } + } + None + }) + } + + pub(crate) fn set_largest_accounts( + &mut self, + filter: &Option, + slot: u64, + accounts: &[RpcAccountBalance], + ) { + self.cache.insert( + filter.clone(), + LargestAccountsCacheValue { + accounts: accounts.to_owned(), + slot, + cached_time: SystemTime::now(), + }, + ); + } +} diff --git a/rpc/src/jsonrpc/core.rs b/rpc/src/jsonrpc/core.rs new file mode 100644 index 0000000..feb8f96 --- /dev/null +++ b/rpc/src/jsonrpc/core.rs @@ -0,0 +1,7564 @@ +//! The `rpc` module implements the Solana RPC interface. + +use super::cache::LargestAccountsCache; +use crate::{filter::filter_allows, parsed_token_accounts::*}; +use jsonrpc_core::ErrorCode; +use solana_program::vote::state::VoteState; +use solana_rpc_client_api::request::{ + DELINQUENT_VALIDATOR_SLOT_DISTANCE, MAX_RPC_VOTE_ACCOUNT_INFO_EPOCH_CREDITS_HISTORY, +}; +use solana_runtime::commitment::BlockCommitmentArray; +use { + base64::{prelude::BASE64_STANDARD, Engine}, + bincode::config::Options, + crossbeam_channel::{unbounded, Receiver, Sender}, + jsonrpc_core::{futures::future, BoxFuture, Error, Metadata, Result}, + jsonrpc_derive::rpc, + serde_derive::{Deserialize, Serialize}, + solana_account_decoder::{ + parse_account_data::SplTokenAdditionalData, + parse_token::{is_known_spl_token_id, token_amount_to_ui_amount_v2, UiTokenAmount}, + UiAccount, UiAccountEncoding, UiDataSliceConfig, MAX_BASE58_BYTES, + }, + solana_accounts_db::{ + accounts::AccountAddressFilter, + accounts_index::{AccountIndex, AccountSecondaryIndexes, IndexKey, ScanConfig}, + }, + solana_entry::entry::Entry, + solana_faucet::faucet::request_airdrop_transaction, + solana_inline_spl::{ + token::{SPL_TOKEN_ACCOUNT_MINT_OFFSET, SPL_TOKEN_ACCOUNT_OWNER_OFFSET}, + token_2022::{self, ACCOUNTTYPE_ACCOUNT}, + }, + solana_ledger::{ + blockstore::{Blockstore, SignatureInfosForAddress}, + blockstore_db::BlockstoreError, + blockstore_meta::{PerfSample, PerfSampleV1, PerfSampleV2}, + get_tmp_ledger_path, + }, + solana_metrics::inc_new_counter_info, + solana_perf::packet::PACKET_DATA_SIZE, + solana_rpc_client_api::{ + config::*, + custom_error::RpcCustomError, + filter::{Memcmp, RpcFilterType}, + request::{ + TokenAccountsFilter, MAX_GET_CONFIRMED_BLOCKS_RANGE, + MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT, MAX_GET_PROGRAM_ACCOUNT_FILTERS, + MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, 
MAX_MULTIPLE_ACCOUNTS, NUM_LARGEST_ACCOUNTS, + }, + response::{Response as RpcResponse, *}, + }, + solana_runtime::{ + bank::{Bank, TransactionSimulationResult}, + bank_forks::BankForks, + non_circulating_supply::calculate_non_circulating_supply, + prioritization_fee_cache::PrioritizationFeeCache, + snapshot_config::SnapshotConfig, + snapshot_utils, + }, + solana_sdk::{ + account::{AccountSharedData, ReadableAccount}, + clock::{Slot, UnixTimestamp, MAX_PROCESSING_AGE}, + commitment_config::CommitmentConfig, + epoch_info::EpochInfo, + epoch_rewards_hasher::EpochRewardsHasher, + epoch_schedule::EpochSchedule, + exit::Exit, + feature_set, + hash::Hash, + message::SanitizedMessage, + pubkey::{Pubkey, PUBKEY_BYTES}, + signature::{Keypair, Signature, Signer}, + system_instruction, + transaction::{ + self, AddressLoader, MessageHash, SanitizedTransaction, TransactionError, + VersionedTransaction, MAX_TX_ACCOUNT_LOCKS, + }, + }, + solana_storage_bigtable::Error as StorageError, + solana_transaction_status::{ + map_inner_instructions, BlockEncodingOptions, ConfirmedBlock, + ConfirmedTransactionStatusWithSignature, ConfirmedTransactionWithStatusMeta, + EncodedConfirmedTransactionWithStatusMeta, Reward, RewardType, Rewards, + TransactionBinaryEncoding, TransactionConfirmationStatus, TransactionStatus, + UiConfirmedBlock, UiTransactionEncoding, + }, + spl_token_2022::{ + extension::{ + interest_bearing_mint::InterestBearingConfig, BaseStateWithExtensions, + StateWithExtensions, + }, + solana_program::program_pack::Pack, + state::{Account as TokenAccount, Mint}, + }, + std::{ + any::type_name, + cmp::{max, min, Reverse}, + collections::{BinaryHeap, HashMap, HashSet}, + convert::TryFrom, + net::SocketAddr, + str::FromStr, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, RwLock, + }, + time::Duration, + }, +}; + +type RpcCustomResult = std::result::Result; + +pub const MAX_REQUEST_BODY_SIZE: usize = 50 * (1 << 10); // 50kB +pub const PERFORMANCE_SAMPLES_LIMIT: usize = 720; + +fn new_response(bank: &Bank, value: T) -> RpcResponse { + RpcResponse { + context: RpcResponseContext::new(bank.slot()), + value, + } +} + +fn is_finalized(bank: &Bank, blockstore: &Blockstore, slot: Slot) -> bool { + blockstore.is_root(slot) || bank.status_cache_ancestors().contains(&slot) +} + +#[derive(Debug, Default, Clone)] +pub struct JsonRpcConfig { + pub enable_rpc_transaction_history: bool, + pub enable_extended_tx_metadata_storage: bool, + // TODO: should replace to new faucet implementation + pub faucet_addr: Option, + pub rpc_bigtable_config: Option, + pub max_multiple_accounts: Option, + pub account_indexes: AccountSecondaryIndexes, + pub rpc_threads: usize, + pub rpc_niceness_adj: i8, + pub full_api: bool, + pub rpc_scan_and_fix_roots: bool, + pub max_request_body_size: Option, +} + +impl JsonRpcConfig { + pub fn default_for_test() -> Self { + Self { + full_api: true, + ..Self::default() + } + } +} + +#[derive(Debug, Clone)] +pub struct RpcBigtableConfig { + pub enable_bigtable_ledger_upload: bool, + pub bigtable_instance_name: String, + pub bigtable_app_profile_id: String, + pub timeout: Option, + pub max_message_size: usize, +} + +impl Default for RpcBigtableConfig { + fn default() -> Self { + let bigtable_instance_name = solana_storage_bigtable::DEFAULT_INSTANCE_NAME.to_string(); + let bigtable_app_profile_id = solana_storage_bigtable::DEFAULT_APP_PROFILE_ID.to_string(); + Self { + enable_bigtable_ledger_upload: false, + bigtable_instance_name, + bigtable_app_profile_id, + timeout: None, + 
max_message_size: solana_storage_bigtable::DEFAULT_MAX_MESSAGE_SIZE,
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct OutputAtBlockResp {
+    version: String,
+    output_root: String,
+    state_root: String,
+    withdrawal_root: String,
+    block_hash: String,
+}
+
+/// SoonGetWithdrawalProofResp follows the format of the eth_getProof response, but with SOON-specific info.
+/// It contains the following fields:
+/// * the merkle root of all Solana account state.
+/// * the merkle proof for the withdrawal native program address (referred to as WNP below).
+/// * When one raises a withdrawal request, the WNP will derive a new PDA account.
+/// This API also generates the root and proof of this PDA in a storage-hash manner.
+/// So the API response also contains:
+/// * the merkle root of the WNP storage hash
+/// * the proof for the PDA address in the WNP's storage root
+#[derive(Serialize, Deserialize, Debug, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct SoonGetWithdrawalProofResp {
+    pub state_root: String, // state root of all addresses
+    pub withdrawal_root: String, // state root of WNP storage_root
+    pub state_proof: Vec, // proof for WNP on root_hash
+    pub withdrawal_proof: Vec, // proof for pda on storage_hash
+    pub psr: String,
+    pub pwr: String,
+}
+
+#[derive(Clone)]
+pub struct JsonRpcRequestProcessor {
+    bank_forks: Arc>,
+    blockstore: Arc,
+    config: JsonRpcConfig,
+    snapshot_config: Option,
+    node_exit: Arc>,
+    genesis_hash: Hash,
+    tx_channel: (Sender, Receiver),
+    bigtable_ledger_storage: Option,
+    largest_accounts_cache: Arc>,
+    max_complete_transaction_status_slot: Arc,
+    prioritization_fee_cache: Arc,
+}
+impl Metadata for JsonRpcRequestProcessor {}
+
+impl JsonRpcRequestProcessor {
+    pub fn clone_without_bigtable(&self) -> JsonRpcRequestProcessor {
+        Self {
+            bigtable_ledger_storage: None, // Disable BigTable
+            ..self.clone()
+        }
+    }
+}
+
+impl JsonRpcRequestProcessor {
+    fn get_bank_with_config(&self, config: RpcContextConfig) -> Result> {
+        let RpcContextConfig {
+            commitment,
+            min_context_slot,
+        } = config;
+        let bank = self.bank(commitment);
+        if let Some(min_context_slot) = min_context_slot {
+            if bank.slot() < min_context_slot {
+                return Err(RpcCustomError::MinContextSlotNotReached {
+                    context_slot: bank.slot(),
+                }
+                .into());
+            }
+        }
+        Ok(bank)
+    }
+
+    #[allow(deprecated)]
+    fn bank(&self, _commitment: Option) -> Arc {
+        // NOTE: ignore commitment; always return the working bank
+        self.bank_forks.read().unwrap().working_bank()
+    }
+
+    fn genesis_creation_time(&self) -> UnixTimestamp {
+        self.bank(None).genesis_creation_time()
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    pub fn new(
+        config: JsonRpcConfig,
+        snapshot_config: Option,
+        bank_forks: Arc>,
+        blockstore: Arc,
+        node_exit: Arc>,
+        genesis_hash: Hash,
+        tx_channel: (Sender, Receiver),
+        bigtable_ledger_storage: Option,
+        largest_accounts_cache: Arc>,
+        max_complete_transaction_status_slot: Arc,
+        prioritization_fee_cache: Arc,
+    ) -> Self {
+        Self {
+            config,
+            snapshot_config,
+            bank_forks,
+            blockstore,
+            node_exit,
+            genesis_hash,
+            tx_channel,
+            bigtable_ledger_storage,
+            largest_accounts_cache,
+            max_complete_transaction_status_slot,
+            prioritization_fee_cache,
+        }
+    }
+
+    // Useful for unit testing
+    pub fn new_from_bank(bank: Bank) -> Self {
+        let genesis_hash = bank.hash();
+        let bank_forks = BankForks::new_rw_arc(bank);
+        let blockstore = Arc::new(Blockstore::open(&get_tmp_ledger_path!()).unwrap());
+        let exit = Arc::new(AtomicBool::new(false));
+
+        Self {
+            config: JsonRpcConfig::default(),
+            snapshot_config: None,
+ bank_forks, + blockstore: Arc::clone(&blockstore), + node_exit: create_node_exit(exit.clone()), + genesis_hash, + tx_channel: unbounded(), + bigtable_ledger_storage: None, + largest_accounts_cache: Arc::new(RwLock::new(LargestAccountsCache::new(30))), + max_complete_transaction_status_slot: Arc::new(AtomicU64::default()), + prioritization_fee_cache: Arc::new(PrioritizationFeeCache::default()), + } + } + + pub fn get_account_info( + &self, + pubkey: &Pubkey, + config: Option, + ) -> Result>> { + let RpcAccountInfoConfig { + encoding, + data_slice, + commitment, + min_context_slot, + } = config.unwrap_or_default(); + let bank = self.get_bank_with_config(RpcContextConfig { + commitment, + min_context_slot, + })?; + let encoding = encoding.unwrap_or(UiAccountEncoding::Binary); + + let response = get_encoded_account(&bank, pubkey, encoding, data_slice, None)?; + Ok(new_response(&bank, response)) + } + + pub fn get_multiple_accounts( + &self, + pubkeys: Vec, + config: Option, + ) -> Result>>> { + let RpcAccountInfoConfig { + encoding, + data_slice, + commitment, + min_context_slot, + } = config.unwrap_or_default(); + let bank = self.get_bank_with_config(RpcContextConfig { + commitment, + min_context_slot, + })?; + let encoding = encoding.unwrap_or(UiAccountEncoding::Base64); + + let accounts = pubkeys + .into_iter() + .map(|pubkey| get_encoded_account(&bank, &pubkey, encoding, data_slice, None)) + .collect::>>()?; + Ok(new_response(&bank, accounts)) + } + + pub fn get_minimum_balance_for_rent_exemption( + &self, + data_len: usize, + commitment: Option, + ) -> u64 { + self.bank(commitment) + .get_minimum_balance_for_rent_exemption(data_len) + } + + pub fn get_program_accounts( + &self, + program_id: &Pubkey, + config: Option, + mut filters: Vec, + with_context: bool, + sort_results: bool, + ) -> Result>> { + let RpcAccountInfoConfig { + encoding, + data_slice: data_slice_config, + commitment, + min_context_slot, + } = config.unwrap_or_default(); + let bank = self.get_bank_with_config(RpcContextConfig { + commitment, + min_context_slot, + })?; + let encoding = encoding.unwrap_or(UiAccountEncoding::Binary); + optimize_filters(&mut filters); + let keyed_accounts = { + if let Some(owner) = get_spl_token_owner_filter(program_id, &filters) { + self.get_filtered_spl_token_accounts_by_owner( + &bank, + program_id, + &owner, + filters, + sort_results, + )? + } else if let Some(mint) = get_spl_token_mint_filter(program_id, &filters) { + self.get_filtered_spl_token_accounts_by_mint( + &bank, + program_id, + &mint, + filters, + sort_results, + )? + } else { + self.get_filtered_program_accounts(&bank, program_id, filters, sort_results)? + } + }; + let accounts = if is_known_spl_token_id(program_id) + && encoding == UiAccountEncoding::JsonParsed + { + get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() + } else { + keyed_accounts + .into_iter() + .map(|(pubkey, account)| { + Ok(RpcKeyedAccount { + pubkey: pubkey.to_string(), + account: encode_account(&account, &pubkey, encoding, data_slice_config)?, + }) + }) + .collect::>>()? 
+ }; + Ok(match with_context { + true => OptionalContext::Context(new_response(&bank, accounts)), + false => OptionalContext::NoContext(accounts), + }) + } + + fn filter_map_rewards<'a, F>( + rewards: &'a Option, + slot: Slot, + addresses: &'a [String], + reward_type_filter: &'a F, + ) -> HashMap + where + F: Fn(RewardType) -> bool, + { + Self::filter_rewards(rewards, reward_type_filter) + .filter(|reward| addresses.contains(&reward.pubkey)) + .map(|reward| (reward.pubkey.clone(), (reward.clone(), slot))) + .collect() + } + + fn filter_rewards<'a, F>( + rewards: &'a Option, + reward_type_filter: &'a F, + ) -> impl Iterator + where + F: Fn(RewardType) -> bool, + { + rewards + .iter() + .flatten() + .filter(move |reward| reward.reward_type.is_some_and(reward_type_filter)) + } + + pub async fn get_inflation_reward( + &self, + addresses: Vec, + config: Option, + ) -> Result>> { + let config = config.unwrap_or_default(); + let epoch_schedule = self.get_epoch_schedule(); + let first_available_block = self.get_first_available_block().await; + let context_config = RpcContextConfig { + commitment: config.commitment, + min_context_slot: config.min_context_slot, + }; + let epoch = match config.epoch { + Some(epoch) => epoch, + None => epoch_schedule + .get_epoch(self.get_slot(context_config)?) + .saturating_sub(1), + }; + + // Rewards for this epoch are found in the first confirmed block of the next epoch + let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch.saturating_add(1)); + if first_slot_in_epoch < first_available_block { + return if self.bigtable_ledger_storage.is_some() { + Err(RpcCustomError::LongTermStorageSlotSkipped { + slot: first_slot_in_epoch, + } + .into()) + } else { + Err(RpcCustomError::BlockCleanedUp { + slot: first_slot_in_epoch, + first_available_block, + } + .into()) + }; + } + + let first_confirmed_block_in_epoch = *self + .get_blocks_with_limit(first_slot_in_epoch, 1, Some(context_config)) + .await? + .first() + .ok_or(RpcCustomError::BlockNotAvailable { + slot: first_slot_in_epoch, + })?; + + // Determine if partitioned epoch rewards were enabled for the desired + // epoch + let bank = self.get_bank_with_config(context_config)?; + + // DO NOT CLEAN UP with feature_set::enable_partitioned_epoch_reward + // This logic needs to be retained indefinitely to support historical + // rewards before and after feature activation. 
+ let partitioned_epoch_reward_enabled_slot = bank + .feature_set + .activated_slot(&feature_set::enable_partitioned_epoch_reward::id()); + let partitioned_epoch_reward_enabled = partitioned_epoch_reward_enabled_slot + .map(|slot| slot <= first_confirmed_block_in_epoch) + .unwrap_or(false); + + // Get first block in the epoch + let Ok(Some(epoch_boundary_block)) = self + .get_block( + first_confirmed_block_in_epoch, + Some(RpcBlockConfig::rewards_with_commitment(config.commitment).into()), + ) + .await + else { + return Err(RpcCustomError::BlockNotAvailable { + slot: first_confirmed_block_in_epoch, + } + .into()); + }; + + // Collect rewards from first block in the epoch if partitioned epoch + // rewards not enabled, or address is a vote account + let mut reward_map: HashMap = { + let addresses: Vec = + addresses.iter().map(|pubkey| pubkey.to_string()).collect(); + Self::filter_map_rewards( + &epoch_boundary_block.rewards, + first_confirmed_block_in_epoch, + &addresses, + &|reward_type| -> bool { + reward_type == RewardType::Voting + || (!partitioned_epoch_reward_enabled && reward_type == RewardType::Staking) + }, + ) + }; + + // Append stake account rewards from partitions if partitions epoch + // rewards is enabled + if partitioned_epoch_reward_enabled { + let num_partitions = epoch_boundary_block.num_reward_partitions.expect( + "epoch-boundary block should have num_reward_partitions after partitioned epoch \ + rewards enabled", + ); + + let num_partitions = usize::try_from(num_partitions) + .expect("num_partitions should never exceed usize::MAX"); + let hasher = EpochRewardsHasher::new( + num_partitions, + &Hash::from_str(&epoch_boundary_block.previous_blockhash) + .expect("UiConfirmedBlock::previous_blockhash should be properly formed"), + ); + let mut partition_index_addresses: HashMap> = HashMap::new(); + for address in addresses.iter() { + let address_string = address.to_string(); + // Skip this address if (Voting) rewards were already found in + // the first block of the epoch + if !reward_map.contains_key(&address_string) { + let partition_index = hasher.clone().hash_address_to_partition(address); + partition_index_addresses + .entry(partition_index) + .and_modify(|list| list.push(address_string.clone())) + .or_insert(vec![address_string]); + } + } + + let block_list = self + .get_blocks_with_limit( + first_confirmed_block_in_epoch + 1, + num_partitions, + Some(context_config), + ) + .await?; + + for (partition_index, addresses) in partition_index_addresses.iter() { + let slot = *block_list.get(*partition_index).ok_or_else(|| { + // If block_list.len() too short to contain + // partition_index, the epoch rewards period must be + // currently active. 
+ let rewards_complete_block_height = epoch_boundary_block + .block_height + .map(|block_height| { + block_height + .saturating_add(num_partitions as u64) + .saturating_add(1) + }) + .expect( + "every block after partitioned_epoch_reward_enabled should have a \ + populated block_height", + ); + RpcCustomError::EpochRewardsPeriodActive { + slot: bank.slot(), + current_block_height: bank.block_height(), + rewards_complete_block_height, + } + })?; + + let Ok(Some(block)) = self + .get_block( + slot, + Some(RpcBlockConfig::rewards_with_commitment(config.commitment).into()), + ) + .await + else { + return Err(RpcCustomError::BlockNotAvailable { slot }.into()); + }; + + let index_reward_map = Self::filter_map_rewards( + &block.rewards, + slot, + addresses, + &|reward_type| -> bool { reward_type == RewardType::Staking }, + ); + reward_map.extend(index_reward_map); + } + } + + let rewards = addresses + .iter() + .map(|address| { + if let Some((reward, slot)) = reward_map.get(&address.to_string()) { + return Some(RpcInflationReward { + epoch, + effective_slot: *slot, + amount: reward.lamports.unsigned_abs(), + post_balance: reward.post_balance, + commission: reward.commission, + }); + } + None + }) + .collect(); + + Ok(rewards) + } + + pub fn get_inflation_governor( + &self, + commitment: Option, + ) -> RpcInflationGovernor { + self.bank(commitment).inflation().into() + } + + pub fn get_inflation_rate(&self) -> RpcInflationRate { + let bank = self.bank(None); + let epoch = bank.epoch(); + let inflation = bank.inflation(); + let slot_in_year = bank.slot_in_year_for_inflation(); + + RpcInflationRate { + total: inflation.total(slot_in_year), + validator: inflation.validator(slot_in_year), + foundation: inflation.foundation(slot_in_year), + epoch, + } + } + + pub fn get_epoch_schedule(&self) -> EpochSchedule { + // Since epoch schedule data comes from the genesis config, any commitment level should be + // fine + let bank = self.bank(None); + bank.epoch_schedule().clone() + } + + pub fn get_balance( + &self, + pubkey: &Pubkey, + config: RpcContextConfig, + ) -> Result> { + let bank = self.get_bank_with_config(config)?; + Ok(new_response(&bank, bank.get_balance(pubkey))) + } + + pub fn confirm_transaction( + &self, + signature: &Signature, + commitment: Option, + ) -> Result> { + let bank = self.bank(commitment); + let status = bank.get_signature_status(signature); + match status { + Some(status) => Ok(new_response(&bank, status.is_ok())), + None => Ok(new_response(&bank, false)), + } + } + + fn get_block_commitment(&self, _block: Slot) -> RpcBlockCommitment { + // NOTE: not supported + RpcBlockCommitment { + commitment: None, + total_stake: 0, + } + } + + fn get_slot(&self, config: RpcContextConfig) -> Result { + let bank = self.get_bank_with_config(config)?; + Ok(bank.slot()) + } + + fn get_block_height(&self, config: RpcContextConfig) -> Result { + let bank = self.get_bank_with_config(config)?; + Ok(bank.block_height()) + } + + fn get_slot_leader(&self, config: RpcContextConfig) -> Result { + let bank = self.get_bank_with_config(config)?; + Ok(bank.collector_id().to_string()) + } + + fn get_slot_leaders( + &self, + commitment: Option, + _start_slot: Slot, + _limit: usize, + ) -> Result> { + let bank = self.bank(commitment); + Ok(vec![*bank.collector_id()]) + } + + fn minimum_ledger_slot(&self) -> Result { + match self.blockstore.slot_meta_iterator(0) { + Ok(mut metas) => match metas.next() { + Some((slot, _meta)) => Ok(slot), + None => Err(Error::invalid_request()), + }, + Err(err) => { + 
warn!("slot_meta_iterator failed: {:?}", err); + Err(Error::invalid_request()) + } + } + } + + fn get_transaction_count(&self, config: RpcContextConfig) -> Result { + let bank = self.get_bank_with_config(config)?; + Ok(bank.transaction_count()) + } + + fn get_cached_largest_accounts( + &self, + filter: &Option, + ) -> Option<(u64, Vec)> { + let largest_accounts_cache = self.largest_accounts_cache.read().unwrap(); + largest_accounts_cache.get_largest_accounts(filter) + } + + fn set_cached_largest_accounts( + &self, + filter: &Option, + slot: u64, + accounts: &[RpcAccountBalance], + ) { + let mut largest_accounts_cache = self.largest_accounts_cache.write().unwrap(); + largest_accounts_cache.set_largest_accounts(filter, slot, accounts) + } + + fn get_largest_accounts( + &self, + config: Option, + ) -> RpcCustomResult>> { + let config = config.unwrap_or_default(); + let bank = self.bank(config.commitment); + let sort_results = config.sort_results.unwrap_or(true); + + if let Some((slot, accounts)) = self.get_cached_largest_accounts(&config.filter) { + Ok(RpcResponse { + context: RpcResponseContext::new(slot), + value: accounts, + }) + } else { + let (addresses, address_filter) = if let Some(filter) = config.clone().filter { + let non_circulating_supply = + calculate_non_circulating_supply(&bank).map_err(|e| { + RpcCustomError::ScanError { + message: e.to_string(), + } + })?; + let addresses = non_circulating_supply.accounts.into_iter().collect(); + let address_filter = match filter { + RpcLargestAccountsFilter::Circulating => AccountAddressFilter::Exclude, + RpcLargestAccountsFilter::NonCirculating => AccountAddressFilter::Include, + }; + (addresses, address_filter) + } else { + (HashSet::new(), AccountAddressFilter::Exclude) + }; + let accounts = bank + .get_largest_accounts( + NUM_LARGEST_ACCOUNTS, + &addresses, + address_filter, + sort_results, + ) + .map_err(|e| RpcCustomError::ScanError { + message: e.to_string(), + })? + .into_iter() + .map(|(address, lamports)| RpcAccountBalance { + address: address.to_string(), + lamports, + }) + .collect::>(); + + self.set_cached_largest_accounts(&config.filter, bank.slot(), &accounts); + Ok(new_response(&bank, accounts)) + } + } + + fn get_supply( + &self, + config: Option, + ) -> RpcCustomResult> { + let config = config.unwrap_or_default(); + let bank = self.bank(config.commitment); + let non_circulating_supply = + calculate_non_circulating_supply(&bank).map_err(|e| RpcCustomError::ScanError { + message: e.to_string(), + })?; + let total_supply = bank.capitalization(); + let non_circulating_accounts = if config.exclude_non_circulating_accounts_list { + vec![] + } else { + non_circulating_supply + .accounts + .iter() + .map(|pubkey| pubkey.to_string()) + .collect() + }; + + Ok(new_response( + &bank, + RpcSupply { + total: total_supply, + circulating: total_supply - non_circulating_supply.lamports, + non_circulating: non_circulating_supply.lamports, + non_circulating_accounts, + }, + )) + } + + fn get_vote_accounts( + &self, + config: Option, + ) -> Result { + let config = config.unwrap_or_default(); + + let filter_by_vote_pubkey = if let Some(ref vote_pubkey) = config.vote_pubkey { + Some(verify_pubkey(vote_pubkey)?) 
+ } else { + None + }; + + let bank = self.bank(config.commitment); + let vote_accounts = bank.vote_accounts(); + let epoch_vote_accounts = bank + .epoch_vote_accounts(bank.get_epoch_and_slot_index(bank.slot()).0) + .ok_or_else(Error::invalid_request)?; + let default_vote_state = VoteState::default(); + let delinquent_validator_slot_distance = config + .delinquent_slot_distance + .unwrap_or(DELINQUENT_VALIDATOR_SLOT_DISTANCE); + let (current_vote_accounts, delinquent_vote_accounts): ( + Vec, + Vec, + ) = vote_accounts + .iter() + .filter_map(|(vote_pubkey, (activated_stake, account))| { + if let Some(filter_by_vote_pubkey) = filter_by_vote_pubkey { + if *vote_pubkey != filter_by_vote_pubkey { + return None; + } + } + + let vote_state = account.vote_state(); + let vote_state = vote_state.unwrap_or(&default_vote_state); + let last_vote = if let Some(vote) = vote_state.votes.iter().last() { + vote.slot() + } else { + 0 + }; + + let epoch_credits = vote_state.epoch_credits(); + let epoch_credits = if epoch_credits.len() + > MAX_RPC_VOTE_ACCOUNT_INFO_EPOCH_CREDITS_HISTORY + { + epoch_credits + .iter() + .skip(epoch_credits.len() - MAX_RPC_VOTE_ACCOUNT_INFO_EPOCH_CREDITS_HISTORY) + .cloned() + .collect() + } else { + epoch_credits.clone() + }; + + Some(RpcVoteAccountInfo { + vote_pubkey: vote_pubkey.to_string(), + node_pubkey: vote_state.node_pubkey.to_string(), + activated_stake: *activated_stake, + commission: vote_state.commission, + root_slot: vote_state.root_slot.unwrap_or(0), + epoch_credits, + epoch_vote_account: epoch_vote_accounts.contains_key(vote_pubkey), + last_vote, + }) + }) + .partition(|vote_account_info| { + if bank.slot() >= delinquent_validator_slot_distance { + vote_account_info.last_vote > bank.slot() - delinquent_validator_slot_distance + } else { + vote_account_info.last_vote > 0 + } + }); + + let keep_unstaked_delinquents = config.keep_unstaked_delinquents.unwrap_or_default(); + let delinquent_vote_accounts = if !keep_unstaked_delinquents { + delinquent_vote_accounts + .into_iter() + .filter(|vote_account_info| vote_account_info.activated_stake > 0) + .collect::>() + } else { + delinquent_vote_accounts + }; + + Ok(RpcVoteAccountStatus { + current: current_vote_accounts, + delinquent: delinquent_vote_accounts, + }) + } + + fn check_blockstore_root( + &self, + result: &std::result::Result, + slot: Slot, + ) -> Result<()> { + if let Err(err) = result { + debug!( + "check_blockstore_root, slot: {:?}, max root: {:?}, err: {:?}", + slot, + self.blockstore.max_root(), + err + ); + if slot >= self.blockstore.max_root() { + return Err(RpcCustomError::BlockNotAvailable { slot }.into()); + } + if self.blockstore.is_skipped(slot) { + return Err(RpcCustomError::SlotSkipped { slot }.into()); + } + } + Ok(()) + } + + fn check_slot_cleaned_up( + &self, + result: &std::result::Result, + slot: Slot, + ) -> Result<()> { + let first_available_block = self + .blockstore + .get_first_available_block() + .unwrap_or_default(); + let err: Error = RpcCustomError::BlockCleanedUp { + slot, + first_available_block, + } + .into(); + if let Err(BlockstoreError::SlotCleanedUp) = result { + return Err(err); + } + if slot < first_available_block { + return Err(err); + } + Ok(()) + } + + fn check_bigtable_result( + &self, + result: &std::result::Result, + ) -> Result<()> { + if let Err(solana_storage_bigtable::Error::BlockNotFound(slot)) = result { + return Err(RpcCustomError::LongTermStorageSlotSkipped { slot: *slot }.into()); + } + Ok(()) + } + + fn check_blockstore_writes_complete(&self, slot: Slot) -> 
Result<()> { + if slot + > self + .max_complete_transaction_status_slot + .load(Ordering::SeqCst) + { + Err(RpcCustomError::BlockStatusNotAvailableYet { slot }.into()) + } else { + Ok(()) + } + } + + pub async fn get_block( + &self, + slot: Slot, + config: Option>, + ) -> Result> { + if self.config.enable_rpc_transaction_history { + let config = config + .map(|config| config.convert_to_current()) + .unwrap_or_default(); + let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Json); + let encoding_options = BlockEncodingOptions { + transaction_details: config.transaction_details.unwrap_or_default(), + show_rewards: config.rewards.unwrap_or(true), + max_supported_transaction_version: config.max_supported_transaction_version, + }; + let commitment = config.commitment.unwrap_or_default(); + check_is_at_least_confirmed(commitment)?; + + // Block is old enough to be finalized + if slot <= self.bank_forks.read().unwrap().root() { + self.check_blockstore_writes_complete(slot)?; + let result = self.blockstore.get_rooted_block(slot, true); + self.check_blockstore_root(&result, slot)?; + let encode_block = |confirmed_block: ConfirmedBlock| -> Result { + let mut encoded_block = confirmed_block + .encode_with_options(encoding, encoding_options) + .map_err(RpcCustomError::from)?; + if slot == 0 { + encoded_block.block_time = Some(self.genesis_creation_time()); + encoded_block.block_height = Some(0); + } + Ok(encoded_block) + }; + if result.is_err() { + if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { + let bigtable_result = + bigtable_ledger_storage.get_confirmed_block(slot).await; + self.check_bigtable_result(&bigtable_result)?; + return bigtable_result.ok().map(encode_block).transpose(); + } + } + self.check_slot_cleaned_up(&result, slot)?; + return result + .ok() + .map(ConfirmedBlock::from) + .map(encode_block) + .transpose(); + } else if commitment.is_confirmed() { + let result = self.blockstore.get_complete_block(slot, true); + return result + .ok() + .map(ConfirmedBlock::from) + .map(|mut confirmed_block| -> Result { + if confirmed_block.block_time.is_none() + || confirmed_block.block_height.is_none() + { + let r_bank_forks = self.bank_forks.read().unwrap(); + if let Some(bank) = r_bank_forks.get(slot) { + if confirmed_block.block_time.is_none() { + confirmed_block.block_time = Some(bank.clock().unix_timestamp); + } + if confirmed_block.block_height.is_none() { + confirmed_block.block_height = Some(bank.block_height()); + } + } + } + + Ok(confirmed_block + .encode_with_options(encoding, encoding_options) + .map_err(RpcCustomError::from)?) 
+ }) + .transpose(); + } + } else { + return Err(RpcCustomError::TransactionHistoryNotAvailable.into()); + } + Err(RpcCustomError::BlockNotAvailable { slot }.into()) + } + + pub async fn get_blocks( + &self, + start_slot: Slot, + end_slot: Option, + config: Option, + ) -> Result> { + let config = config.unwrap_or_default(); + let commitment = config.commitment.unwrap_or_default(); + check_is_at_least_confirmed(commitment)?; + + let current_slot = self.get_bank_with_config(config)?.slot(); + let end_slot = min( + end_slot.unwrap_or_else(|| start_slot.saturating_add(MAX_GET_CONFIRMED_BLOCKS_RANGE)), + current_slot, + ); + if end_slot < start_slot { + return Ok(vec![]); + } + if end_slot - start_slot > MAX_GET_CONFIRMED_BLOCKS_RANGE { + return Err(Error::invalid_params(format!( + "Slot range too large; max {MAX_GET_CONFIRMED_BLOCKS_RANGE}" + ))); + } + + let lowest_blockstore_slot = self + .blockstore + .get_first_available_block() + .unwrap_or_default(); + if start_slot < lowest_blockstore_slot { + // If the starting slot is lower than what's available in blockstore assume the entire + // [start_slot..end_slot] can be fetched from BigTable. This range should not ever run + // into unfinalized confirmed blocks due to MAX_GET_CONFIRMED_BLOCKS_RANGE + if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { + return bigtable_ledger_storage + .get_confirmed_blocks(start_slot, (end_slot - start_slot) as usize + 1) // increment limit by 1 to ensure returned range is inclusive of both start_slot and end_slot + .await + .map(|mut bigtable_blocks| { + bigtable_blocks.retain(|&slot| slot <= end_slot); + bigtable_blocks + }) + .map_err(|_| { + Error::invalid_params( + "BigTable query failed (maybe timeout due to too large range?)" + .to_string(), + ) + }); + } + } + + let blocks: Vec<_> = self + .blockstore + .rooted_slot_iterator(max(start_slot, lowest_blockstore_slot)) + .map_err(|_| Error::internal_error())? + .filter(|&slot| slot <= end_slot && slot <= current_slot) + .collect(); + + Ok(blocks) + } + + pub async fn get_blocks_with_limit( + &self, + start_slot: Slot, + limit: usize, + config: Option, + ) -> Result> { + let config = config.unwrap_or_default(); + let commitment = config.commitment.unwrap_or_default(); + check_is_at_least_confirmed(commitment)?; + + if limit > MAX_GET_CONFIRMED_BLOCKS_RANGE as usize { + return Err(Error::invalid_params(format!( + "Limit too large; max {MAX_GET_CONFIRMED_BLOCKS_RANGE}" + ))); + } + + let lowest_blockstore_slot = self + .blockstore + .get_first_available_block() + .unwrap_or_default(); + + if start_slot < lowest_blockstore_slot { + // If the starting slot is lower than what's available in blockstore assume the entire + // range can be fetched from BigTable. This range should not ever run into unfinalized + // confirmed blocks due to MAX_GET_CONFIRMED_BLOCKS_RANGE + if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { + return Ok(bigtable_ledger_storage + .get_confirmed_blocks(start_slot, limit) + .await + .unwrap_or_default()); + } + } + + let current_slot = self.get_bank_with_config(config)?.slot(); + // Finalized blocks + let blocks: Vec<_> = self + .blockstore + .rooted_slot_iterator(max(start_slot, lowest_blockstore_slot)) + .map_err(|_| Error::internal_error())? 
+ .take(limit) + .filter(|&slot| slot <= current_slot) + .collect(); + + Ok(blocks) + } + + pub async fn get_block_time(&self, slot: Slot) -> Result> { + if slot == 0 { + return Ok(Some(self.genesis_creation_time())); + } + if slot <= self.bank_forks.read().unwrap().root() { + let result = self.blockstore.get_rooted_block_time(slot); + self.check_blockstore_root(&result, slot)?; + if result.is_err() { + if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { + let bigtable_result = bigtable_ledger_storage.get_confirmed_block(slot).await; + self.check_bigtable_result(&bigtable_result)?; + return Ok(bigtable_result + .ok() + .and_then(|confirmed_block| confirmed_block.block_time)); + } + } + self.check_slot_cleaned_up(&result, slot)?; + Ok(result.ok()) + } else if let Some(bank) = self.bank_forks.read().unwrap().get(slot) { + Ok(Some(bank.clock().unix_timestamp)) + } else { + Err(RpcCustomError::BlockNotAvailable { slot }.into()) + } + } + + pub fn get_signature_status( + &self, + signature: Signature, + commitment: Option, + ) -> Result>> { + let bank = self.bank(commitment); + Ok(bank + .get_signature_status_slot(&signature) + .map(|(_, status)| status)) + } + + pub async fn get_signature_statuses( + &self, + signatures: Vec, + config: Option, + ) -> Result>>> { + let mut statuses: Vec> = vec![]; + + let search_transaction_history = config + .map(|x| x.search_transaction_history) + .unwrap_or(false); + let bank = self.bank(None); + + if search_transaction_history && !self.config.enable_rpc_transaction_history { + return Err(RpcCustomError::TransactionHistoryNotAvailable.into()); + } + + for signature in signatures { + let status = if let Some(status) = self.get_transaction_status(signature, &bank) { + Some(status) + } else if self.config.enable_rpc_transaction_history && search_transaction_history { + if let Some(status) = self + .blockstore + .get_rooted_transaction_status(signature) + .map_err(|_| Error::internal_error())? 
+ .filter(|(slot, _status_meta)| *slot <= self.bank_forks.read().unwrap().root()) + .map(|(slot, status_meta)| { + let err = status_meta.status.clone().err(); + TransactionStatus { + slot, + status: status_meta.status, + confirmations: None, + err, + confirmation_status: Some(TransactionConfirmationStatus::Finalized), + } + }) + { + Some(status) + } else if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { + bigtable_ledger_storage + .get_signature_status(&signature) + .await + .map(Some) + .unwrap_or(None) + } else { + None + } + } else { + None + }; + statuses.push(status); + } + Ok(new_response(&bank, statuses)) + } + + fn get_transaction_status( + &self, + signature: Signature, + bank: &Bank, + ) -> Option { + let (slot, status) = bank.get_signature_status_slot(&signature)?; + + let err = status.clone().err(); + Some(TransactionStatus { + slot, + status, + confirmations: None, + err, + confirmation_status: Some(TransactionConfirmationStatus::Finalized), + }) + } + + pub async fn get_transaction( + &self, + signature: Signature, + config: Option>, + ) -> Result> { + let config = config + .map(|config| config.convert_to_current()) + .unwrap_or_default(); + let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Json); + let max_supported_transaction_version = config.max_supported_transaction_version; + let commitment = config.commitment.unwrap_or_default(); + check_is_at_least_confirmed(commitment)?; + + if self.config.enable_rpc_transaction_history { + let confirmed_bank = self.bank(None); + let confirmed_transaction = if commitment.is_confirmed() { + let highest_confirmed_slot = confirmed_bank.slot(); + self.blockstore + .get_complete_transaction(signature, highest_confirmed_slot) + } else { + self.blockstore.get_rooted_transaction(signature) + }; + + let encode_transaction = + |confirmed_tx_with_meta: ConfirmedTransactionWithStatusMeta| -> Result { + Ok(confirmed_tx_with_meta.encode(encoding, max_supported_transaction_version).map_err(RpcCustomError::from)?) 
+ }; + + match confirmed_transaction.unwrap_or(None) { + Some(mut confirmed_transaction) => { + if commitment.is_confirmed() + && confirmed_bank // should be redundant + .status_cache_ancestors() + .contains(&confirmed_transaction.slot) + { + if confirmed_transaction.block_time.is_none() { + let r_bank_forks = self.bank_forks.read().unwrap(); + confirmed_transaction.block_time = r_bank_forks + .get(confirmed_transaction.slot) + .map(|bank| bank.clock().unix_timestamp); + } + return Ok(Some(encode_transaction(confirmed_transaction)?)); + } + + if confirmed_transaction.slot <= self.bank_forks.read().unwrap().root() { + return Ok(Some(encode_transaction(confirmed_transaction)?)); + } + } + None => { + if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { + return bigtable_ledger_storage + .get_confirmed_transaction(&signature) + .await + .unwrap_or(None) + .map(encode_transaction) + .transpose(); + } + } + } + } else { + return Err(RpcCustomError::TransactionHistoryNotAvailable.into()); + } + Ok(None) + } + + pub fn get_confirmed_signatures_for_address( + &self, + pubkey: Pubkey, + start_slot: Slot, + end_slot: Slot, + ) -> Vec { + if self.config.enable_rpc_transaction_history { + // TODO: Add bigtable_ledger_storage support as a part of + // https://github.com/solana-labs/solana/pull/10928 + // Note: get_confirmed_signatures_for_address is deprecated and always returns an empty + // vector + self.blockstore + .get_confirmed_signatures_for_address(pubkey, start_slot, end_slot) + .unwrap_or_default() + } else { + vec![] + } + } + + pub async fn get_signatures_for_address( + &self, + address: Pubkey, + before: Option, + until: Option, + mut limit: usize, + config: RpcContextConfig, + ) -> Result> { + let commitment = config.commitment.unwrap_or_default(); + check_is_at_least_confirmed(commitment)?; + + if self.config.enable_rpc_transaction_history { + let highest_slot = self.get_bank_with_config(config)?.slot(); + + let SignatureInfosForAddress { + infos: mut results, + found_before, + } = self + .blockstore + .get_confirmed_signatures_for_address2(address, highest_slot, before, until, limit) + .map_err(|err| Error::invalid_params(format!("{err}")))?; + + let map_results = |results: Vec| { + results + .into_iter() + .map(|x| { + let mut item: RpcConfirmedTransactionStatusWithSignature = x.into(); + item.confirmation_status = Some(TransactionConfirmationStatus::Finalized); + if item.block_time.is_none() { + item.block_time = self + .bank_forks + .read() + .unwrap() + .get(item.slot) + .map(|bank| bank.clock().unix_timestamp); + } + item + }) + .collect() + }; + + if results.len() < limit { + if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { + let mut bigtable_before = before; + if !results.is_empty() { + limit -= results.len(); + bigtable_before = results.last().map(|x| x.signature); + } + + // If the oldest address-signature found in Blockstore has not yet been + // uploaded to long-term storage, modify the storage query to return all latest + // signatures to prevent erroring on RowNotFound. This can race with upload. 
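A compact restatement of the branch `get_transaction` takes above when choosing how to query history: confirmed commitment may read unrooted blocks up to the confirmed bank's slot, while anything stricter reads rooted history only. The enum and names below are illustrative, not types from this crate.

```rust
// Sketch of the history-lookup choice made in get_transaction.
type Slot = u64;

#[derive(Debug, PartialEq)]
enum HistoryQuery {
    CompleteUpTo(Slot), // blockstore.get_complete_transaction(sig, highest_confirmed_slot)
    RootedOnly,         // blockstore.get_rooted_transaction(sig)
}

fn choose_query(is_confirmed_commitment: bool, highest_confirmed_slot: Slot) -> HistoryQuery {
    if is_confirmed_commitment {
        HistoryQuery::CompleteUpTo(highest_confirmed_slot)
    } else {
        HistoryQuery::RootedOnly
    }
}

fn main() {
    assert_eq!(choose_query(true, 1234), HistoryQuery::CompleteUpTo(1234));
    assert_eq!(choose_query(false, 1234), HistoryQuery::RootedOnly);
}
```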
+ if found_before && bigtable_before.is_some() { + match bigtable_ledger_storage + .get_signature_status(&bigtable_before.unwrap()) + .await + { + Err(StorageError::SignatureNotFound) => { + bigtable_before = None; + } + Err(err) => { + warn!("{:?}", err); + return Ok(map_results(results)); + } + Ok(_) => {} + } + } + + let bigtable_results = bigtable_ledger_storage + .get_confirmed_signatures_for_address( + &address, + bigtable_before.as_ref(), + until.as_ref(), + limit, + ) + .await; + match bigtable_results { + Ok(bigtable_results) => { + let results_set: HashSet<_> = + results.iter().map(|result| result.signature).collect(); + for (bigtable_result, _) in bigtable_results { + // In the upload race condition, latest address-signatures in + // long-term storage may include original `before` signature... + if before != Some(bigtable_result.signature) + // ...or earlier Blockstore signatures + && !results_set.contains(&bigtable_result.signature) + { + results.push(bigtable_result); + } + } + } + Err(err) => { + warn!("{:?}", err); + } + } + } + } + + Ok(map_results(results)) + } else { + Err(RpcCustomError::TransactionHistoryNotAvailable.into()) + } + } + + pub async fn get_first_available_block(&self) -> Slot { + let slot = self + .blockstore + .get_first_available_block() + .unwrap_or_default(); + + if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { + let bigtable_slot = bigtable_ledger_storage + .get_first_available_block() + .await + .unwrap_or(None) + .unwrap_or(slot); + + if bigtable_slot < slot { + return bigtable_slot; + } + } + slot + } + + pub fn get_token_account_balance( + &self, + pubkey: &Pubkey, + commitment: Option, + ) -> Result> { + let bank = self.bank(commitment); + let account = bank.get_account(pubkey).ok_or_else(|| { + Error::invalid_params("Invalid param: could not find account".to_string()) + })?; + + if !is_known_spl_token_id(account.owner()) { + return Err(Error::invalid_params( + "Invalid param: not a Token account".to_string(), + )); + } + let token_account = StateWithExtensions::::unpack(account.data()) + .map_err(|_| Error::invalid_params("Invalid param: not a Token account".to_string()))?; + let mint = &Pubkey::from_str(&token_account.base.mint.to_string()) + .expect("Token account mint should be convertible to Pubkey"); + let (_, data) = get_mint_owner_and_additional_data(&bank, mint)?; + let balance = token_amount_to_ui_amount_v2(token_account.base.amount, &data); + Ok(new_response(&bank, balance)) + } + + pub fn get_token_supply( + &self, + mint: &Pubkey, + commitment: Option, + ) -> Result> { + let bank = self.bank(commitment); + let mint_account = bank.get_account(mint).ok_or_else(|| { + Error::invalid_params("Invalid param: could not find account".to_string()) + })?; + if !is_known_spl_token_id(mint_account.owner()) { + return Err(Error::invalid_params( + "Invalid param: not a Token mint".to_string(), + )); + } + let mint = StateWithExtensions::::unpack(mint_account.data()).map_err(|_| { + Error::invalid_params("Invalid param: mint could not be unpacked".to_string()) + })?; + + let interest_bearing_config = mint + .get_extension::() + .map(|x| (*x, bank.clock().unix_timestamp)) + .ok(); + + let supply = token_amount_to_ui_amount_v2( + mint.base.supply, + &SplTokenAdditionalData { + decimals: mint.base.decimals, + interest_bearing_config, + }, + ); + Ok(new_response(&bank, supply)) + } + + pub fn get_token_largest_accounts( + &self, + mint: &Pubkey, + commitment: Option, + ) -> Result>> { + let bank = self.bank(commitment); + let 
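The Blockstore/BigTable merge at the top of this hunk deduplicates long-term-storage results against what the Blockstore already returned and against the original `before` cursor, to tolerate the upload race described in the comment. A standalone sketch of that merge, using plain strings in place of signatures:

```rust
// Sketch of the Blockstore/BigTable result merge in get_signatures_for_address:
// anything the Blockstore already returned, or the original `before` cursor,
// is skipped when appending long-term-storage results.
use std::collections::HashSet;

fn merge_signatures(
    mut blockstore_results: Vec<String>,
    bigtable_results: Vec<String>,
    before: Option<&str>,
) -> Vec<String> {
    let seen: HashSet<String> = blockstore_results.iter().cloned().collect();
    for sig in bigtable_results {
        if Some(sig.as_str()) != before && !seen.contains(&sig) {
            blockstore_results.push(sig);
        }
    }
    blockstore_results
}

fn main() {
    let merged = merge_signatures(
        vec!["sigA".into(), "sigB".into()],
        vec!["sigB".into(), "sigC".into()],
        Some("sigA"),
    );
    assert_eq!(merged, vec!["sigA", "sigB", "sigC"]);
}
```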
(mint_owner, data) = get_mint_owner_and_additional_data(&bank, mint)?; + if !is_known_spl_token_id(&mint_owner) { + return Err(Error::invalid_params( + "Invalid param: not a Token mint".to_string(), + )); + } + + let mut token_balances = + BinaryHeap::>::with_capacity(NUM_LARGEST_ACCOUNTS); + for (address, account) in + self.get_filtered_spl_token_accounts_by_mint(&bank, &mint_owner, mint, vec![], true)? + { + let amount = StateWithExtensions::::unpack(account.data()) + .map(|account| account.base.amount) + .unwrap_or(0); + + let new_entry = (amount, address); + if token_balances.len() >= NUM_LARGEST_ACCOUNTS { + let Reverse(entry) = token_balances + .peek() + .expect("BinaryHeap::peek should succeed when len > 0"); + if *entry >= new_entry { + continue; + } + token_balances.pop(); + } + token_balances.push(Reverse(new_entry)); + } + + let token_balances = token_balances + .into_sorted_vec() + .into_iter() + .map(|Reverse((amount, address))| { + Ok(RpcTokenAccountBalance { + address: address.to_string(), + amount: token_amount_to_ui_amount_v2(amount, &data), + }) + }) + .collect::>>()?; + + Ok(new_response(&bank, token_balances)) + } + + pub fn get_token_accounts_by_owner( + &self, + owner: &Pubkey, + token_account_filter: TokenAccountsFilter, + config: Option, + sort_results: bool, + ) -> Result>> { + let RpcAccountInfoConfig { + encoding, + data_slice: data_slice_config, + commitment, + min_context_slot, + } = config.unwrap_or_default(); + let bank = self.get_bank_with_config(RpcContextConfig { + commitment, + min_context_slot, + })?; + let encoding = encoding.unwrap_or(UiAccountEncoding::Binary); + let (token_program_id, mint) = get_token_program_id_and_mint(&bank, token_account_filter)?; + + let mut filters = vec![]; + if let Some(mint) = mint { + // Optional filter on Mint address + filters.push(RpcFilterType::Memcmp(Memcmp::new_raw_bytes( + 0, + mint.to_bytes().into(), + ))); + } + + let keyed_accounts = self.get_filtered_spl_token_accounts_by_owner( + &bank, + &token_program_id, + owner, + filters, + sort_results, + )?; + let accounts = if encoding == UiAccountEncoding::JsonParsed { + get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() + } else { + keyed_accounts + .into_iter() + .map(|(pubkey, account)| { + Ok(RpcKeyedAccount { + pubkey: pubkey.to_string(), + account: encode_account(&account, &pubkey, encoding, data_slice_config)?, + }) + }) + .collect::>>()? 
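`get_token_largest_accounts` above streams over every token account for the mint while keeping only the `NUM_LARGEST_ACCOUNTS` best balances in a `BinaryHeap` of `Reverse` entries, i.e. a bounded min-heap. A generic, self-contained version of that top-N selection:

```rust
// Bounded top-N selection with a min-heap, as used for the largest token
// accounts scan: O(len * log N) and only N entries retained at any time.
use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn top_n(amounts: impl IntoIterator<Item = u64>, n: usize) -> Vec<u64> {
    let mut heap = BinaryHeap::with_capacity(n);
    for amount in amounts {
        if heap.len() >= n {
            // The smallest retained value sits at the top of the min-heap.
            if heap.peek().map_or(false, |Reverse(min)| *min >= amount) {
                continue;
            }
            heap.pop();
        }
        heap.push(Reverse(amount));
    }
    // into_sorted_vec() on Reverse entries yields descending raw values.
    heap.into_sorted_vec().into_iter().map(|Reverse(v)| v).collect()
}

fn main() {
    assert_eq!(top_n([5, 1, 9, 7, 3], 3), vec![9, 7, 5]);
}
```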
+ }; + Ok(new_response(&bank, accounts)) + } + + pub fn get_token_accounts_by_delegate( + &self, + delegate: &Pubkey, + token_account_filter: TokenAccountsFilter, + config: Option, + sort_results: bool, + ) -> Result>> { + let RpcAccountInfoConfig { + encoding, + data_slice: data_slice_config, + commitment, + min_context_slot, + } = config.unwrap_or_default(); + let bank = self.get_bank_with_config(RpcContextConfig { + commitment, + min_context_slot, + })?; + let encoding = encoding.unwrap_or(UiAccountEncoding::Binary); + let (token_program_id, mint) = get_token_program_id_and_mint(&bank, token_account_filter)?; + + let mut filters = vec![ + // Filter on Delegate is_some() + RpcFilterType::Memcmp(Memcmp::new_raw_bytes( + 72, + bincode::serialize(&1u32).unwrap(), + )), + // Filter on Delegate address + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(76, delegate.to_bytes().into())), + ]; + // Optional filter on Mint address, uses mint account index for scan + let keyed_accounts = if let Some(mint) = mint { + self.get_filtered_spl_token_accounts_by_mint( + &bank, + &token_program_id, + &mint, + filters, + sort_results, + )? + } else { + // Filter on Token Account state + filters.push(RpcFilterType::TokenAccountState); + self.get_filtered_program_accounts(&bank, &token_program_id, filters, sort_results)? + }; + let accounts = if encoding == UiAccountEncoding::JsonParsed { + get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() + } else { + keyed_accounts + .into_iter() + .map(|(pubkey, account)| { + Ok(RpcKeyedAccount { + pubkey: pubkey.to_string(), + account: encode_account(&account, &pubkey, encoding, data_slice_config)?, + }) + }) + .collect::>>()? + }; + Ok(new_response(&bank, accounts)) + } + + /// Use a set of filters to get an iterator of keyed program accounts from a bank + fn get_filtered_program_accounts( + &self, + bank: &Bank, + program_id: &Pubkey, + mut filters: Vec, + sort_results: bool, + ) -> RpcCustomResult> { + optimize_filters(&mut filters); + let filter_closure = |account: &AccountSharedData| { + filters + .iter() + .all(|filter_type| filter_allows(filter_type, account)) + }; + if self + .config + .account_indexes + .contains(&AccountIndex::ProgramId) + { + if !self.config.account_indexes.include_key(program_id) { + return Err(RpcCustomError::KeyExcludedFromSecondaryIndex { + index_key: program_id.to_string(), + }); + } + Ok(bank + .get_filtered_indexed_accounts( + &IndexKey::ProgramId(*program_id), + |account| { + // The program-id account index checks for Account owner on inclusion. However, due + // to the current AccountsDb implementation, an account may remain in storage as a + // zero-lamport AccountSharedData::Default() after being wiped and reinitialized in later + // updates. We include the redundant filters here to avoid returning these + // accounts. + account.owner() == program_id && filter_closure(account) + }, + &ScanConfig::new(!sort_results), + bank.byte_limit_for_scans(), + ) + .map_err(|e| RpcCustomError::ScanError { + message: e.to_string(), + })?) + } else { + // this path does not need to provide a mb limit because we only want to support secondary indexes + Ok(bank + .get_filtered_program_accounts( + program_id, + filter_closure, + &ScanConfig::new(!sort_results), + ) + .map_err(|e| RpcCustomError::ScanError { + message: e.to_string(), + })?) 
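The memcmp offsets used above follow the packed spl-token `Account` layout: mint at 0..32, owner at 32..64, amount at 64..72, then a 4-byte `COption` tag at 72 followed by the delegate pubkey at 76. A plain-data sketch of the filters built for `getTokenAccountsByDelegate`; the struct here is illustrative, not the crate's `RpcFilterType`.

```rust
// Plain-data sketch of the delegate filters: COption::Some tag at offset 72,
// delegate pubkey at offset 76, and an optional mint match at offset 0.
struct MemcmpFilter {
    offset: usize,
    bytes: Vec<u8>,
}

fn delegate_filters(delegate: [u8; 32], mint: Option<[u8; 32]>) -> Vec<MemcmpFilter> {
    let mut filters = vec![
        // COption::Some tag for the delegate field (little-endian 1u32).
        MemcmpFilter { offset: 72, bytes: 1u32.to_le_bytes().to_vec() },
        // The delegate address itself.
        MemcmpFilter { offset: 76, bytes: delegate.to_vec() },
    ];
    if let Some(mint) = mint {
        // Optional narrowing by mint, which sits at the start of the account data.
        filters.push(MemcmpFilter { offset: 0, bytes: mint.to_vec() });
    }
    filters
}

fn main() {
    let filters = delegate_filters([7u8; 32], None);
    assert_eq!(filters.len(), 2);
    assert_eq!(filters[0].bytes, vec![1, 0, 0, 0]);
    assert_eq!(filters[1].offset, 76);
}
```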
+ } + } + + /// Get an iterator of spl-token accounts by owner address + fn get_filtered_spl_token_accounts_by_owner( + &self, + bank: &Bank, + program_id: &Pubkey, + owner_key: &Pubkey, + mut filters: Vec, + sort_results: bool, + ) -> RpcCustomResult> { + // The by-owner accounts index checks for Token Account state and Owner address on + // inclusion. However, due to the current AccountsDb implementation, an account may remain + // in storage as a zero-lamport AccountSharedData::Default() after being wiped and reinitialized in + // later updates. We include the redundant filters here to avoid returning these accounts. + // + // Filter on Token Account state + filters.push(RpcFilterType::TokenAccountState); + // Filter on Owner address + filters.push(RpcFilterType::Memcmp(Memcmp::new_raw_bytes( + SPL_TOKEN_ACCOUNT_OWNER_OFFSET, + owner_key.to_bytes().into(), + ))); + + if self + .config + .account_indexes + .contains(&AccountIndex::SplTokenOwner) + { + if !self.config.account_indexes.include_key(owner_key) { + return Err(RpcCustomError::KeyExcludedFromSecondaryIndex { + index_key: owner_key.to_string(), + }); + } + Ok(bank + .get_filtered_indexed_accounts( + &IndexKey::SplTokenOwner(*owner_key), + |account| { + account.owner() == program_id + && filters + .iter() + .all(|filter_type| filter_allows(filter_type, account)) + }, + &ScanConfig::new(!sort_results), + bank.byte_limit_for_scans(), + ) + .map_err(|e| RpcCustomError::ScanError { + message: e.to_string(), + })?) + } else { + self.get_filtered_program_accounts(bank, program_id, filters, sort_results) + } + } + + /// Get an iterator of spl-token accounts by mint address + fn get_filtered_spl_token_accounts_by_mint( + &self, + bank: &Bank, + program_id: &Pubkey, + mint_key: &Pubkey, + mut filters: Vec, + sort_results: bool, + ) -> RpcCustomResult> { + // The by-mint accounts index checks for Token Account state and Mint address on inclusion. + // However, due to the current AccountsDb implementation, an account may remain in storage + // as be zero-lamport AccountSharedData::Default() after being wiped and reinitialized in later + // updates. We include the redundant filters here to avoid returning these accounts. + // + // Filter on Token Account state + filters.push(RpcFilterType::TokenAccountState); + // Filter on Mint address + filters.push(RpcFilterType::Memcmp(Memcmp::new_raw_bytes( + SPL_TOKEN_ACCOUNT_MINT_OFFSET, + mint_key.to_bytes().into(), + ))); + if self + .config + .account_indexes + .contains(&AccountIndex::SplTokenMint) + { + if !self.config.account_indexes.include_key(mint_key) { + return Err(RpcCustomError::KeyExcludedFromSecondaryIndex { + index_key: mint_key.to_string(), + }); + } + Ok(bank + .get_filtered_indexed_accounts( + &IndexKey::SplTokenMint(*mint_key), + |account| { + account.owner() == program_id + && filters + .iter() + .all(|filter_type| filter_allows(filter_type, account)) + }, + &ScanConfig::new(!sort_results), + bank.byte_limit_for_scans(), + ) + .map_err(|e| RpcCustomError::ScanError { + message: e.to_string(), + })?) 
+ } else { + self.get_filtered_program_accounts(bank, program_id, filters, sort_results) + } + } + + fn get_latest_blockhash(&self, config: RpcContextConfig) -> Result> { + let bank = self.get_bank_with_config(config)?; + let blockhash = bank.last_blockhash(); + let last_valid_block_height = bank + .get_blockhash_last_valid_block_height(&blockhash) + .expect("bank blockhash queue should contain blockhash"); + Ok(new_response( + &bank, + RpcBlockhash { + blockhash: blockhash.to_string(), + last_valid_block_height, + }, + )) + } + + fn is_blockhash_valid( + &self, + blockhash: &Hash, + config: RpcContextConfig, + ) -> Result> { + let bank = self.get_bank_with_config(config)?; + let is_valid = bank.is_blockhash_valid(blockhash); + Ok(new_response(&bank, is_valid)) + } + + fn get_stake_minimum_delegation(&self, config: RpcContextConfig) -> Result> { + let bank = self.get_bank_with_config(config)?; + let stake_minimum_delegation = + solana_stake_program::get_minimum_delegation(&bank.feature_set); + Ok(new_response(&bank, stake_minimum_delegation)) + } + + fn get_recent_prioritization_fees( + &self, + pubkeys: Vec, + ) -> Result> { + Ok(self + .prioritization_fee_cache + .get_prioritization_fees(&pubkeys) + .into_iter() + .map(|(slot, prioritization_fee)| RpcPrioritizationFee { + slot, + prioritization_fee, + }) + .collect()) + } +} + +fn err_transfer_fn_with_loc( + loc: &'static str, +) -> Box Error> { + Box::new(move |error: E| Error { + code: ErrorCode::InternalError, + message: format!("[{}] {}", loc, error), + data: None, + }) +} + +fn optimize_filters(filters: &mut [RpcFilterType]) { + filters.iter_mut().for_each(|filter_type| { + if let RpcFilterType::Memcmp(compare) = filter_type { + if let Err(err) = compare.convert_to_raw_bytes() { + // All filters should have been previously verified + warn!("Invalid filter: bytes could not be decoded, {err}"); + } + } + }) +} + +fn verify_transaction( + transaction: &SanitizedTransaction, + feature_set: &Arc, +) -> Result<()> { + #[allow(clippy::question_mark)] + if transaction.verify().is_err() { + return Err(RpcCustomError::TransactionSignatureVerificationFailure.into()); + } + + if let Err(e) = transaction.verify_precompiles(feature_set) { + return Err(RpcCustomError::TransactionPrecompileVerificationFailure(e).into()); + } + + Ok(()) +} + +fn verify_filter(input: &RpcFilterType) -> Result<()> { + input + .verify() + .map_err(|e| Error::invalid_params(format!("Invalid param: {e:?}"))) +} + +pub fn verify_pubkey(input: &str) -> Result { + input + .parse() + .map_err(|e| Error::invalid_params(format!("Invalid param: {e:?}"))) +} + +fn verify_hash(input: &str) -> Result { + input + .parse() + .map_err(|e| Error::invalid_params(format!("Invalid param: {e:?}"))) +} + +fn verify_signature(input: &str) -> Result { + input + .parse() + .map_err(|e| Error::invalid_params(format!("Invalid param: {e:?}"))) +} + +fn verify_token_account_filter( + token_account_filter: RpcTokenAccountsFilter, +) -> Result { + match token_account_filter { + RpcTokenAccountsFilter::Mint(mint_str) => { + let mint = verify_pubkey(&mint_str)?; + Ok(TokenAccountsFilter::Mint(mint)) + } + RpcTokenAccountsFilter::ProgramId(program_id_str) => { + let program_id = verify_pubkey(&program_id_str)?; + Ok(TokenAccountsFilter::ProgramId(program_id)) + } + } +} + +fn verify_and_parse_signatures_for_address_params( + address: String, + before: Option, + until: Option, + limit: Option, +) -> Result<(Pubkey, Option, Option, usize)> { + let address = verify_pubkey(&address)?; + let before = 
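The `verify_pubkey` / `verify_hash` / `verify_signature` helpers above all follow the same parse-or-invalid-params shape. A generic, std-only restatement of that pattern (names and the error type are illustrative):

```rust
// Generic version of the verify_* helpers: parse a string parameter or map
// the failure to an "Invalid param" style message.
use std::fmt::Debug;
use std::str::FromStr;

fn verify_param<T>(input: &str, what: &str) -> Result<T, String>
where
    T: FromStr,
    T::Err: Debug,
{
    input
        .parse()
        .map_err(|e| format!("Invalid param ({what}): {e:?}"))
}

fn main() {
    let port: u16 = verify_param("8899", "port").unwrap();
    assert_eq!(port, 8899);
    assert!(verify_param::<u16>("not-a-number", "port").is_err());
}
```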
before + .map(|ref before| verify_signature(before)) + .transpose()?; + let until = until.map(|ref until| verify_signature(until)).transpose()?; + let limit = limit.unwrap_or(MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT); + + if limit == 0 || limit > MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT { + return Err(Error::invalid_params(format!( + "Invalid limit; max {MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT}" + ))); + } + Ok((address, before, until, limit)) +} + +pub(crate) fn check_is_at_least_confirmed(commitment: CommitmentConfig) -> Result<()> { + if !commitment.is_at_least_confirmed() { + return Err(Error::invalid_params( + "Method does not support commitment below `confirmed`", + )); + } + Ok(()) +} + +fn get_encoded_account( + bank: &Bank, + pubkey: &Pubkey, + encoding: UiAccountEncoding, + data_slice: Option, + // only used for simulation results + overwrite_accounts: Option<&HashMap>, +) -> Result> { + match crate::account_resolver::get_account_from_overwrites_or_bank( + pubkey, + bank, + overwrite_accounts, + ) { + Some(account) => { + let response = if is_known_spl_token_id(account.owner()) + && encoding == UiAccountEncoding::JsonParsed + { + get_parsed_token_account(bank, pubkey, account, overwrite_accounts) + } else { + encode_account(&account, pubkey, encoding, data_slice)? + }; + Ok(Some(response)) + } + None => Ok(None), + } +} + +fn encode_account( + account: &T, + pubkey: &Pubkey, + encoding: UiAccountEncoding, + data_slice: Option, +) -> Result { + if (encoding == UiAccountEncoding::Binary || encoding == UiAccountEncoding::Base58) + && data_slice + .map(|s| min(s.length, account.data().len().saturating_sub(s.offset))) + .unwrap_or(account.data().len()) + > MAX_BASE58_BYTES + { + let message = format!("Encoded binary (base 58) data should be less than {MAX_BASE58_BYTES} bytes, please use Base64 encoding."); + Err(Error { + code: ErrorCode::InvalidRequest, + message, + data: None, + }) + } else { + Ok(UiAccount::encode( + pubkey, account, encoding, None, data_slice, + )) + } +} + +/// Analyze custom filters to determine if the result will be a subset of spl-token accounts by +/// owner. +/// NOTE: `optimize_filters()` should almost always be called before using this method because of +/// the requirement that `Memcmp::raw_bytes_as_ref().is_some()`. 
+fn get_spl_token_owner_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> Option { + if !is_known_spl_token_id(program_id) { + return None; + } + let mut data_size_filter: Option = None; + let mut memcmp_filter: Option<&[u8]> = None; + let mut owner_key: Option = None; + let mut incorrect_owner_len: Option = None; + let mut token_account_state_filter = false; + let account_packed_len = TokenAccount::get_packed_len(); + for filter in filters { + match filter { + RpcFilterType::DataSize(size) => data_size_filter = Some(*size), + RpcFilterType::Memcmp(memcmp) => { + let offset = memcmp.offset(); + if let Some(bytes) = memcmp.raw_bytes_as_ref() { + if offset == account_packed_len && *program_id == token_2022::id() { + memcmp_filter = Some(bytes); + } else if offset == SPL_TOKEN_ACCOUNT_OWNER_OFFSET { + if bytes.len() == PUBKEY_BYTES { + owner_key = Pubkey::try_from(bytes).ok(); + } else { + incorrect_owner_len = Some(bytes.len()); + } + } + } + } + RpcFilterType::TokenAccountState => token_account_state_filter = true, + } + } + if data_size_filter == Some(account_packed_len as u64) + || memcmp_filter == Some(&[ACCOUNTTYPE_ACCOUNT]) + || token_account_state_filter + { + if let Some(incorrect_owner_len) = incorrect_owner_len { + info!( + "Incorrect num bytes ({:?}) provided for spl_token_owner_filter", + incorrect_owner_len + ); + } + owner_key + } else { + debug!("spl_token program filters do not match by-owner index requisites"); + None + } +} + +/// Analyze custom filters to determine if the result will be a subset of spl-token accounts by +/// mint. +/// NOTE: `optimize_filters()` should almost always be called before using this method because of +/// the requirement that `Memcmp::raw_bytes_as_ref().is_some()`. +fn get_spl_token_mint_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> Option { + if !is_known_spl_token_id(program_id) { + return None; + } + let mut data_size_filter: Option = None; + let mut memcmp_filter: Option<&[u8]> = None; + let mut mint: Option = None; + let mut incorrect_mint_len: Option = None; + let mut token_account_state_filter = false; + let account_packed_len = TokenAccount::get_packed_len(); + for filter in filters { + match filter { + RpcFilterType::DataSize(size) => data_size_filter = Some(*size), + RpcFilterType::Memcmp(memcmp) => { + let offset = memcmp.offset(); + if let Some(bytes) = memcmp.raw_bytes_as_ref() { + if offset == account_packed_len && *program_id == token_2022::id() { + memcmp_filter = Some(bytes); + } else if offset == SPL_TOKEN_ACCOUNT_MINT_OFFSET { + if bytes.len() == PUBKEY_BYTES { + mint = Pubkey::try_from(bytes).ok(); + } else { + incorrect_mint_len = Some(bytes.len()); + } + } + } + } + RpcFilterType::TokenAccountState => token_account_state_filter = true, + } + } + if data_size_filter == Some(account_packed_len as u64) + || memcmp_filter == Some(&[ACCOUNTTYPE_ACCOUNT]) + || token_account_state_filter + { + if let Some(incorrect_mint_len) = incorrect_mint_len { + info!( + "Incorrect num bytes ({:?}) provided for spl_token_mint_filter", + incorrect_mint_len + ); + } + mint + } else { + debug!("spl_token program filters do not match by-mint index requisites"); + None + } +} + +/// Analyze a passed Pubkey that may be a Token program id or Mint address to determine the program +/// id and optional Mint +fn get_token_program_id_and_mint( + bank: &Bank, + token_account_filter: TokenAccountsFilter, +) -> Result<(Pubkey, Option)> { + match token_account_filter { + TokenAccountsFilter::Mint(mint) => { + let (mint_owner, _) = 
get_mint_owner_and_additional_data(bank, &mint)?; + if !is_known_spl_token_id(&mint_owner) { + return Err(Error::invalid_params( + "Invalid param: not a Token mint".to_string(), + )); + } + Ok((mint_owner, Some(mint))) + } + TokenAccountsFilter::ProgramId(program_id) => { + if is_known_spl_token_id(&program_id) { + Ok((program_id, None)) + } else { + Err(Error::invalid_params( + "Invalid param: unrecognized Token program id".to_string(), + )) + } + } + } +} + +fn _send_transaction( + meta: JsonRpcRequestProcessor, + signature: Signature, + transaction: SanitizedTransaction, + _last_valid_block_height: u64, + _durable_nonce_info: Option<(Pubkey, Hash)>, +) -> Result { + // TODO: implement a transaction cache for RPC, to remove duplicated and invalid transactions + // let transaction_info = + // TransactionInfo::new(transaction, last_valid_block_height, durable_nonce_info); + meta.tx_channel + .0 + .send(transaction) + .unwrap_or_else(|err| warn!("Failed to enqueue transaction: {}", err)); + + Ok(signature.to_string()) +} + +// Minimal RPC interface that known validators are expected to provide +pub mod rpc_minimal { + use super::*; + #[rpc] + pub trait Minimal { + type Metadata; + + #[rpc(meta, name = "getBalance")] + fn get_balance( + &self, + meta: Self::Metadata, + pubkey_str: String, + config: Option, + ) -> Result>; + + #[rpc(meta, name = "getEpochInfo")] + fn get_epoch_info( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result; + + #[rpc(meta, name = "getGenesisHash")] + fn get_genesis_hash(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getHealth")] + fn get_health(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getIdentity")] + fn get_identity(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getSlot")] + fn get_slot(&self, meta: Self::Metadata, config: Option) -> Result; + + #[rpc(meta, name = "getBlockHeight")] + fn get_block_height( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result; + + #[rpc(meta, name = "getHighestSnapshotSlot")] + fn get_highest_snapshot_slot(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getTransactionCount")] + fn get_transaction_count( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result; + + #[rpc(meta, name = "getVersion")] + fn get_version(&self, meta: Self::Metadata) -> Result; + + // TODO: Refactor `agave-validator wait-for-restart-window` to not require this method, so + // it can be removed from rpc_minimal + #[rpc(meta, name = "getVoteAccounts")] + fn get_vote_accounts( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result; + + // TODO: Refactor `agave-validator wait-for-restart-window` to not require this method, so + // it can be removed from rpc_minimal + #[rpc(meta, name = "getLeaderSchedule")] + fn get_leader_schedule( + &self, + meta: Self::Metadata, + options: Option, + config: Option, + ) -> Result>; + } + + pub struct MinimalImpl; + impl Minimal for MinimalImpl { + type Metadata = JsonRpcRequestProcessor; + + fn get_balance( + &self, + meta: Self::Metadata, + pubkey_str: String, + config: Option, + ) -> Result> { + debug!("get_balance rpc request received: {:?}", pubkey_str); + let pubkey = verify_pubkey(&pubkey_str)?; + meta.get_balance(&pubkey, config.unwrap_or_default()) + } + + fn get_epoch_info( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result { + debug!("get_epoch_info rpc request received"); + let bank = meta.get_bank_with_config(config.unwrap_or_default())?; + Ok(bank.get_epoch_info()) + } + + fn 
get_genesis_hash(&self, meta: Self::Metadata) -> Result { + debug!("get_genesis_hash rpc request received"); + Ok(meta.genesis_hash.to_string()) + } + + fn get_health(&self, _meta: Self::Metadata) -> Result { + // always health + Ok("ok".to_string()) + } + + fn get_identity(&self, meta: Self::Metadata) -> Result { + Ok(RpcIdentity { + identity: meta.bank(None).collector_id().to_string(), + }) + } + + fn get_slot(&self, meta: Self::Metadata, config: Option) -> Result { + debug!("get_slot rpc request received"); + meta.get_slot(config.unwrap_or_default()) + } + + fn get_block_height( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result { + debug!("get_block_height rpc request received"); + meta.get_block_height(config.unwrap_or_default()) + } + + fn get_highest_snapshot_slot(&self, meta: Self::Metadata) -> Result { + debug!("get_highest_snapshot_slot rpc request received"); + + if meta.snapshot_config.is_none() { + return Err(RpcCustomError::NoSnapshot.into()); + } + + let (full_snapshot_archives_dir, incremental_snapshot_archives_dir) = meta + .snapshot_config + .map(|snapshot_config| { + ( + snapshot_config.full_snapshot_archives_dir, + snapshot_config.incremental_snapshot_archives_dir, + ) + }) + .unwrap(); + + let full_snapshot_slot = + snapshot_utils::get_highest_full_snapshot_archive_slot(full_snapshot_archives_dir) + .ok_or(RpcCustomError::NoSnapshot)?; + let incremental_snapshot_slot = + snapshot_utils::get_highest_incremental_snapshot_archive_slot( + incremental_snapshot_archives_dir, + full_snapshot_slot, + ); + + Ok(RpcSnapshotSlotInfo { + full: full_snapshot_slot, + incremental: incremental_snapshot_slot, + }) + } + + fn get_transaction_count( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result { + debug!("get_transaction_count rpc request received"); + meta.get_transaction_count(config.unwrap_or_default()) + } + + fn get_version(&self, _: Self::Metadata) -> Result { + debug!("get_version rpc request received"); + let version = solana_version::Version::default(); + Ok(RpcVersionInfo { + solana_core: version.to_string(), + feature_set: Some(version.feature_set), + }) + } + + // TODO: Refactor `agave-validator wait-for-restart-window` to not require this method, so + // it can be removed from rpc_minimal + fn get_vote_accounts( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result { + debug!("get_vote_accounts rpc request received"); + meta.get_vote_accounts(config) + } + + // TODO: Refactor `agave-validator wait-for-restart-window` to not require this method, so + // it can be removed from rpc_minimal + fn get_leader_schedule( + &self, + _meta: Self::Metadata, + _options: Option, + _config: Option, + ) -> Result> { + // NOTE: not supported + Ok(None) + } + } +} + +// RPC interface that only depends on immediate Bank data +// Expected to be provided by API nodes +pub mod rpc_bank { + use super::*; + use solana_rpc_client_api::request::MAX_GET_SLOT_LEADERS; + #[rpc] + pub trait BankData { + type Metadata; + + #[rpc(meta, name = "getMinimumBalanceForRentExemption")] + fn get_minimum_balance_for_rent_exemption( + &self, + meta: Self::Metadata, + data_len: usize, + commitment: Option, + ) -> Result; + + #[rpc(meta, name = "getInflationGovernor")] + fn get_inflation_governor( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result; + + #[rpc(meta, name = "getInflationRate")] + fn get_inflation_rate(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getEpochSchedule")] + fn get_epoch_schedule(&self, meta: Self::Metadata) -> 
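Each module in this file follows the jsonrpc-derive pattern seen above: an `#[rpc]` trait declares the JSON-RPC method names, a concrete `*Impl` struct provides the handlers, and the generated `to_delegate()` is registered on an IO handler. A self-contained toy example of that wiring, using a hypothetical `ping` method rather than any method from this crate:

```rust
// Minimal illustration of the #[rpc] trait + Impl + to_delegate registration
// pattern (Ping/PingImpl are hypothetical, for illustration only).
use jsonrpc_core::{IoHandler, Result};
use jsonrpc_derive::rpc;

#[rpc]
pub trait Ping {
    #[rpc(name = "ping")]
    fn ping(&self) -> Result<String>;
}

pub struct PingImpl;
impl Ping for PingImpl {
    fn ping(&self) -> Result<String> {
        Ok("pong".to_string())
    }
}

fn main() {
    let mut io = IoHandler::new();
    io.extend_with(PingImpl.to_delegate());
    let req = r#"{"jsonrpc":"2.0","id":1,"method":"ping"}"#;
    let resp = io.handle_request_sync(req);
    println!("{resp:?}");
}
```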
Result; + + #[rpc(meta, name = "getSlotLeader")] + fn get_slot_leader( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result; + + #[rpc(meta, name = "getSlotLeaders")] + fn get_slot_leaders( + &self, + meta: Self::Metadata, + start_slot: Slot, + limit: u64, + ) -> Result>; + + #[rpc(meta, name = "getBlockProduction")] + fn get_block_production( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result>; + } + + pub struct BankDataImpl; + impl BankData for BankDataImpl { + type Metadata = JsonRpcRequestProcessor; + + fn get_minimum_balance_for_rent_exemption( + &self, + meta: Self::Metadata, + data_len: usize, + commitment: Option, + ) -> Result { + debug!( + "get_minimum_balance_for_rent_exemption rpc request received: {:?}", + data_len + ); + if data_len as u64 > system_instruction::MAX_PERMITTED_DATA_LENGTH { + return Err(Error::invalid_request()); + } + Ok(meta.get_minimum_balance_for_rent_exemption(data_len, commitment)) + } + + fn get_inflation_governor( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result { + debug!("get_inflation_governor rpc request received"); + Ok(meta.get_inflation_governor(commitment)) + } + + fn get_inflation_rate(&self, meta: Self::Metadata) -> Result { + debug!("get_inflation_rate rpc request received"); + Ok(meta.get_inflation_rate()) + } + + fn get_epoch_schedule(&self, meta: Self::Metadata) -> Result { + debug!("get_epoch_schedule rpc request received"); + Ok(meta.get_epoch_schedule()) + } + + fn get_slot_leader( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result { + debug!("get_slot_leader rpc request received"); + meta.get_slot_leader(config.unwrap_or_default()) + } + + fn get_slot_leaders( + &self, + meta: Self::Metadata, + start_slot: Slot, + limit: u64, + ) -> Result> { + debug!( + "get_slot_leaders rpc request received (start: {} limit: {})", + start_slot, limit + ); + + let limit = limit as usize; + if limit > MAX_GET_SLOT_LEADERS { + return Err(Error::invalid_params(format!( + "Invalid limit; max {MAX_GET_SLOT_LEADERS}" + ))); + } + + Ok(meta + .get_slot_leaders(None, start_slot, limit)? + .into_iter() + .map(|identity| identity.to_string()) + .collect()) + } + + fn get_block_production( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result> { + debug!("get_block_production rpc request received"); + + let config = config.unwrap_or_default(); + let filter_by_identity = if let Some(ref identity) = config.identity { + Some(verify_pubkey(identity)?) 
+ } else { + None + }; + + let bank = meta.bank(config.commitment); + let (first_slot, last_slot) = match config.range { + None => ( + bank.epoch_schedule().get_first_slot_in_epoch(bank.epoch()), + bank.slot(), + ), + Some(range) => { + let first_slot = range.first_slot; + let last_slot = range.last_slot.unwrap_or_else(|| bank.slot()); + if last_slot < first_slot { + return Err(Error::invalid_params(format!( + "lastSlot, {last_slot}, cannot be less than firstSlot, {first_slot}" + ))); + } + (first_slot, last_slot) + } + }; + + let slot_history = bank.get_slot_history(); + if first_slot < slot_history.oldest() { + return Err(Error::invalid_params(format!( + "firstSlot, {}, is too small; min {}", + first_slot, + slot_history.oldest() + ))); + } + if last_slot > slot_history.newest() { + return Err(Error::invalid_params(format!( + "lastSlot, {}, is too large; max {}", + last_slot, + slot_history.newest() + ))); + } + + let slot_leaders = meta.get_slot_leaders( + config.commitment, + first_slot, + last_slot.saturating_sub(first_slot) as usize + 1, // +1 because last_slot is inclusive + )?; + + let mut block_production: HashMap<_, (usize, usize)> = HashMap::new(); + + let mut slot = first_slot; + for identity in slot_leaders { + if let Some(ref filter_by_identity) = filter_by_identity { + if identity != *filter_by_identity { + slot += 1; + continue; + } + } + + let entry = block_production.entry(identity).or_default(); + if slot_history.check(slot) == solana_sdk::slot_history::Check::Found { + entry.1 += 1; // Increment blocks_produced + } + entry.0 += 1; // Increment leader_slots + slot += 1; + } + + Ok(new_response( + &bank, + RpcBlockProduction { + by_identity: block_production + .into_iter() + .map(|(k, v)| (k.to_string(), v)) + .collect(), + range: RpcBlockProductionRange { + first_slot, + last_slot, + }, + }, + )) + } + } +} + +// RPC interface that depends on AccountsDB +// Expected to be provided by API nodes +pub mod rpc_accounts { + use super::*; + use solana_runtime::commitment::BlockCommitmentArray; + #[rpc] + pub trait AccountsData { + type Metadata; + + #[rpc(meta, name = "getAccountInfo")] + fn get_account_info( + &self, + meta: Self::Metadata, + pubkey_str: String, + config: Option, + ) -> Result>>; + + #[rpc(meta, name = "getMultipleAccounts")] + fn get_multiple_accounts( + &self, + meta: Self::Metadata, + pubkey_strs: Vec, + config: Option, + ) -> Result>>>; + + #[rpc(meta, name = "getBlockCommitment")] + fn get_block_commitment( + &self, + meta: Self::Metadata, + block: Slot, + ) -> Result>; + + // SPL Token-specific RPC endpoints + // See https://github.com/solana-labs/solana-program-library/releases/tag/token-v2.0.0 for + // program details + + #[rpc(meta, name = "getTokenAccountBalance")] + fn get_token_account_balance( + &self, + meta: Self::Metadata, + pubkey_str: String, + commitment: Option, + ) -> Result>; + + #[rpc(meta, name = "getTokenSupply")] + fn get_token_supply( + &self, + meta: Self::Metadata, + mint_str: String, + commitment: Option, + ) -> Result>; + } + + pub struct AccountsDataImpl; + impl AccountsData for AccountsDataImpl { + type Metadata = JsonRpcRequestProcessor; + + fn get_account_info( + &self, + meta: Self::Metadata, + pubkey_str: String, + config: Option, + ) -> Result>> { + debug!("get_account_info rpc request received: {:?}", pubkey_str); + let pubkey = verify_pubkey(&pubkey_str)?; + meta.get_account_info(&pubkey, config) + } + + fn get_multiple_accounts( + &self, + meta: Self::Metadata, + pubkey_strs: Vec, + config: Option, + ) -> Result>>> { + 
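The block-production loop above walks the leader schedule for the requested range, counting per identity how many leader slots occurred and how many of those slots appear in the slot history as actually produced. A standalone version of that tally:

```rust
// Standalone tally mirroring get_block_production: per identity,
// (leader_slots, blocks_produced) over a contiguous slot range.
use std::collections::{HashMap, HashSet};

fn tally_block_production<'a>(
    first_slot: u64,
    slot_leaders: &[&'a str],
    produced_slots: &HashSet<u64>,
    filter_by_identity: Option<&str>,
) -> HashMap<&'a str, (usize, usize)> {
    let mut by_identity: HashMap<&str, (usize, usize)> = HashMap::new();
    for (i, identity) in slot_leaders.iter().enumerate() {
        let slot = first_slot + i as u64;
        if filter_by_identity.is_some_and(|f| f != *identity) {
            continue;
        }
        let entry = by_identity.entry(*identity).or_default();
        entry.0 += 1; // leader slots
        if produced_slots.contains(&slot) {
            entry.1 += 1; // blocks actually produced
        }
    }
    by_identity
}

fn main() {
    let produced: HashSet<u64> = [100, 102].into_iter().collect();
    let tally = tally_block_production(100, &["a", "a", "b"], &produced, None);
    assert_eq!(tally["a"], (2, 1));
    assert_eq!(tally["b"], (1, 1));
}
```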
debug!( + "get_multiple_accounts rpc request received: {:?}", + pubkey_strs.len() + ); + + let max_multiple_accounts = meta + .config + .max_multiple_accounts + .unwrap_or(MAX_MULTIPLE_ACCOUNTS); + if pubkey_strs.len() > max_multiple_accounts { + return Err(Error::invalid_params(format!( + "Too many inputs provided; max {max_multiple_accounts}" + ))); + } + let pubkeys = pubkey_strs + .into_iter() + .map(|pubkey_str| verify_pubkey(&pubkey_str)) + .collect::>>()?; + meta.get_multiple_accounts(pubkeys, config) + } + + fn get_block_commitment( + &self, + meta: Self::Metadata, + block: Slot, + ) -> Result> { + debug!("get_block_commitment rpc request received"); + Ok(meta.get_block_commitment(block)) + } + + fn get_token_account_balance( + &self, + meta: Self::Metadata, + pubkey_str: String, + commitment: Option, + ) -> Result> { + debug!( + "get_token_account_balance rpc request received: {:?}", + pubkey_str + ); + let pubkey = verify_pubkey(&pubkey_str)?; + meta.get_token_account_balance(&pubkey, commitment) + } + + fn get_token_supply( + &self, + meta: Self::Metadata, + mint_str: String, + commitment: Option, + ) -> Result> { + debug!("get_token_supply rpc request received: {:?}", mint_str); + let mint = verify_pubkey(&mint_str)?; + meta.get_token_supply(&mint, commitment) + } + } +} + +// RPC interface that depends on AccountsDB and requires accounts scan +// Expected to be provided by API nodes for now, but collected for easy separation and removal in +// the future. +pub mod rpc_accounts_scan { + use super::*; + #[rpc] + pub trait AccountsScan { + type Metadata; + + #[rpc(meta, name = "getProgramAccounts")] + fn get_program_accounts( + &self, + meta: Self::Metadata, + program_id_str: String, + config: Option, + ) -> Result>>; + + #[rpc(meta, name = "getLargestAccounts")] + fn get_largest_accounts( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result>>; + + #[rpc(meta, name = "getSupply")] + fn get_supply( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result>; + + // SPL Token-specific RPC endpoints + // See https://github.com/solana-labs/solana-program-library/releases/tag/token-v2.0.0 for + // program details + + #[rpc(meta, name = "getTokenLargestAccounts")] + fn get_token_largest_accounts( + &self, + meta: Self::Metadata, + mint_str: String, + commitment: Option, + ) -> Result>>; + + #[rpc(meta, name = "getTokenAccountsByOwner")] + fn get_token_accounts_by_owner( + &self, + meta: Self::Metadata, + owner_str: String, + token_account_filter: RpcTokenAccountsFilter, + config: Option, + ) -> Result>>; + + #[rpc(meta, name = "getTokenAccountsByDelegate")] + fn get_token_accounts_by_delegate( + &self, + meta: Self::Metadata, + delegate_str: String, + token_account_filter: RpcTokenAccountsFilter, + config: Option, + ) -> Result>>; + } + + pub struct AccountsScanImpl; + impl AccountsScan for AccountsScanImpl { + type Metadata = JsonRpcRequestProcessor; + + fn get_program_accounts( + &self, + meta: Self::Metadata, + program_id_str: String, + config: Option, + ) -> Result>> { + debug!( + "get_program_accounts rpc request received: {:?}", + program_id_str + ); + let program_id = verify_pubkey(&program_id_str)?; + let (config, filters, with_context, sort_results) = if let Some(config) = config { + ( + Some(config.account_config), + config.filters.unwrap_or_default(), + config.with_context.unwrap_or_default(), + config.sort_results.unwrap_or(true), + ) + } else { + (None, vec![], false, true) + }; + if filters.len() > MAX_GET_PROGRAM_ACCOUNT_FILTERS { + return 
Err(Error::invalid_params(format!( + "Too many filters provided; max {MAX_GET_PROGRAM_ACCOUNT_FILTERS}" + ))); + } + for filter in &filters { + verify_filter(filter)?; + } + meta.get_program_accounts(&program_id, config, filters, with_context, sort_results) + } + + fn get_largest_accounts( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result>> { + debug!("get_largest_accounts rpc request received"); + Ok(meta.get_largest_accounts(config)?) + } + + fn get_supply( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result> { + debug!("get_supply rpc request received"); + Ok(meta.get_supply(config)?) + } + + fn get_token_largest_accounts( + &self, + meta: Self::Metadata, + mint_str: String, + commitment: Option, + ) -> Result>> { + debug!( + "get_token_largest_accounts rpc request received: {:?}", + mint_str + ); + let mint = verify_pubkey(&mint_str)?; + meta.get_token_largest_accounts(&mint, commitment) + } + + fn get_token_accounts_by_owner( + &self, + meta: Self::Metadata, + owner_str: String, + token_account_filter: RpcTokenAccountsFilter, + config: Option, + ) -> Result>> { + debug!( + "get_token_accounts_by_owner rpc request received: {:?}", + owner_str + ); + let owner = verify_pubkey(&owner_str)?; + let token_account_filter = verify_token_account_filter(token_account_filter)?; + meta.get_token_accounts_by_owner(&owner, token_account_filter, config, true) + } + + fn get_token_accounts_by_delegate( + &self, + meta: Self::Metadata, + delegate_str: String, + token_account_filter: RpcTokenAccountsFilter, + config: Option, + ) -> Result>> { + debug!( + "get_token_accounts_by_delegate rpc request received: {:?}", + delegate_str + ); + let delegate = verify_pubkey(&delegate_str)?; + let token_account_filter = verify_token_account_filter(token_account_filter)?; + meta.get_token_accounts_by_delegate(&delegate, token_account_filter, config, true) + } + } +} + +// Full RPC interface that an API node is expected to provide +// (rpc_minimal should also be provided by an API node) +pub mod rpc_full { + use { + super::*, + solana_sdk::message::{SanitizedVersionedMessage, VersionedMessage}, + solana_transaction_status::UiInnerInstructions, + }; + #[rpc] + pub trait Full { + type Metadata; + + #[rpc(meta, name = "getInflationReward")] + fn get_inflation_reward( + &self, + meta: Self::Metadata, + address_strs: Vec, + config: Option, + ) -> BoxFuture>>>; + + #[rpc(meta, name = "getClusterNodes")] + fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result>; + + #[rpc(meta, name = "getRecentPerformanceSamples")] + fn get_recent_performance_samples( + &self, + meta: Self::Metadata, + limit: Option, + ) -> Result>; + + #[rpc(meta, name = "getSignatureStatuses")] + fn get_signature_statuses( + &self, + meta: Self::Metadata, + signature_strs: Vec, + config: Option, + ) -> BoxFuture>>>>; + + #[rpc(meta, name = "requestAirdrop")] + fn request_airdrop( + &self, + meta: Self::Metadata, + pubkey_str: String, + lamports: u64, + config: Option, + ) -> Result; + + #[rpc(meta, name = "sendTransaction")] + fn send_transaction( + &self, + meta: Self::Metadata, + data: String, + config: Option, + ) -> Result; + + #[rpc(meta, name = "simulateTransaction")] + fn simulate_transaction( + &self, + meta: Self::Metadata, + data: String, + config: Option, + ) -> Result>; + + #[rpc(meta, name = "minimumLedgerSlot")] + fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getBlock")] + fn get_block( + &self, + meta: Self::Metadata, + slot: Slot, + config: Option>, + ) -> 
BoxFuture>>; + + #[rpc(meta, name = "getBlockTime")] + fn get_block_time( + &self, + meta: Self::Metadata, + slot: Slot, + ) -> BoxFuture>>; + + #[rpc(meta, name = "getBlocks")] + fn get_blocks( + &self, + meta: Self::Metadata, + start_slot: Slot, + wrapper: Option, + config: Option, + ) -> BoxFuture>>; + + #[rpc(meta, name = "getBlocksWithLimit")] + fn get_blocks_with_limit( + &self, + meta: Self::Metadata, + start_slot: Slot, + limit: usize, + config: Option, + ) -> BoxFuture>>; + + #[rpc(meta, name = "getTransaction")] + fn get_transaction( + &self, + meta: Self::Metadata, + signature_str: String, + config: Option>, + ) -> BoxFuture>>; + + #[rpc(meta, name = "getSignaturesForAddress")] + fn get_signatures_for_address( + &self, + meta: Self::Metadata, + address: String, + config: Option, + ) -> BoxFuture>>; + + #[rpc(meta, name = "getFirstAvailableBlock")] + fn get_first_available_block(&self, meta: Self::Metadata) -> BoxFuture>; + + #[rpc(meta, name = "getLatestBlockhash")] + fn get_latest_blockhash( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result>; + + #[rpc(meta, name = "isBlockhashValid")] + fn is_blockhash_valid( + &self, + meta: Self::Metadata, + blockhash: String, + config: Option, + ) -> Result>; + + #[rpc(meta, name = "getFeeForMessage")] + fn get_fee_for_message( + &self, + meta: Self::Metadata, + data: String, + config: Option, + ) -> Result>>; + + #[rpc(meta, name = "getStakeMinimumDelegation")] + fn get_stake_minimum_delegation( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result>; + + #[rpc(meta, name = "getRecentPrioritizationFees")] + fn get_recent_prioritization_fees( + &self, + meta: Self::Metadata, + pubkey_strs: Option>, + ) -> Result>; + } + + pub struct FullImpl; + impl Full for FullImpl { + type Metadata = JsonRpcRequestProcessor; + + fn get_inflation_reward( + &self, + meta: Self::Metadata, + address_strs: Vec, + config: Option, + ) -> BoxFuture>>> { + debug!( + "get_inflation_reward rpc request received: {:?}", + address_strs.len() + ); + + let mut addresses: Vec = vec![]; + for address_str in address_strs { + match verify_pubkey(&address_str) { + Ok(pubkey) => { + addresses.push(pubkey); + } + Err(err) => return Box::pin(future::err(err)), + } + } + + Box::pin(async move { meta.get_inflation_reward(addresses, config).await }) + } + + fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result> { + debug!("get_cluster_nodes rpc request received"); + Ok(vec![RpcContactInfo { + pubkey: meta.bank(None).collector_id().to_string(), + gossip: None, + tvu: None, + tpu: None, + tpu_quic: None, + tpu_forwards: None, + tpu_forwards_quic: None, + tpu_vote: None, + serve_repair: None, + rpc: None, + pubsub: None, + version: Some(solana_version::Version::default().to_string()), + feature_set: Some(solana_version::Version::default().feature_set), + shred_version: None, + }]) + } + + fn get_recent_performance_samples( + &self, + meta: Self::Metadata, + limit: Option, + ) -> Result> { + debug!("get_recent_performance_samples request received"); + + let limit = limit.unwrap_or(PERFORMANCE_SAMPLES_LIMIT); + + if limit > PERFORMANCE_SAMPLES_LIMIT { + return Err(Error::invalid_params(format!( + "Invalid limit; max {PERFORMANCE_SAMPLES_LIMIT}" + ))); + } + + Ok(meta + .blockstore + .get_recent_perf_samples(limit) + .map_err(|err| { + warn!("get_recent_performance_samples failed: {:?}", err); + Error::invalid_request() + })? 
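On the wire, the methods declared on the `Full` trait above are addressed by the name given in `#[rpc(meta, name = ...)]`, with positional params. A sketch of what a `getSignatureStatuses` request body looks like, built with serde_json; the signature string is a placeholder, not a real transaction signature.

```rust
// Wire shape of a Full-trait method call; the params array is positional:
// a list of signatures followed by an optional config object.
use serde_json::json;

fn main() {
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "getSignatureStatuses",
        "params": [
            ["<base58-signature-placeholder>"],
            { "searchTransactionHistory": true }
        ]
    });
    println!("{}", serde_json::to_string_pretty(&request).unwrap());
}
```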
+ .into_iter() + .map(|(slot, sample)| rpc_perf_sample_from_perf_sample(slot, sample)) + .collect()) + } + + fn get_signature_statuses( + &self, + meta: Self::Metadata, + signature_strs: Vec, + config: Option, + ) -> BoxFuture>>>> { + debug!( + "get_signature_statuses rpc request received: {:?}", + signature_strs.len() + ); + if signature_strs.len() > MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS { + return Box::pin(future::err(Error::invalid_params(format!( + "Too many inputs provided; max {MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS}" + )))); + } + let mut signatures: Vec = vec![]; + for signature_str in signature_strs { + match verify_signature(&signature_str) { + Ok(signature) => { + signatures.push(signature); + } + Err(err) => return Box::pin(future::err(err)), + } + } + Box::pin(async move { meta.get_signature_statuses(signatures, config).await }) + } + + fn request_airdrop( + &self, + meta: Self::Metadata, + pubkey_str: String, + lamports: u64, + config: Option, + ) -> Result { + debug!("request_airdrop rpc request received"); + trace!( + "request_airdrop id={} lamports={} config: {:?}", + pubkey_str, + lamports, + &config + ); + + let faucet_addr = meta.config.faucet_addr.ok_or_else(Error::invalid_request)?; + let pubkey = verify_pubkey(&pubkey_str)?; + + let config = config.unwrap_or_default(); + let bank = meta.bank(config.commitment); + + let blockhash = if let Some(blockhash) = config.recent_blockhash { + verify_hash(&blockhash)? + } else { + bank.confirmed_last_blockhash() + }; + let last_valid_block_height = bank + .get_blockhash_last_valid_block_height(&blockhash) + .unwrap_or(0); + + let unsanitized_tx = + request_airdrop_transaction(&faucet_addr, &pubkey, lamports, blockhash).map_err( + |err| { + info!("request_airdrop_transaction failed: {:?}", err); + Error::internal_error() + }, + )?; + let transaction = sanitize_transaction( + unsanitized_tx.into(), + bank.as_ref(), + bank.get_reserved_account_keys(), + )?; + let signature = *transaction.signature(); + + _send_transaction(meta, signature, transaction, last_valid_block_height, None) + } + + fn send_transaction( + &self, + meta: Self::Metadata, + data: String, + config: Option, + ) -> Result { + debug!("send_transaction rpc request received"); + let RpcSendTransactionConfig { + skip_preflight, + preflight_commitment, + encoding, + max_retries: _, // Note: max_retries is not used here + min_context_slot, + } = config.unwrap_or_default(); + let tx_encoding = encoding.unwrap_or(UiTransactionEncoding::Base58); + let binary_encoding = tx_encoding.into_binary_encoding().ok_or_else(|| { + Error::invalid_params(format!( + "unsupported encoding: {tx_encoding}. 
Supported encodings: base58, base64" + )) + })?; + let unsanitized_tx = + decode_and_deserialize::(data, binary_encoding)?; + + let preflight_commitment = if skip_preflight { + Some(CommitmentConfig::processed()) + } else { + preflight_commitment.map(|commitment| CommitmentConfig { commitment }) + }; + let preflight_bank = &*meta.get_bank_with_config(RpcContextConfig { + commitment: preflight_commitment, + min_context_slot, + })?; + + let transaction = sanitize_transaction( + unsanitized_tx, + preflight_bank, + preflight_bank.get_reserved_account_keys(), + )?; + let signature = *transaction.signature(); + + let mut last_valid_block_height = preflight_bank + .get_blockhash_last_valid_block_height(transaction.message().recent_blockhash()) + .unwrap_or(0); + + let durable_nonce_info = transaction + .get_durable_nonce() + .map(|&pubkey| (pubkey, *transaction.message().recent_blockhash())); + if durable_nonce_info.is_some() || (skip_preflight && last_valid_block_height == 0) { + // While it uses a defined constant, this last_valid_block_height value is chosen arbitrarily. + // It provides a fallback timeout for durable-nonce transaction retries in case of + // malicious packing of the retry queue. Durable-nonce transactions are otherwise + // retried until the nonce is advanced. + last_valid_block_height = preflight_bank.block_height() + MAX_PROCESSING_AGE as u64; + } + + if !skip_preflight { + verify_transaction(&transaction, &preflight_bank.feature_set)?; + + if let TransactionSimulationResult { + result: Err(err), + logs, + post_simulation_accounts: _, + units_consumed, + return_data, + inner_instructions: _, // Always `None` due to `enable_cpi_recording = false` + } = preflight_bank.simulate_transaction(&transaction, false) + { + match err { + TransactionError::BlockhashNotFound => { + inc_new_counter_info!("rpc-send-tx_err-blockhash-not-found", 1); + } + _ => { + inc_new_counter_info!("rpc-send-tx_err-other", 1); + } + } + return Err(RpcCustomError::SendTransactionPreflightFailure { + message: format!("Transaction simulation failed: {err}"), + result: RpcSimulateTransactionResult { + err: Some(err), + logs: Some(logs), + accounts: None, + units_consumed: Some(units_consumed), + return_data: return_data.map(|return_data| return_data.into()), + inner_instructions: None, + replacement_blockhash: None, + }, + } + .into()); + } + } + + _send_transaction( + meta, + signature, + transaction, + last_valid_block_height, + durable_nonce_info, + ) + } + + fn simulate_transaction( + &self, + meta: Self::Metadata, + data: String, + config: Option, + ) -> Result> { + debug!("simulate_transaction rpc request received"); + let RpcSimulateTransactionConfig { + sig_verify, + replace_recent_blockhash, + commitment, + encoding, + accounts: config_accounts, + min_context_slot, + inner_instructions: enable_cpi_recording, + } = config.unwrap_or_default(); + let tx_encoding = encoding.unwrap_or(UiTransactionEncoding::Base58); + let binary_encoding = tx_encoding.into_binary_encoding().ok_or_else(|| { + Error::invalid_params(format!( + "unsupported encoding: {tx_encoding}. 
Supported encodings: base58, base64" + )) + })?; + let mut unsanitized_tx = + decode_and_deserialize::(data, binary_encoding)?; + + let bank = &*meta.get_bank_with_config(RpcContextConfig { + commitment, + min_context_slot, + })?; + let mut blockhash: Option = None; + if replace_recent_blockhash { + if sig_verify { + return Err(Error::invalid_params( + "sigVerify may not be used with replaceRecentBlockhash", + )); + } + let recent_blockhash = bank.last_blockhash(); + unsanitized_tx + .message + .set_recent_blockhash(recent_blockhash); + let last_valid_block_height = bank + .get_blockhash_last_valid_block_height(&recent_blockhash) + .expect("bank blockhash queue should contain blockhash"); + blockhash.replace(RpcBlockhash { + blockhash: recent_blockhash.to_string(), + last_valid_block_height, + }); + } + + let transaction = + sanitize_transaction(unsanitized_tx, bank, bank.get_reserved_account_keys())?; + if sig_verify { + verify_transaction(&transaction, &bank.feature_set)?; + } + + let TransactionSimulationResult { + result, + logs, + post_simulation_accounts, + units_consumed, + return_data, + inner_instructions, + } = bank.simulate_transaction(&transaction, enable_cpi_recording); + + let account_keys = transaction.message().account_keys(); + let number_of_accounts = account_keys.len(); + + let accounts = if let Some(config_accounts) = config_accounts { + let accounts_encoding = config_accounts + .encoding + .unwrap_or(UiAccountEncoding::Base64); + + if accounts_encoding == UiAccountEncoding::Binary + || accounts_encoding == UiAccountEncoding::Base58 + { + return Err(Error::invalid_params("base58 encoding not supported")); + } + + if config_accounts.addresses.len() > number_of_accounts { + return Err(Error::invalid_params(format!( + "Too many accounts provided; max {number_of_accounts}" + ))); + } + + if result.is_err() { + Some(vec![None; config_accounts.addresses.len()]) + } else { + let mut post_simulation_accounts_map = HashMap::new(); + for (pubkey, data) in post_simulation_accounts { + post_simulation_accounts_map.insert(pubkey, data); + } + + Some( + config_accounts + .addresses + .iter() + .map(|address_str| { + let pubkey = verify_pubkey(address_str)?; + get_encoded_account( + bank, + &pubkey, + accounts_encoding, + None, + Some(&post_simulation_accounts_map), + ) + }) + .collect::>>()?, + ) + } + } else { + None + }; + + let inner_instructions = inner_instructions.map(|info| { + map_inner_instructions(info) + .map(|converted| UiInnerInstructions::parse(converted, &account_keys)) + .collect() + }); + + Ok(new_response( + bank, + RpcSimulateTransactionResult { + err: result.err(), + logs: Some(logs), + accounts, + units_consumed: Some(units_consumed), + return_data: return_data.map(|return_data| return_data.into()), + inner_instructions, + replacement_blockhash: blockhash, + }, + )) + } + + fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result { + debug!("minimum_ledger_slot rpc request received"); + meta.minimum_ledger_slot() + } + + fn get_block( + &self, + meta: Self::Metadata, + slot: Slot, + config: Option>, + ) -> BoxFuture>> { + debug!("get_block rpc request received: {:?}", slot); + Box::pin(async move { meta.get_block(slot, config).await }) + } + + fn get_block_time( + &self, + meta: Self::Metadata, + slot: Slot, + ) -> BoxFuture>> { + Box::pin(async move { meta.get_block_time(slot).await }) + } + + fn get_blocks( + &self, + meta: Self::Metadata, + start_slot: Slot, + wrapper: Option, + config: Option, + ) -> BoxFuture>> { + let (end_slot, maybe_config) = + 
wrapper.map(|wrapper| wrapper.unzip()).unwrap_or_default(); + debug!( + "get_blocks rpc request received: {}-{:?}", + start_slot, end_slot + ); + Box::pin(async move { + meta.get_blocks(start_slot, end_slot, config.or(maybe_config)) + .await + }) + } + + fn get_blocks_with_limit( + &self, + meta: Self::Metadata, + start_slot: Slot, + limit: usize, + config: Option, + ) -> BoxFuture>> { + debug!( + "get_blocks_with_limit rpc request received: {}-{}", + start_slot, limit, + ); + Box::pin(async move { meta.get_blocks_with_limit(start_slot, limit, config).await }) + } + + fn get_transaction( + &self, + meta: Self::Metadata, + signature_str: String, + config: Option>, + ) -> BoxFuture>> { + debug!("get_transaction rpc request received: {:?}", signature_str); + let signature = verify_signature(&signature_str); + if let Err(err) = signature { + return Box::pin(future::err(err)); + } + Box::pin(async move { meta.get_transaction(signature.unwrap(), config).await }) + } + + fn get_signatures_for_address( + &self, + meta: Self::Metadata, + address: String, + config: Option, + ) -> BoxFuture>> { + let RpcSignaturesForAddressConfig { + before, + until, + limit, + commitment, + min_context_slot, + } = config.unwrap_or_default(); + let verification = + verify_and_parse_signatures_for_address_params(address, before, until, limit); + + match verification { + Err(err) => Box::pin(future::err(err)), + Ok((address, before, until, limit)) => Box::pin(async move { + meta.get_signatures_for_address( + address, + before, + until, + limit, + RpcContextConfig { + commitment, + min_context_slot, + }, + ) + .await + }), + } + } + + fn get_first_available_block(&self, meta: Self::Metadata) -> BoxFuture> { + debug!("get_first_available_block rpc request received"); + Box::pin(async move { Ok(meta.get_first_available_block().await) }) + } + + fn get_latest_blockhash( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result> { + debug!("get_latest_blockhash rpc request received"); + meta.get_latest_blockhash(config.unwrap_or_default()) + } + + fn is_blockhash_valid( + &self, + meta: Self::Metadata, + blockhash: String, + config: Option, + ) -> Result> { + let blockhash = + Hash::from_str(&blockhash).map_err(|e| Error::invalid_params(format!("{e:?}")))?; + meta.is_blockhash_valid(&blockhash, config.unwrap_or_default()) + } + + fn get_fee_for_message( + &self, + meta: Self::Metadata, + data: String, + config: Option, + ) -> Result>> { + debug!("get_fee_for_message rpc request received"); + let message = decode_and_deserialize::( + data, + TransactionBinaryEncoding::Base64, + )?; + let bank = &*meta.get_bank_with_config(config.unwrap_or_default())?; + let sanitized_versioned_message = SanitizedVersionedMessage::try_from(message) + .map_err(|err| { + Error::invalid_params(format!("invalid transaction message: {err}")) + })?; + let sanitized_message = SanitizedMessage::try_new( + sanitized_versioned_message, + bank, + bank.get_reserved_account_keys(), + ) + .map_err(|err| Error::invalid_params(format!("invalid transaction message: {err}")))?; + let fee = bank.get_fee_for_message(&sanitized_message); + Ok(new_response(bank, fee)) + } + + fn get_stake_minimum_delegation( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result> { + debug!("get_stake_minimum_delegation rpc request received"); + meta.get_stake_minimum_delegation(config.unwrap_or_default()) + } + + fn get_recent_prioritization_fees( + &self, + meta: Self::Metadata, + pubkey_strs: Option>, + ) -> Result> { + let pubkey_strs = 
pubkey_strs.unwrap_or_default(); + debug!( + "get_recent_prioritization_fees rpc request received: {:?} pubkeys", + pubkey_strs.len() + ); + if pubkey_strs.len() > MAX_TX_ACCOUNT_LOCKS { + return Err(Error::invalid_params(format!( + "Too many inputs provided; max {MAX_TX_ACCOUNT_LOCKS}" + ))); + } + let pubkeys = pubkey_strs + .into_iter() + .map(|pubkey_str| verify_pubkey(&pubkey_str)) + .collect::>>()?; + meta.get_recent_prioritization_fees(pubkeys) + } + } +} + +fn rpc_perf_sample_from_perf_sample(slot: u64, sample: PerfSample) -> RpcPerfSample { + match sample { + PerfSample::V1(PerfSampleV1 { + num_transactions, + num_slots, + sample_period_secs, + }) => RpcPerfSample { + slot, + num_transactions, + num_non_vote_transactions: None, + num_slots, + sample_period_secs, + }, + PerfSample::V2(PerfSampleV2 { + num_transactions, + num_non_vote_transactions, + num_slots, + sample_period_secs, + }) => RpcPerfSample { + slot, + num_transactions, + num_non_vote_transactions: Some(num_non_vote_transactions), + num_slots, + sample_period_secs, + }, + } +} + +const MAX_BASE58_SIZE: usize = 1683; // Golden, bump if PACKET_DATA_SIZE changes +const MAX_BASE64_SIZE: usize = 1644; // Golden, bump if PACKET_DATA_SIZE changes +fn decode_and_deserialize(encoded: String, encoding: TransactionBinaryEncoding) -> Result +where + T: serde::de::DeserializeOwned, +{ + let wire_output = match encoding { + TransactionBinaryEncoding::Base58 => { + inc_new_counter_info!("rpc-base58_encoded_tx", 1); + if encoded.len() > MAX_BASE58_SIZE { + return Err(Error::invalid_params(format!( + "base58 encoded {} too large: {} bytes (max: encoded/raw {}/{})", + type_name::(), + encoded.len(), + MAX_BASE58_SIZE, + PACKET_DATA_SIZE, + ))); + } + bs58::decode(encoded) + .into_vec() + .map_err(|e| Error::invalid_params(format!("invalid base58 encoding: {e:?}")))? + } + TransactionBinaryEncoding::Base64 => { + inc_new_counter_info!("rpc-base64_encoded_tx", 1); + if encoded.len() > MAX_BASE64_SIZE { + return Err(Error::invalid_params(format!( + "base64 encoded {} too large: {} bytes (max: encoded/raw {}/{})", + type_name::(), + encoded.len(), + MAX_BASE64_SIZE, + PACKET_DATA_SIZE, + ))); + } + BASE64_STANDARD + .decode(encoded) + .map_err(|e| Error::invalid_params(format!("invalid base64 encoding: {e:?}")))? 
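+ // Like the base58 arm above, this precheck rejects oversized payloads using a
+ // golden encoded-size limit derived from PACKET_DATA_SIZE; the decoded bytes are
+ // re-checked against PACKET_DATA_SIZE itself once the match resolves.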
+ } + }; + if wire_output.len() > PACKET_DATA_SIZE { + return Err(Error::invalid_params(format!( + "decoded {} too large: {} bytes (max: {} bytes)", + type_name::(), + wire_output.len(), + PACKET_DATA_SIZE + ))); + } + bincode::options() + .with_limit(PACKET_DATA_SIZE as u64) + .with_fixint_encoding() + .allow_trailing_bytes() + .deserialize_from(&wire_output[..]) + .map_err(|err| { + Error::invalid_params(format!( + "failed to deserialize {}: {}", + type_name::(), + &err.to_string() + )) + }) +} + +fn sanitize_transaction( + transaction: VersionedTransaction, + address_loader: impl AddressLoader, + reserved_account_keys: &HashSet, +) -> Result { + SanitizedTransaction::try_create( + transaction, + MessageHash::Compute, + None, + address_loader, + reserved_account_keys, + ) + .map_err(|err| Error::invalid_params(format!("invalid transaction: {err}"))) +} + +pub fn create_node_exit(exit: Arc) -> Arc> { + let mut node_exit = Exit::default(); + node_exit.register_exit(Box::new(move || exit.store(true, Ordering::Relaxed))); + Arc::new(RwLock::new(node_exit)) +} + +pub fn create_test_transaction_entries( + keypairs: Vec<&Keypair>, + bank: Arc, +) -> (Vec, Vec) { + let mint_keypair = keypairs[0]; + let keypair1 = keypairs[1]; + let keypair2 = keypairs[2]; + let keypair3 = keypairs[3]; + let blockhash = bank.confirmed_last_blockhash(); + let rent_exempt_amount = bank.get_minimum_balance_for_rent_exemption(0); + + let mut signatures = Vec::new(); + // Generate transactions for processing + // Successful transaction + let success_tx = solana_sdk::system_transaction::transfer( + mint_keypair, + &keypair1.pubkey(), + rent_exempt_amount, + blockhash, + ); + signatures.push(success_tx.signatures[0]); + let entry_1 = solana_entry::entry::next_entry(&blockhash, 1, vec![success_tx]); + // Failed transaction, InstructionError + let ix_error_tx = solana_sdk::system_transaction::transfer( + keypair2, + &keypair3.pubkey(), + 2 * rent_exempt_amount, + blockhash, + ); + signatures.push(ix_error_tx.signatures[0]); + let entry_2 = solana_entry::entry::next_entry(&entry_1.hash, 1, vec![ix_error_tx]); + (vec![entry_1, entry_2], signatures) +} + +#[cfg(test)] +pub(crate) fn populate_blockstore_for_tests( + entries: Vec, + bank: Arc, + blockstore: Arc, + max_complete_transaction_status_slot: Arc, +) { + let slot = bank.slot(); + let parent_slot = bank.parent_slot(); + let shreds = solana_ledger::blockstore::entries_to_test_shreds( + &entries, + slot, + parent_slot, + true, + 0, + true, // merkle_variant + ); + blockstore.insert_shreds(shreds, None, false).unwrap(); + blockstore.set_roots(std::iter::once(&slot)).unwrap(); + + let (transaction_status_sender, transaction_status_receiver) = unbounded(); + let (replay_vote_sender, _replay_vote_receiver) = unbounded(); + let transaction_status_service = + solana_rpc::transaction_status_service::TransactionStatusService::new( + transaction_status_receiver, + max_complete_transaction_status_slot, + true, + None, + blockstore, + false, + Arc::new(AtomicBool::new(false)), + ); + + // Check that process_entries successfully writes can_commit transactions statuses, and + // that they are matched properly by get_rooted_block + use solana_runtime::installed_scheduler_pool::BankWithScheduler; + assert_eq!( + solana_ledger::blockstore_processor::process_entries_for_tests( + &BankWithScheduler::new_without_scheduler(bank), + entries, + Some( + &solana_ledger::blockstore_processor::TransactionStatusSender { + sender: transaction_status_sender, + }, + ), + Some(&replay_vote_sender), + 
), + Ok(()) + ); + + transaction_status_service.join().unwrap(); +} + +#[cfg(test)] +mod tests { + use { + super::{ + rpc_accounts::*, rpc_accounts_scan::*, rpc_bank::*, rpc_full::*, rpc_minimal::*, *, + }, + bincode::{deserialize, serialize}, + jsonrpc_core::{futures, ErrorCode, MetaIoHandler, Output, Response, Value}, + jsonrpc_core_client::transports::local, + serde::de::DeserializeOwned, + solana_entry::entry::next_versioned_entry, + solana_ledger::{ + blockstore_meta::PerfSampleV2, + blockstore_processor::fill_blockstore_slot_with_ticks, + genesis_utils::{create_genesis_config, GenesisConfigInfo}, + }, + solana_rpc_client_api::{ + custom_error::{ + JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE, + JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE, + JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION, + }, + filter::MemcmpEncodedBytes, + }, + solana_runtime::{ + accounts_background_service::AbsRequestSender, bank::BankTestConfig, + commitment::BlockCommitment, non_circulating_supply::non_circulating_accounts, + }, + solana_sdk::{ + account::{Account, WritableAccount}, + address_lookup_table::{ + self, + state::{AddressLookupTable, LookupTableMeta}, + }, + compute_budget::ComputeBudgetInstruction, + fee_calculator::FeeRateGovernor, + hash::{hash, Hash}, + instruction::InstructionError, + message::{ + v0::{self, MessageAddressTableLookup}, + Message, MessageHeader, VersionedMessage, + }, + nonce::{self, state::DurableNonce}, + reserved_account_keys::ReservedAccountKeys, + signature::{Keypair, Signer}, + slot_hashes::SlotHashes, + system_program, system_transaction, + timing::slot_duration_from_slots_per_year, + transaction::{ + self, SimpleAddressLoader, Transaction, TransactionError, TransactionVersion, + }, + }, + solana_transaction_status::{ + EncodedConfirmedBlock, EncodedTransaction, EncodedTransactionWithStatusMeta, + TransactionDetails, + }, + spl_pod::optional_keys::OptionalNonZeroPubkey, + spl_token_2022::{ + extension::{ + immutable_owner::ImmutableOwner, memo_transfer::MemoTransfer, + mint_close_authority::MintCloseAuthority, BaseStateWithExtensionsMut, + ExtensionType, StateWithExtensionsMut, + }, + solana_program::{program_option::COption, pubkey::Pubkey as SplTokenPubkey}, + state::{AccountState as TokenAccountState, Mint}, + }, + std::{borrow::Cow, collections::HashMap}, + }; + + const TEST_MINT_LAMPORTS: u64 = 1_000_000_000; + const TEST_SIGNATURE_FEE: u64 = 5000; + const TEST_SLOTS_PER_EPOCH: u64 = 128 + 1; + + fn create_test_request(method: &str, params: Option) -> Value { + json!({ + "jsonrpc": "2.0", + "id": 1u64, + "method": method, + "params": params, + }) + } + + fn parse_success_result(response: Response) -> T { + if let Response::Single(output) = response { + match output { + Output::Success(success) => serde_json::from_value(success.result).unwrap(), + Output::Failure(failure) => { + panic!("Expected success but received: {failure:?}"); + } + } + } else { + panic!("Expected single response"); + } + } + + fn parse_failure_response(response: Response) -> (i64, String) { + if let Response::Single(output) = response { + match output { + Output::Success(success) => { + panic!("Expected failure but received: {success:?}"); + } + Output::Failure(failure) => (failure.error.code.code(), failure.error.message), + } + } else { + panic!("Expected single response"); + } + } + + struct RpcHandler { + io: MetaIoHandler, + meta: JsonRpcRequestProcessor, + mint_keypair: Keypair, + blockstore: Arc, + bank_forks: Arc>, + max_complete_transaction_status_slot: Arc, + } + + impl 
RpcHandler { + fn start() -> Self { + Self::start_with_config(JsonRpcConfig { + enable_rpc_transaction_history: true, + ..JsonRpcConfig::default() + }) + } + + fn start_with_config(config: JsonRpcConfig) -> Self { + let (bank_forks, mint_keypair, _leader_vote_keypair) = + new_bank_forks_with_config(BankTestConfig { + secondary_indexes: config.account_indexes.clone(), + }); + + let ledger_path = get_tmp_ledger_path!(); + let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); + + let exit = Arc::new(AtomicBool::new(false)); + let node_exit = create_node_exit(exit); + // note that this means that slot 0 will always be considered complete + let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(0)); + + let meta = JsonRpcRequestProcessor::new( + config, + None, + bank_forks.clone(), + blockstore.clone(), + node_exit, + Hash::default(), + unbounded(), + None, + Arc::new(RwLock::new(LargestAccountsCache::new(30))), + max_complete_transaction_status_slot.clone(), + Arc::new(PrioritizationFeeCache::default()), + ); + + let mut io = MetaIoHandler::default(); + io.extend_with(MinimalImpl.to_delegate()); + io.extend_with(BankDataImpl.to_delegate()); + io.extend_with(AccountsDataImpl.to_delegate()); + io.extend_with(AccountsScanImpl.to_delegate()); + io.extend_with(FullImpl.to_delegate()); + Self { + io, + meta, + mint_keypair, + bank_forks, + blockstore, + max_complete_transaction_status_slot, + } + } + + fn handle_request_sync(&self, req: Value) -> Response { + let response = &self + .io + .handle_request_sync(&req.to_string(), self.meta.clone()) + .expect("no response"); + serde_json::from_str(response).expect("failed to deserialize response") + } + + fn overwrite_working_bank_entries(&self, entries: Vec) { + populate_blockstore_for_tests( + entries, + self.working_bank(), + self.blockstore.clone(), + self.max_complete_transaction_status_slot.clone(), + ); + } + + fn create_test_transactions_and_populate_blockstore(&self) -> Vec { + let mint_keypair = &self.mint_keypair; + let keypair1 = Keypair::new(); + let keypair2 = Keypair::new(); + let keypair3 = Keypair::new(); + let bank = self.working_bank(); + let rent_exempt_amount = bank.get_minimum_balance_for_rent_exemption(0); + bank.transfer( + rent_exempt_amount + TEST_SIGNATURE_FEE, + mint_keypair, + &keypair2.pubkey(), + ) + .unwrap(); + + let (entries, signatures) = create_test_transaction_entries( + vec![&self.mint_keypair, &keypair1, &keypair2, &keypair3], + bank, + ); + self.overwrite_working_bank_entries(entries); + signatures + } + + fn create_test_versioned_transactions_and_populate_blockstore( + &self, + address_table_key: Option, + ) -> Vec { + let address_table_key = + address_table_key.unwrap_or_else(|| self.store_address_lookup_table()); + + let bank = self.working_bank(); + let recent_blockhash = bank.confirmed_last_blockhash(); + let legacy_message = VersionedMessage::Legacy(Message { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + recent_blockhash, + account_keys: vec![self.mint_keypair.pubkey()], + instructions: vec![], + }); + let version_0_message = VersionedMessage::V0(v0::Message { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + recent_blockhash, + account_keys: vec![self.mint_keypair.pubkey()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: address_table_key, + writable_indexes: vec![0], + 
readonly_indexes: vec![], + }], + instructions: vec![], + }); + + let mut signatures = Vec::new(); + let legacy_tx = + VersionedTransaction::try_new(legacy_message, &[&self.mint_keypair]).unwrap(); + signatures.push(legacy_tx.signatures[0]); + let version_0_tx = + VersionedTransaction::try_new(version_0_message, &[&self.mint_keypair]).unwrap(); + signatures.push(version_0_tx.signatures[0]); + let entry1 = next_versioned_entry(&recent_blockhash, 1, vec![legacy_tx]); + let entry2 = next_versioned_entry(&entry1.hash, 1, vec![version_0_tx]); + let entries = vec![entry1, entry2]; + self.overwrite_working_bank_entries(entries); + signatures + } + + fn store_address_lookup_table(&self) -> Pubkey { + let bank = self.working_bank(); + let address_table_pubkey = Pubkey::new_unique(); + let address_table_account = { + let address_table_state = AddressLookupTable { + meta: LookupTableMeta { + // ensure that active address length is 1 at slot 0 + last_extended_slot_start_index: 1, + ..LookupTableMeta::default() + }, + addresses: Cow::Owned(vec![Pubkey::new_unique()]), + }; + let address_table_data = address_table_state.serialize_for_tests().unwrap(); + let min_balance_lamports = + bank.get_minimum_balance_for_rent_exemption(address_table_data.len()); + AccountSharedData::create( + min_balance_lamports, + address_table_data, + address_lookup_table::program::id(), + false, + 0, + ) + }; + bank.store_account(&address_table_pubkey, &address_table_account); + address_table_pubkey + } + + fn add_roots_to_blockstore(&self, mut roots: Vec) { + roots.retain(|&slot| slot > 0); + if roots.is_empty() { + return; + } + + let mut parent_bank = self.bank_forks.read().unwrap().working_bank(); + for (i, root) in roots.iter().enumerate() { + let new_bank = + Bank::new_from_parent(parent_bank.clone(), parent_bank.collector_id(), *root); + parent_bank = self + .bank_forks + .write() + .unwrap() + .insert(new_bank) + .clone_without_scheduler(); + let parent = if i > 0 { roots[i - 1] } else { 0 }; + fill_blockstore_slot_with_ticks( + &self.blockstore, + 5, + *root, + parent, + Hash::default(), + ); + } + self.blockstore.set_roots(roots.iter()).unwrap(); + let new_bank = Bank::new_from_parent( + parent_bank.clone(), + parent_bank.collector_id(), + roots.iter().max().unwrap() + 1, + ); + self.bank_forks.write().unwrap().insert(new_bank); + + for root in roots.iter() { + self.bank_forks + .write() + .unwrap() + .set_root(*root, &AbsRequestSender::default(), Some(0)) + .unwrap(); + let block_time = self + .bank_forks + .read() + .unwrap() + .get(*root) + .unwrap() + .clock() + .unix_timestamp; + self.blockstore.cache_block_time(*root, block_time).unwrap(); + } + } + + fn advance_bank_to_confirmed_slot(&self, slot: Slot) -> Arc { + let parent_bank = self.working_bank(); + self.bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent(parent_bank, &Pubkey::default(), slot)) + .clone_without_scheduler() + } + + fn update_prioritization_fee_cache(&self, transactions: Vec) { + let bank = self.working_bank(); + let prioritization_fee_cache = &self.meta.prioritization_fee_cache; + let transactions: Vec<_> = transactions + .into_iter() + .map(SanitizedTransaction::from_transaction_for_tests) + .collect(); + prioritization_fee_cache.update(&bank, transactions.iter()); + } + + fn get_prioritization_fee_cache(&self) -> &PrioritizationFeeCache { + &self.meta.prioritization_fee_cache + } + + fn working_bank(&self) -> Arc { + self.bank_forks.read().unwrap().working_bank() + } + + fn leader_pubkey(&self) -> Pubkey { + 
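+ // In this harness the slot leader is simply the collector id of the working
+ // bank; tests such as `test_rpc_get_slot_leader` compare the RPC result
+ // against this value.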
*self.working_bank().collector_id() + } + } + + #[test] + fn test_rpc_request_processor_new() { + let bob_pubkey = solana_sdk::pubkey::new_rand(); + let genesis = create_genesis_config(100); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let meta = JsonRpcRequestProcessor::new_from_bank(bank); + + let bank = meta.bank_forks.read().unwrap().root_bank(); + bank.transfer(20, &genesis.mint_keypair, &bob_pubkey) + .unwrap(); + + assert_eq!( + meta.get_transaction_count(RpcContextConfig::default()) + .unwrap(), + 1 + ); + } + + #[test] + fn test_rpc_get_balance() { + let genesis = create_genesis_config(20); + let mint_pubkey = genesis.mint_keypair.pubkey(); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let meta = JsonRpcRequestProcessor::new_from_bank(bank); + + let mut io = MetaIoHandler::default(); + io.extend_with(MinimalImpl.to_delegate()); + + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{mint_pubkey}"]}}"# + ); + let res = io.handle_request_sync(&req, meta); + let expected = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":20, + }, + "id": 1, + }); + let result = serde_json::from_str::(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + } + + #[test] + fn test_rpc_get_balance_via_client() { + let genesis = create_genesis_config(20); + let mint_pubkey = genesis.mint_keypair.pubkey(); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let meta = JsonRpcRequestProcessor::new_from_bank(bank); + + let mut io = MetaIoHandler::default(); + io.extend_with(MinimalImpl.to_delegate()); + + async fn use_client(client: rpc_minimal::gen_client::Client, mint_pubkey: Pubkey) -> u64 { + client + .get_balance(mint_pubkey.to_string(), None) + .await + .unwrap() + .value + } + + let fut = async { + let (client, server) = + local::connect_with_metadata::(&io, meta); + let client = use_client(client, mint_pubkey); + + futures::join!(client, server) + }; + let (response, _) = futures::executor::block_on(fut); + assert_eq!(response, 20); + } + + #[test] + fn test_rpc_get_recent_performance_samples() { + let rpc = RpcHandler::start(); + + let slot = 0; + let num_slots = 1; + let num_transactions = 4; + let num_non_vote_transactions = 1; + let sample_period_secs = 60; + rpc.blockstore + .write_perf_sample( + slot, + &PerfSampleV2 { + num_slots, + num_transactions, + num_non_vote_transactions, + sample_period_secs, + }, + ) + .expect("write to blockstore"); + + let request = create_test_request("getRecentPerformanceSamples", None); + let result: Value = parse_success_result(rpc.handle_request_sync(request)); + let expected = json!([{ + "slot": slot, + "numSlots": num_slots, + "numTransactions": num_transactions, + "numNonVoteTransactions": num_non_vote_transactions, + "samplePeriodSecs": sample_period_secs, + }]); + assert_eq!(result, expected); + } + + #[test] + fn test_rpc_get_recent_performance_samples_invalid_limit() { + let rpc = RpcHandler::start(); + let request = create_test_request("getRecentPerformanceSamples", Some(json!([10_000]))); + let response = parse_failure_response(rpc.handle_request_sync(request)); + let expected = ( + ErrorCode::InvalidParams.code(), + String::from("Invalid limit; max 720"), + ); + assert_eq!(response, expected); + } + + #[test] + fn test_rpc_get_slot_leader() { + let rpc = RpcHandler::start(); + let request = create_test_request("getSlotLeader", None); + let result: String = 
parse_success_result(rpc.handle_request_sync(request)); + let expected = rpc.leader_pubkey().to_string(); + assert_eq!(result, expected); + } + + #[test] + fn test_rpc_get_tx_count() { + let bob_pubkey = solana_sdk::pubkey::new_rand(); + let genesis = create_genesis_config(10); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let meta = JsonRpcRequestProcessor::new_from_bank(bank); + + let mut io = MetaIoHandler::default(); + io.extend_with(MinimalImpl.to_delegate()); + + // Add 4 transactions + let bank = meta.bank_forks.read().unwrap().root_bank(); + bank.transfer(1, &genesis.mint_keypair, &bob_pubkey) + .unwrap(); + bank.transfer(2, &genesis.mint_keypair, &bob_pubkey) + .unwrap(); + bank.transfer(3, &genesis.mint_keypair, &bob_pubkey) + .unwrap(); + bank.transfer(4, &genesis.mint_keypair, &bob_pubkey) + .unwrap(); + + let req = r#"{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}"#; + let res = io.handle_request_sync(req, meta); + let expected = r#"{"jsonrpc":"2.0","result":4,"id":1}"#; + let expected: Response = + serde_json::from_str(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + } + + #[test] + fn test_rpc_minimum_ledger_slot() { + let rpc = RpcHandler::start(); + // populate blockstore so that a minimum slot can be detected + rpc.create_test_transactions_and_populate_blockstore(); + let request = create_test_request("minimumLedgerSlot", None); + let result: Slot = parse_success_result(rpc.handle_request_sync(request)); + assert_eq!(0, result); + } + + #[test] + fn test_get_supply() { + let rpc = RpcHandler::start(); + let request = create_test_request("getSupply", None); + let result = { + let mut result: RpcResponse = + parse_success_result(rpc.handle_request_sync(request)); + result.value.non_circulating_accounts.sort(); + result.value + }; + let expected = { + let mut non_circulating_accounts: Vec = non_circulating_accounts() + .iter() + .map(|pubkey| pubkey.to_string()) + .collect(); + non_circulating_accounts.sort(); + let total_capitalization = rpc.working_bank().capitalization(); + RpcSupply { + non_circulating: 0, + circulating: total_capitalization, + total: total_capitalization, + non_circulating_accounts, + } + }; + assert_eq!(result, expected); + } + + #[test] + fn test_get_supply_exclude_account_list() { + let rpc = RpcHandler::start(); + let request = create_test_request( + "getSupply", + Some(json!([{"excludeNonCirculatingAccountsList": true}])), + ); + let result: RpcResponse = parse_success_result(rpc.handle_request_sync(request)); + let expected = { + let total_capitalization = rpc.working_bank().capitalization(); + RpcSupply { + non_circulating: 0, + circulating: total_capitalization, + total: total_capitalization, + non_circulating_accounts: vec![], + } + }; + assert_eq!(result.value, expected); + } + + #[test] + fn test_get_largest_accounts() { + let rpc = RpcHandler::start(); + + // make a non-circulating account one of the largest accounts + let non_circulating_key = &non_circulating_accounts()[0]; + let bank = rpc.working_bank(); + bank.process_transaction(&system_transaction::transfer( + &rpc.mint_keypair, + non_circulating_key, + 500_000, + bank.confirmed_last_blockhash(), + )) + .expect("process transaction"); + + let request = create_test_request("getLargestAccounts", None); + let largest_accounts_result: RpcResponse> = + 
parse_success_result(rpc.handle_request_sync(request)); + assert_eq!(largest_accounts_result.value.len(), 20); + + // Get mint balance + let request = create_test_request( + "getBalance", + Some(json!([rpc.mint_keypair.pubkey().to_string()])), + ); + let mint_balance_result: RpcResponse = + parse_success_result(rpc.handle_request_sync(request)); + assert!(largest_accounts_result.value.contains(&RpcAccountBalance { + address: rpc.mint_keypair.pubkey().to_string(), + lamports: mint_balance_result.value, + })); + + // Get non-circulating account balance + let request = + create_test_request("getBalance", Some(json!([non_circulating_key.to_string()]))); + let non_circulating_balance_result: RpcResponse = + parse_success_result(rpc.handle_request_sync(request)); + assert!(largest_accounts_result.value.contains(&RpcAccountBalance { + address: non_circulating_key.to_string(), + lamports: non_circulating_balance_result.value, + })); + + // Test Circulating/NonCirculating Filter + let request = create_test_request( + "getLargestAccounts", + Some(json!([{"filter":"circulating"}])), + ); + let largest_accounts_result: RpcResponse> = + parse_success_result(rpc.handle_request_sync(request)); + assert_eq!(largest_accounts_result.value.len(), 20); + assert!(!largest_accounts_result.value.contains(&RpcAccountBalance { + address: non_circulating_key.to_string(), + lamports: non_circulating_balance_result.value, + })); + + let request = create_test_request( + "getLargestAccounts", + Some(json!([{"filter":"nonCirculating"}])), + ); + let largest_accounts_result: RpcResponse> = + parse_success_result(rpc.handle_request_sync(request)); + assert_eq!(largest_accounts_result.value.len(), 1); + assert!(largest_accounts_result.value.contains(&RpcAccountBalance { + address: non_circulating_key.to_string(), + lamports: non_circulating_balance_result.value, + })); + } + + #[test] + fn test_rpc_get_minimum_balance_for_rent_exemption() { + let rpc = RpcHandler::start(); + let data_len = 50; + let request = + create_test_request("getMinimumBalanceForRentExemption", Some(json!([data_len]))); + let result: u64 = parse_success_result(rpc.handle_request_sync(request)); + let expected = rpc + .working_bank() + .get_minimum_balance_for_rent_exemption(data_len); + assert_eq!(result, expected); + } + + #[test] + fn test_rpc_get_inflation() { + let rpc = RpcHandler::start(); + let bank = rpc.working_bank(); + let request = create_test_request("getInflationGovernor", None); + let result: RpcInflationGovernor = parse_success_result(rpc.handle_request_sync(request)); + let expected: RpcInflationGovernor = bank.inflation().into(); + assert_eq!(result, expected); + + // Query inflation rate for current epoch + let request = create_test_request("getInflationRate", None); + let result: RpcInflationRate = parse_success_result(rpc.handle_request_sync(request)); + let inflation = bank.inflation(); + let epoch = bank.epoch(); + let slot_in_year = bank.slot_in_year_for_inflation(); + let expected = RpcInflationRate { + total: inflation.total(slot_in_year), + validator: inflation.validator(slot_in_year), + foundation: inflation.foundation(slot_in_year), + epoch, + }; + assert_eq!(result, expected); + } + + #[test] + fn test_rpc_get_epoch_schedule() { + let rpc = RpcHandler::start(); + let bank = rpc.working_bank(); + let request = create_test_request("getEpochSchedule", None); + let result: EpochSchedule = parse_success_result(rpc.handle_request_sync(request)); + let expected = bank.epoch_schedule(); + assert_eq!(expected, &result); + } + + 
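+ // Illustrative sketch, not part of the original change: the surrounding tests all
+ // follow the same round trip of building a request with `create_test_request`,
+ // dispatching it through `handle_request_sync`, and decoding the result with
+ // `parse_success_result`. The hypothetical test below shows that shape for the
+ // `getSlot` method, assuming it is served by `MinimalImpl` (as in upstream Solana
+ // RPC) and that the freshly started harness reports the root bank's slot.
+ #[test]
+ fn test_rpc_get_slot_round_trip_sketch() {
+ let rpc = RpcHandler::start();
+ let request = create_test_request("getSlot", None);
+ let result: Slot = parse_success_result(rpc.handle_request_sync(request));
+ // The harness starts from a single bank at slot 0, as the "slot": 0 response
+ // contexts asserted elsewhere in this module suggest.
+ assert_eq!(result, 0);
+ }
+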
#[test] + fn test_rpc_get_account_info() { + let rpc = RpcHandler::start(); + let bank = rpc.working_bank(); + + let request = create_test_request( + "getAccountInfo", + Some(json!([rpc.mint_keypair.pubkey().to_string()])), + ); + let result: Value = parse_success_result(rpc.handle_request_sync(request)); + let expected = json!({ + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":{ + "owner": "11111111111111111111111111111111", + "lamports": TEST_MINT_LAMPORTS, + "data": "", + "executable": false, + "rentEpoch": 0, + "space": 0, + }, + }); + assert_eq!(result, expected); + + let pubkey = Pubkey::new_unique(); + let address = pubkey.to_string(); + let data = vec![1, 2, 3, 4, 5]; + let account = AccountSharedData::create(42, data.clone(), Pubkey::default(), false, 0); + bank.store_account(&pubkey, &account); + + let request = create_test_request( + "getAccountInfo", + Some(json!([address, {"encoding": "base64"}])), + ); + let result: Value = parse_success_result(rpc.handle_request_sync(request)); + let expected = json!([BASE64_STANDARD.encode(&data), "base64"]); + assert_eq!(result["value"]["data"], expected); + assert_eq!(result["value"]["space"], 5); + + let request = create_test_request( + "getAccountInfo", + Some(json!([address, {"encoding": "base64", "dataSlice": {"length": 2, "offset": 1}}])), + ); + let result: Value = parse_success_result(rpc.handle_request_sync(request)); + let expected = json!([BASE64_STANDARD.encode(&data[1..3]), "base64"]); + assert_eq!(result["value"]["data"], expected); + assert_eq!(result["value"]["space"], 5); + + let request = create_test_request( + "getAccountInfo", + Some(json!([address, {"encoding": "binary", "dataSlice": {"length": 2, "offset": 1}}])), + ); + let result: Value = parse_success_result(rpc.handle_request_sync(request)); + let expected = bs58::encode(&data[1..3]).into_string(); + assert_eq!(result["value"]["data"], expected); + assert_eq!(result["value"]["space"], 5); + + let request = create_test_request( + "getAccountInfo", + Some( + json!([address, {"encoding": "jsonParsed", "dataSlice": {"length": 2, "offset": 1}}]), + ), + ); + let result: Value = parse_success_result(rpc.handle_request_sync(request)); + let expected = json!([BASE64_STANDARD.encode(&data[1..3]), "base64"]); + assert_eq!( + result["value"]["data"], expected, + "should use data slice if parsing fails" + ); + } + + #[test] + fn test_encode_account_does_not_throw_when_slice_larger_than_account() { + let data = vec![42; 5]; + let pubkey = Pubkey::new_unique(); + let account = AccountSharedData::create(42, data, pubkey, false, 0); + let result = encode_account( + &account, + &pubkey, + UiAccountEncoding::Base58, + Some(UiDataSliceConfig { + length: account.data().len() + 1, + offset: 0, + }), + ); + assert!(result.is_ok()); + } + + #[test] + #[should_panic(expected = "should be less than 128 bytes")] // If ever `MAX_BASE58_BYTES` changes, the expected error message will need to be updated. 
+ fn test_encode_account_throws_when_data_too_large_to_base58_encode() { + let data = vec![42; MAX_BASE58_BYTES + 1]; + let pubkey = Pubkey::new_unique(); + let account = AccountSharedData::create(42, data, pubkey, false, 0); + let _ = encode_account(&account, &pubkey, UiAccountEncoding::Base58, None).unwrap(); + } + + #[test] + fn test_encode_account_does_not_throw_despite_data_too_large_to_base58_encode_because_dataslice_makes_it_fit( + ) { + let data = vec![42; MAX_BASE58_BYTES + 1]; + let pubkey = Pubkey::new_unique(); + let account = AccountSharedData::create(42, data, pubkey, false, 0); + let result = encode_account( + &account, + &pubkey, + UiAccountEncoding::Base58, + Some(UiDataSliceConfig { + length: MAX_BASE58_BYTES, + offset: 1, + }), + ); + assert!(result.is_ok()); + } + + #[test] + fn test_encode_account_does_not_throw_despite_dataslice_being_too_large_to_base58_encode_because_account_is_small_enough_to_fit( + ) { + let data = vec![42; MAX_BASE58_BYTES]; + let pubkey = Pubkey::new_unique(); + let account = AccountSharedData::create(42, data, pubkey, false, 0); + let result = encode_account( + &account, + &pubkey, + UiAccountEncoding::Base58, + Some(UiDataSliceConfig { + length: MAX_BASE58_BYTES + 1, + offset: 0, + }), + ); + assert!(result.is_ok()); + } + + #[test] + fn test_encode_account_does_not_throw_despite_account_and_dataslice_being_too_large_to_base58_encode_because_their_intersection_fits( + ) { + let data = vec![42; MAX_BASE58_BYTES + 1]; + let pubkey = Pubkey::new_unique(); + let account = AccountSharedData::create(42, data, pubkey, false, 0); + let result = encode_account( + &account, + &pubkey, + UiAccountEncoding::Base58, + Some(UiDataSliceConfig { + length: MAX_BASE58_BYTES + 1, + offset: 1, + }), + ); + assert!(result.is_ok()); + } + + #[test] + fn test_rpc_get_multiple_accounts() { + let rpc = RpcHandler::start(); + let bank = rpc.working_bank(); + + let non_existent_pubkey = Pubkey::new_unique(); + let pubkey = Pubkey::new_unique(); + let address = pubkey.to_string(); + let data = vec![1, 2, 3, 4, 5]; + let account = AccountSharedData::create(42, data.clone(), Pubkey::default(), false, 0); + bank.store_account(&pubkey, &account); + + // Test 3 accounts, one empty, one non-existent, and one with data + let request = create_test_request( + "getMultipleAccounts", + Some(json!([[ + rpc.mint_keypair.pubkey().to_string(), + non_existent_pubkey.to_string(), + address, + ]])), + ); + let result: RpcResponse = parse_success_result(rpc.handle_request_sync(request)); + let expected = json!([ + { + "owner": "11111111111111111111111111111111", + "lamports": TEST_MINT_LAMPORTS, + "data": ["", "base64"], + "executable": false, + "rentEpoch": 0, + "space": 0, + }, + null, + { + "owner": "11111111111111111111111111111111", + "lamports": 42, + "data": [BASE64_STANDARD.encode(&data), "base64"], + "executable": false, + "rentEpoch": 0, + "space": 5, + } + ]); + assert_eq!(result.value, expected); + + // Test config settings still work with multiple accounts + let request = create_test_request( + "getMultipleAccounts", + Some(json!([ + [ + rpc.mint_keypair.pubkey().to_string(), + non_existent_pubkey.to_string(), + address, + ], + {"encoding": "base58"}, + ])), + ); + let result: RpcResponse = parse_success_result(rpc.handle_request_sync(request)); + let expected = json!([ + { + "owner": "11111111111111111111111111111111", + "lamports": TEST_MINT_LAMPORTS, + "data": ["", "base58"], + "executable": false, + "rentEpoch": 0, + "space": 0, + }, + null, + { + "owner": 
"11111111111111111111111111111111", + "lamports": 42, + "data": [bs58::encode(&data).into_string(), "base58"], + "executable": false, + "rentEpoch": 0, + "space": 5, + } + ]); + assert_eq!(result.value, expected); + + let request = create_test_request( + "getMultipleAccounts", + Some(json!([ + [ + rpc.mint_keypair.pubkey().to_string(), + non_existent_pubkey.to_string(), + address, + ], + {"encoding": "jsonParsed", "dataSlice": {"length": 2, "offset": 1}}, + ])), + ); + let result: RpcResponse = parse_success_result(rpc.handle_request_sync(request)); + let expected = json!([ + { + "owner": "11111111111111111111111111111111", + "lamports": TEST_MINT_LAMPORTS, + "data": ["", "base64"], + "executable": false, + "rentEpoch": 0, + "space": 0, + }, + null, + { + "owner": "11111111111111111111111111111111", + "lamports": 42, + "data": [BASE64_STANDARD.encode(&data[1..3]), "base64"], + "executable": false, + "rentEpoch": 0, + "space": 5, + } + ]); + assert_eq!( + result.value, expected, + "should use data slice if parsing fails" + ); + } + + #[test] + fn test_rpc_get_program_accounts() { + let rpc = RpcHandler::start(); + let bank = rpc.working_bank(); + + let new_program_id = Pubkey::new_unique(); + let new_program_account_key = Pubkey::new_unique(); + let new_program_account = AccountSharedData::new(42, 0, &new_program_id); + bank.store_account(&new_program_account_key, &new_program_account); + + let request = create_test_request( + "getProgramAccounts", + Some(json!([new_program_id.to_string()])), + ); + let result: Vec = parse_success_result(rpc.handle_request_sync(request)); + let expected_value = vec![RpcKeyedAccount { + pubkey: new_program_account_key.to_string(), + account: UiAccount::encode( + &new_program_account_key, + &new_program_account, + UiAccountEncoding::Binary, + None, + None, + ), + }]; + assert_eq!(result, expected_value); + + // Test returns context + let request = create_test_request( + "getProgramAccounts", + Some(json!([ + new_program_id.to_string(), + {"withContext": true}, + ])), + ); + let result: RpcResponse> = + parse_success_result(rpc.handle_request_sync(request)); + let expected = RpcResponse { + context: RpcResponseContext::new(0), + value: expected_value, + }; + assert_eq!(result, expected); + + // Set up nonce accounts to test filters + let nonce_authorities = (0..2) + .map(|_| { + let pubkey = Pubkey::new_unique(); + let authority = Pubkey::new_unique(); + let account = AccountSharedData::new_data( + 42, + &nonce::state::Versions::new(nonce::State::new_initialized( + &authority, + DurableNonce::default(), + 1000, + )), + &system_program::id(), + ) + .unwrap(); + bank.store_account(&pubkey, &account); + authority + }) + .collect::>(); + + // Test memcmp filter; filter on Initialized state + let request = create_test_request( + "getProgramAccounts", + Some(json!([ + system_program::id().to_string(), + {"filters": [{ + "memcmp": { + "offset": 4, + "bytes": bs58::encode(vec![1, 0, 0, 0]).into_string(), + }, + }]}, + ])), + ); + let result: Vec = parse_success_result(rpc.handle_request_sync(request)); + assert_eq!(result.len(), 2); + + let request = create_test_request( + "getProgramAccounts", + Some(json!([ + system_program::id().to_string(), + {"filters": [{ + "memcmp": { + "offset": 4, + "bytes": bs58::encode(vec![0, 0, 0, 0]).into_string(), + }, + }]}, + ])), + ); + let result: Vec = parse_success_result(rpc.handle_request_sync(request)); + assert_eq!(result.len(), 0); + + // Test dataSize filter + let request = create_test_request( + "getProgramAccounts", + 
Some(json!([ + system_program::id().to_string(), + {"filters": [{"dataSize": nonce::State::size()}]}, + ])), + ); + let result: Vec = parse_success_result(rpc.handle_request_sync(request)); + assert_eq!(result.len(), 2); + + let request = create_test_request( + "getProgramAccounts", + Some(json!([ + system_program::id().to_string(), + {"filters": [{"dataSize": 1}]}, + ])), + ); + let result: Vec = parse_success_result(rpc.handle_request_sync(request)); + assert_eq!(result.len(), 0); + + // Test multiple filters + let request = create_test_request( + "getProgramAccounts", + Some(json!([ + system_program::id().to_string(), + {"filters": [{ + "memcmp": { + "offset": 4, + "bytes": bs58::encode(vec![1, 0, 0, 0]).into_string(), + }, + }, { + "memcmp": { + "offset": 8, + "bytes": nonce_authorities[0].to_string(), + }, + }]}, // Filter on Initialized and Nonce authority + ])), + ); + let result: Vec = parse_success_result(rpc.handle_request_sync(request)); + assert_eq!(result.len(), 1); + + let request = create_test_request( + "getProgramAccounts", + Some(json!([ + system_program::id().to_string(), + {"filters": [{ + "memcmp": { + "offset": 4, + "bytes": bs58::encode(vec![1, 0, 0, 0]).into_string(), + }, + }, { + "dataSize": 1, + }]}, // Filter on Initialized and non-matching data size + ])), + ); + let result: Vec = parse_success_result(rpc.handle_request_sync(request)); + assert_eq!(result.len(), 0); + } + + #[test] + fn test_rpc_simulate_transaction() { + let rpc = RpcHandler::start(); + let bank = rpc.working_bank(); + let rent_exempt_amount = bank.get_minimum_balance_for_rent_exemption(0); + let recent_blockhash = bank.confirmed_last_blockhash(); + let RpcHandler { + ref meta, ref io, .. + } = rpc; + + let bob_pubkey = solana_sdk::pubkey::new_rand(); + let mut tx = system_transaction::transfer( + &rpc.mint_keypair, + &bob_pubkey, + rent_exempt_amount, + recent_blockhash, + ); + let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string(); + tx.signatures[0] = Signature::default(); + let tx_badsig_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string(); + tx.message.recent_blockhash = Hash::default(); + let tx_invalid_recent_blockhash = bs58::encode(serialize(&tx).unwrap()).into_string(); + + // Simulation bank must be frozen + bank.freeze(); + + // Good signature with sigVerify=true + let req = format!( + r#"{{"jsonrpc":"2.0", + "id":1, + "method":"simulateTransaction", + "params":[ + "{}", + {{ + "sigVerify": true, + "accounts": {{ + "encoding": "jsonParsed", + "addresses": ["{}", "{}"] + }} + }} + ] + }}"#, + tx_serialized_encoded, + solana_sdk::pubkey::new_rand(), + bob_pubkey, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":{ + "accounts": [ + null, + { + "data": ["", "base64"], + "executable": false, + "owner": "11111111111111111111111111111111", + "lamports": rent_exempt_amount, + "rentEpoch": u64::MAX, + "space": 0, + } + ], + "err":null, + "innerInstructions": null, + "logs":[ + "Program 11111111111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 success" + ], + "replacementBlockhash": null, + "returnData":null, + "unitsConsumed":150, + } + }, + "id": 1, + }); + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response 
deserialization"); + assert_eq!(result, expected); + + // Too many input accounts... + let req = format!( + r#"{{"jsonrpc":"2.0", + "id":1, + "method":"simulateTransaction", + "params":[ + "{tx_serialized_encoded}", + {{ + "sigVerify": true, + "accounts": {{ + "addresses": [ + "11111111111111111111111111111111", + "11111111111111111111111111111111", + "11111111111111111111111111111111", + "11111111111111111111111111111111" + ] + }} + }} + ] + }}"#, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc":"2.0", + "error": { + "code": ErrorCode::InvalidParams.code(), + "message": "Too many accounts provided; max 3" + }, + "id":1 + }); + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + + // Bad signature with sigVerify=true + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{tx_badsig_serialized_encoded}", {{"sigVerify": true}}]}}"#, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc":"2.0", + "error": { + "code": -32003, + "message": "Transaction signature verification failure" + }, + "id":1 + }); + + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + + // Bad signature with sigVerify=false + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{tx_serialized_encoded}", {{"sigVerify": false}}]}}"#, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":{ + "accounts":null, + "err":null, + "innerInstructions":null, + "logs":[ + "Program 11111111111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 success" + ], + "replacementBlockhash": null, + "returnData":null, + "unitsConsumed":150, + } + }, + "id": 1, + }); + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + + // Bad signature with default sigVerify setting (false) + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{tx_serialized_encoded}"]}}"#, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":{ + "accounts":null, + "err":null, + "innerInstructions":null, + "logs":[ + "Program 11111111111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 success" + ], + "replacementBlockhash": null, + "returnData": null, + "unitsConsumed":150, + } + }, + "id": 1, + }); + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + + // Enabled both sigVerify=true and 
replaceRecentBlockhash=true + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {}]}}"#, + tx_serialized_encoded, + json!({ + "sigVerify": true, + "replaceRecentBlockhash": true, + }) + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc":"2.0", + "error": { + "code": ErrorCode::InvalidParams, + "message": "sigVerify may not be used with replaceRecentBlockhash" + }, + "id":1 + }); + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + + // Bad recent blockhash with replaceRecentBlockhash=false + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{tx_invalid_recent_blockhash}", {{"replaceRecentBlockhash": false}}]}}"#, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc":"2.0", + "result": { + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":{ + "err":"BlockhashNotFound", + "accounts":null, + "innerInstructions":null, + "logs":[], + "replacementBlockhash": null, + "returnData": null, + "unitsConsumed":0, + } + }, + "id":1 + }); + + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + + // Bad recent blockhash with replaceRecentBlockhash=true + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{tx_invalid_recent_blockhash}", {{"replaceRecentBlockhash": true}}]}}"#, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let latest_blockhash = bank.confirmed_last_blockhash(); + let expiry_slot = bank + .get_blockhash_last_valid_block_height(&latest_blockhash) + .expect("blockhash exists"); + + let expected = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":{ + "accounts":null, + "err":null, + "innerInstructions":null, + "logs":[ + "Program 11111111111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 success" + ], + "replacementBlockhash": { + "blockhash": latest_blockhash.to_string(), + "lastValidBlockHeight": expiry_slot + }, + "returnData":null, + "unitsConsumed":150, + } + }, + "id": 1, + }); + + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + } + + #[test] + fn test_rpc_simulate_transaction_with_parsing_token_accounts() { + let rpc = RpcHandler::start(); + let bank = rpc.working_bank(); + let RpcHandler { + ref meta, ref io, .. 
+ } = rpc; + + // init mint + let mint_rent_exempt_amount = + bank.get_minimum_balance_for_rent_exemption(spl_token::state::Mint::LEN); + let mint_pubkey = Pubkey::from_str("mint111111111111111111111111111111111111111").unwrap(); + let mut mint_data = [0u8; spl_token::state::Mint::LEN]; + Pack::pack_into_slice( + &spl_token::state::Mint { + mint_authority: COption::None, + supply: 0, + decimals: 8, + is_initialized: true, + freeze_authority: COption::None, + }, + &mut mint_data, + ); + let account = AccountSharedData::create( + mint_rent_exempt_amount, + mint_data.into(), + spl_token::id(), + false, + 0, + ); + bank.store_account(&mint_pubkey, &account); + + // init token account + let token_account_rent_exempt_amount = + bank.get_minimum_balance_for_rent_exemption(spl_token::state::Account::LEN); + let token_account_pubkey = Pubkey::new_unique(); + let owner_pubkey = Pubkey::from_str("owner11111111111111111111111111111111111111").unwrap(); + let mut token_account_data = [0u8; spl_token::state::Account::LEN]; + Pack::pack_into_slice( + &spl_token::state::Account { + mint: mint_pubkey, + owner: owner_pubkey, + amount: 1, + delegate: COption::None, + state: spl_token::state::AccountState::Initialized, + is_native: COption::None, + delegated_amount: 0, + close_authority: COption::None, + }, + &mut token_account_data, + ); + let account = AccountSharedData::create( + token_account_rent_exempt_amount, + token_account_data.into(), + spl_token::id(), + false, + 0, + ); + bank.store_account(&token_account_pubkey, &account); + + // prepare tx + let fee_payer = rpc.mint_keypair; + let recent_blockhash = bank.confirmed_last_blockhash(); + let tx = + system_transaction::transfer(&fee_payer, &token_account_pubkey, 1, recent_blockhash); + let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string(); + + // Simulation bank must be frozen + bank.freeze(); + + let req = format!( + r#"{{"jsonrpc":"2.0", + "id":1, + "method":"simulateTransaction", + "params":[ + "{}", + {{ + "sigVerify": true, + "accounts": {{ + "encoding": "jsonParsed", + "addresses": ["{}", "{}"] + }} + }} + ] + }}"#, + tx_serialized_encoded, + solana_sdk::pubkey::new_rand(), + token_account_pubkey, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":{ + "accounts": [ + null, + { + "data": { + "parsed": { + "info": { + "isNative": false, + "mint": "mint111111111111111111111111111111111111111", + "owner": "owner11111111111111111111111111111111111111", + "state": "initialized", + "tokenAmount": { + "amount": "1", + "decimals": 8, + "uiAmount": 0.00000001, + "uiAmountString": "0.00000001" + } + }, + "type": "account" + }, + "program": "spl-token", + "space": 165 + }, + "executable": false, + "lamports": (token_account_rent_exempt_amount + 1), + "owner": bs58::encode(spl_token::id()).into_string(), + "rentEpoch": u64::MAX, + "space": spl_token::state::Account::LEN + }, + ], + "err": null, + "innerInstructions": null, + "logs":[ + "Program 11111111111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 success" + ], + "replacementBlockhash": null, + "returnData": null, + "unitsConsumed": 150, + } + }, + "id": 1, + }); + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + 
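+ // The parsed token account above reflects the mint's 8 decimals: the single raw
+ // token parses to uiAmount 0.00000001, and its lamports include the extra
+ // 1 lamport transferred by the simulated transaction.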
assert_eq!(result, expected); + } + + #[test] + fn test_rpc_simulate_transaction_with_inner_instructions() { + let rpc = RpcHandler::start(); + let bank = rpc.working_bank(); + let recent_blockhash = bank.confirmed_last_blockhash(); + let RpcHandler { + ref meta, ref io, .. + } = rpc; + + let recent_slot = 123; + let mut slot_hashes = SlotHashes::default(); + slot_hashes.add(recent_slot, Hash::new_unique()); + bank.set_sysvar_for_tests(&slot_hashes); + + let lookup_table_authority = Keypair::new(); + let lookup_table_space = address_lookup_table::state::LOOKUP_TABLE_META_SIZE; + let lookup_table_lamports = bank.get_minimum_balance_for_rent_exemption(lookup_table_space); + + let (instruction, lookup_table_address) = + address_lookup_table::instruction::create_lookup_table( + lookup_table_authority.pubkey(), + rpc.mint_keypair.pubkey(), + recent_slot, + ); + let tx = Transaction::new_signed_with_payer( + &[instruction], + Some(&rpc.mint_keypair.pubkey()), + &[&rpc.mint_keypair], + recent_blockhash, + ); + let tx_serialized_encoded = BASE64_STANDARD.encode(serialize(&tx).unwrap()); + + // Simulation bank must be frozen + bank.freeze(); + + // `innerInstructions` not provided, should not be in response + let req = format!( + r#"{{"jsonrpc":"2.0", + "id":1, + "method":"simulateTransaction", + "params":[ + "{}", + {{ "encoding": "base64" }} + ] + }}"#, + tx_serialized_encoded, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":{ + "accounts": null, + "err":null, + "innerInstructions": null, + "logs":[ + "Program AddressLookupTab1e1111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program AddressLookupTab1e1111111111111111111111111 success" + ], + "replacementBlockhash": null, + "returnData":null, + "unitsConsumed":1200, + } + }, + "id": 1, + }); + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + + // `innerInstructions` provided as `false`, should not be in response + let req = format!( + r#"{{"jsonrpc":"2.0", + "id":1, + "method":"simulateTransaction", + "params":[ + "{}", + {{ "innerInstructions": false, "encoding": "base64" }} + ] + }}"#, + tx_serialized_encoded, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":{ + "accounts": null, + "err":null, + "innerInstructions": null, + "logs":[ + "Program AddressLookupTab1e1111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program AddressLookupTab1e1111111111111111111111111 success" + ], + "replacementBlockhash": null, 
+ "returnData":null, + "unitsConsumed":1200, + } + }, + "id": 1, + }); + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + + // `innerInstructions` provided as `true`, should have parsed inner instructions + let req = format!( + r#"{{"jsonrpc":"2.0", + "id":1, + "method":"simulateTransaction", + "params":[ + "{}", + {{ "innerInstructions": true, "encoding": "base64" }} + ] + }}"#, + tx_serialized_encoded, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": 0, "apiVersion": RpcApiVersion::default()}, + "value":{ + "accounts": null, + "err":null, + "innerInstructions": [ + { + "index": 0, + "instructions": [ + { + "parsed": { + "info": { + "destination": lookup_table_address.to_string(), + "lamports": lookup_table_lamports, + "source": rpc.mint_keypair.pubkey().to_string() + }, + "type": "transfer" + }, + "program": "system", + "programId": "11111111111111111111111111111111", + "stackHeight": 2 + }, + { + "parsed": { + "info": { + "account": lookup_table_address.to_string(), + "space": lookup_table_space + }, + "type": "allocate" + }, + "program": "system", + "programId": "11111111111111111111111111111111", + "stackHeight": 2 + }, + { + "parsed": { + "info": { + "account": lookup_table_address.to_string(), + "owner": "AddressLookupTab1e1111111111111111111111111" + }, + "type": "assign" + }, + "program": "system", + "programId": "11111111111111111111111111111111", + "stackHeight": 2 + } + ] + } + ], + "logs":[ + "Program AddressLookupTab1e1111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program AddressLookupTab1e1111111111111111111111111 success" + ], + "replacementBlockhash": null, + "returnData":null, + "unitsConsumed":1200, + } + }, + "id": 1, + }); + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + } + + #[test] + #[should_panic(expected = "simulation bank must be frozen")] + fn test_rpc_simulate_transaction_panic_on_unfrozen_bank() { + let rpc = RpcHandler::start(); + let bank = rpc.working_bank(); + let recent_blockhash = bank.confirmed_last_blockhash(); + let RpcHandler { + meta, + io, + mint_keypair, + .. 
+ } = rpc;
+
+ let bob_pubkey = Pubkey::new_unique();
+ let tx = system_transaction::transfer(&mint_keypair, &bob_pubkey, 1234, recent_blockhash);
+ let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string();
+
+ assert!(!bank.is_frozen());
+
+ let req = format!(
+ r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{tx_serialized_encoded}", {{"sigVerify": true}}]}}"#,
+ );
+
+ // should panic because `bank` is not frozen
+ let _ = io.handle_request_sync(&req, meta);
+ }
+
+ #[test]
+ fn test_rpc_get_signature_statuses() {
+ let rpc = RpcHandler::start();
+ let bank = rpc.working_bank();
+ let recent_blockhash = bank.confirmed_last_blockhash();
+ let confirmed_block_signatures = rpc.create_test_transactions_and_populate_blockstore();
+ let RpcHandler {
+ mut meta,
+ io,
+ mint_keypair,
+ ..
+ } = rpc;
+
+ let req = format!(
+ r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#,
+ confirmed_block_signatures[0]
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let expected_res: transaction::Result<()> = Ok(());
+ let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
+ let result: Option<TransactionStatus> =
+ serde_json::from_value(json["result"]["value"][0].clone())
+ .expect("actual response deserialization");
+ let result = result.as_ref().unwrap();
+ assert_eq!(expected_res, result.status);
+ assert_eq!(None, result.confirmations);
+
+ // Test getSignatureStatus request on unprocessed tx
+ let bob_pubkey = solana_sdk::pubkey::new_rand();
+ let tx = system_transaction::transfer(
+ &mint_keypair,
+ &bob_pubkey,
+ bank.get_minimum_balance_for_rent_exemption(0) + 10,
+ recent_blockhash,
+ );
+ let req = format!(
+ r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#,
+ tx.signatures[0]
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
+ let result: Option<TransactionStatus> =
+ serde_json::from_value(json["result"]["value"][0].clone())
+ .expect("actual response deserialization");
+ assert!(result.is_none());
+
+ // Test getSignatureStatus request on a TransactionError
+ let req = format!(
+ r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#,
+ confirmed_block_signatures[1]
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let expected_res: transaction::Result<()> = Err(TransactionError::InstructionError(
+ 0,
+ InstructionError::Custom(1),
+ ));
+ let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
+ let result: Option<TransactionStatus> =
+ serde_json::from_value(json["result"]["value"][0].clone())
+ .expect("actual response deserialization");
+ assert_eq!(expected_res, result.as_ref().unwrap().status);
+
+ // disable rpc-tx-history, but attempt historical query
+ meta.config.enable_rpc_transaction_history = false;
+ let req = format!(
+ r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"], {{"searchTransactionHistory": true}}]}}"#,
+ confirmed_block_signatures[1]
+ );
+ let res = io.handle_request_sync(&req, meta);
+ assert_eq!(
+ res,
+ Some(
+ r#"{"jsonrpc":"2.0","error":{"code":-32011,"message":"Transaction history is not available from this node"},"id":1}"#.to_string(),
+ )
+ );
+ }
+
+ #[test]
+ fn test_rpc_fail_request_airdrop() {
+ let RpcHandler { meta, io, ..
} = RpcHandler::start(); + + // Expect internal error because no faucet is available + let bob_pubkey = solana_sdk::pubkey::new_rand(); + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"requestAirdrop","params":["{bob_pubkey}", 50]}}"# + ); + let res = io.handle_request_sync(&req, meta); + let expected = + r#"{"jsonrpc":"2.0","error":{"code":-32600,"message":"Invalid request"},"id":1}"#; + let expected: Response = + serde_json::from_str(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(result, expected); + } + + #[test] + fn test_rpc_send_bad_tx() { + let genesis = create_genesis_config(100); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let meta = JsonRpcRequestProcessor::new_from_bank(bank); + + let mut io = MetaIoHandler::default(); + io.extend_with(FullImpl.to_delegate()); + + let req = r#"{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["37u9WtQpcm6ULa3Vmu7ySnANv"]}"#; + let res = io.handle_request_sync(req, meta); + let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); + let error = &json["error"]; + assert_eq!(error["code"], ErrorCode::InvalidParams.code()); + } + + #[test] + fn test_rpc_send_transaction_preflight() { + let exit = Arc::new(AtomicBool::new(false)); + let node_exit = create_node_exit(exit.clone()); + let ledger_path = get_tmp_ledger_path!(); + let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); + let (bank_forks, mint_keypair, ..) = new_bank_forks(); + + // Freeze bank 0 to prevent a panic in `run_transaction_simulation()` + bank_forks.write().unwrap().get(0).unwrap().freeze(); + + let mut io = MetaIoHandler::default(); + io.extend_with(FullImpl.to_delegate()); + let meta = JsonRpcRequestProcessor::new( + JsonRpcConfig::default(), + None, + bank_forks.clone(), + blockstore, + node_exit, + Hash::default(), + unbounded(), + None, + Arc::new(RwLock::new(LargestAccountsCache::new(30))), + Arc::new(AtomicU64::default()), + Arc::new(PrioritizationFeeCache::default()), + ); + + let mut bad_transaction = system_transaction::transfer( + &mint_keypair, + &solana_sdk::pubkey::new_rand(), + 42, + Hash::default(), + ); + + // sendTransaction will fail because the blockhash is invalid + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, + bs58::encode(serialize(&bad_transaction).unwrap()).into_string() + ); + let res = io.handle_request_sync(&req, meta.clone()); + assert_eq!( + res, + Some( + r#"{"jsonrpc":"2.0","error":{"code":-32002,"message":"Transaction simulation failed: Blockhash not found","data":{"accounts":null,"err":"BlockhashNotFound","innerInstructions":null,"logs":[],"replacementBlockhash":null,"returnData":null,"unitsConsumed":0}},"id":1}"#.to_string(), + ) + ); + + // sendTransaction will fail due to insanity + bad_transaction.message.instructions[0].program_id_index = 0u8; + let recent_blockhash = bank_forks.read().unwrap().root_bank().last_blockhash(); + bad_transaction.sign(&[&mint_keypair], recent_blockhash); + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, + bs58::encode(serialize(&bad_transaction).unwrap()).into_string() + ); + let res = io.handle_request_sync(&req, meta.clone()); + assert_eq!( + res, + Some( + r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"invalid transaction: Transaction failed to sanitize accounts offsets 
correctly"},"id":1}"#.to_string(), + ) + ); + let mut bad_transaction = system_transaction::transfer( + &mint_keypair, + &solana_sdk::pubkey::new_rand(), + 42, + recent_blockhash, + ); + + // sendTransaction will fail due to invalid signature + bad_transaction.signatures[0] = Signature::default(); + + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, + bs58::encode(serialize(&bad_transaction).unwrap()).into_string() + ); + let res = io.handle_request_sync(&req, meta.clone()); + assert_eq!( + res, + Some( + r#"{"jsonrpc":"2.0","error":{"code":-32003,"message":"Transaction signature verification failure"},"id":1}"#.to_string(), + ) + ); + + // sendTransaction will now succeed because skipPreflight=true even though it's a bad + // transaction + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}", {{"skipPreflight": true}}]}}"#, + bs58::encode(serialize(&bad_transaction).unwrap()).into_string() + ); + let res = io.handle_request_sync(&req, meta.clone()); + assert_eq!( + res, + Some( + r#"{"jsonrpc":"2.0","result":"1111111111111111111111111111111111111111111111111111111111111111","id":1}"#.to_string(), + ) + ); + + // sendTransaction will fail due to sanitization failure + bad_transaction.signatures.clear(); + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, + bs58::encode(serialize(&bad_transaction).unwrap()).into_string() + ); + let res = io.handle_request_sync(&req, meta); + assert_eq!( + res, + Some( + r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"invalid transaction: Transaction failed to sanitize accounts offsets correctly"},"id":1}"#.to_string(), + ) + ); + } + + #[test] + fn test_rpc_verify_filter() { + let filter = RpcFilterType::Memcmp(Memcmp::new( + 0, // offset + MemcmpEncodedBytes::Base58("13LeFbG6m2EP1fqCj9k66fcXsoTHMMtgr7c78AivUrYD".to_string()), // encoded bytes + )); + assert_eq!(verify_filter(&filter), Ok(())); + // Invalid base-58 + let filter = RpcFilterType::Memcmp(Memcmp::new( + 0, // offset + MemcmpEncodedBytes::Base58("III".to_string()), // encoded bytes + )); + assert!(verify_filter(&filter).is_err()); + } + + #[test] + fn test_rpc_verify_pubkey() { + let pubkey = solana_sdk::pubkey::new_rand(); + assert_eq!(verify_pubkey(&pubkey.to_string()).unwrap(), pubkey); + let bad_pubkey = "a1b2c3d4"; + assert_eq!( + verify_pubkey(bad_pubkey), + Err(Error::invalid_params("Invalid param: WrongSize")) + ); + } + + #[test] + fn test_rpc_verify_signature() { + let tx = system_transaction::transfer( + &Keypair::new(), + &solana_sdk::pubkey::new_rand(), + 20, + hash(&[0]), + ); + assert_eq!( + verify_signature(&tx.signatures[0].to_string()).unwrap(), + tx.signatures[0] + ); + let bad_signature = "a1b2c3d4"; + assert_eq!( + verify_signature(bad_signature), + Err(Error::invalid_params("Invalid param: WrongSize")) + ); + } + + fn new_bank_forks() -> (Arc>, Keypair, Arc) { + new_bank_forks_with_config(BankTestConfig::default()) + } + + fn new_bank_forks_with_config( + config: BankTestConfig, + ) -> (Arc>, Keypair, Arc) { + let GenesisConfigInfo { + mut genesis_config, + mint_keypair, + voting_keypair, + .. 
+ } = create_genesis_config(TEST_MINT_LAMPORTS);
+
+ genesis_config.rent.lamports_per_byte_year = 50;
+ genesis_config.rent.exemption_threshold = 2.0;
+ genesis_config.epoch_schedule =
+ EpochSchedule::custom(TEST_SLOTS_PER_EPOCH, TEST_SLOTS_PER_EPOCH, false);
+ genesis_config.fee_rate_governor = FeeRateGovernor::new(TEST_SIGNATURE_FEE, 0);
+
+ let bank = Bank::new_for_tests_with_config(&genesis_config, config);
+ (
+ BankForks::new_rw_arc(bank),
+ mint_keypair,
+ Arc::new(voting_keypair),
+ )
+ }
+
+ #[test]
+ fn test_rpc_get_version() {
+ let rpc = RpcHandler::start();
+ let request = create_test_request("getVersion", None);
+ let result: Value = parse_success_result(rpc.handle_request_sync(request));
+ let expected = {
+ let version = solana_version::Version::default();
+ json!({
+ "solana-core": version.to_string(),
+ "feature-set": version.feature_set,
+ })
+ };
+ assert_eq!(result, expected);
+ }
+
+ #[test]
+ fn test_get_block_with_versioned_tx() {
+ let rpc = RpcHandler::start();
+
+ let bank = rpc.working_bank();
+ // Slot hashes is necessary for processing versioned txs.
+ bank.set_sysvar_for_tests(&SlotHashes::default());
+ // Add both legacy and version #0 transactions to the block
+ rpc.create_test_versioned_transactions_and_populate_blockstore(None);
+
+ let request = create_test_request(
+ "getBlock",
+ Some(json!([
+ 0u64,
+ {"maxSupportedTransactionVersion": 0},
+ ])),
+ );
+ let result: Option<EncodedConfirmedBlock> =
+ parse_success_result(rpc.handle_request_sync(request));
+ let confirmed_block = result.unwrap();
+ assert_eq!(confirmed_block.transactions.len(), 2);
+ assert_eq!(
+ confirmed_block.transactions[0].version,
+ Some(TransactionVersion::LEGACY)
+ );
+ assert_eq!(
+ confirmed_block.transactions[1].version,
+ Some(TransactionVersion::Number(0))
+ );
+
+ let request = create_test_request("getBlock", Some(json!([0u64,])));
+ let response = parse_failure_response(rpc.handle_request_sync(request));
+ let expected = (
+ JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION,
+ String::from(
+ "Transaction version (0) is not supported by the requesting client. \
+ Please try the request again with the following configuration parameter: \
+ \"maxSupportedTransactionVersion\": 0",
+ ),
+ );
+ assert_eq!(response, expected);
+ }
+
+ #[test]
+ fn test_get_block() {
+ let mut rpc = RpcHandler::start();
+ let confirmed_block_signatures = rpc.create_test_transactions_and_populate_blockstore();
+
+ let request = create_test_request("getBlock", Some(json!([0u64])));
+ let result: Option<EncodedConfirmedBlock> =
+ parse_success_result(rpc.handle_request_sync(request));
+
+ let confirmed_block = result.unwrap();
+ assert_eq!(confirmed_block.transactions.len(), 2);
+ assert_eq!(confirmed_block.rewards, vec![]);
+
+ for EncodedTransactionWithStatusMeta {
+ transaction,
+ meta,
+ version,
+ } in confirmed_block.transactions.into_iter()
+ {
+ assert_eq!(
+ version, None,
+ "requests which don't set max_supported_transaction_version shouldn't receive a version"
+ );
+ if let EncodedTransaction::Json(transaction) = transaction {
+ if transaction.signatures[0] == confirmed_block_signatures[0].to_string() {
+ let meta = meta.unwrap();
+ assert_eq!(meta.status, Ok(()));
+ assert_eq!(meta.err, None);
+ } else if transaction.signatures[0] == confirmed_block_signatures[1].to_string() {
+ let meta = meta.unwrap();
+ assert_eq!(
+ meta.err,
+ Some(TransactionError::InstructionError(
+ 0,
+ InstructionError::Custom(1)
+ ))
+ );
+ assert_eq!(
+ meta.status,
+ Err(TransactionError::InstructionError(
+ 0,
+ InstructionError::Custom(1)
+ ))
+ );
+ } else {
+ assert_eq!(meta, None);
+ }
+ }
+ }
+
+ let request = create_test_request("getBlock", Some(json!([0u64, "binary"])));
+ let result: Option<EncodedConfirmedBlock> =
+ parse_success_result(rpc.handle_request_sync(request));
+ let confirmed_block = result.unwrap();
+ assert_eq!(confirmed_block.transactions.len(), 2);
+ assert_eq!(confirmed_block.rewards, vec![]);
+
+ for EncodedTransactionWithStatusMeta {
+ transaction,
+ meta,
+ version,
+ } in confirmed_block.transactions.into_iter()
+ {
+ assert_eq!(
+ version, None,
+ "requests which don't set max_supported_transaction_version shouldn't receive a version"
+ );
+ if let EncodedTransaction::LegacyBinary(transaction) = transaction {
+ let decoded_transaction: Transaction =
+ deserialize(&bs58::decode(&transaction).into_vec().unwrap()).unwrap();
+ if decoded_transaction.signatures[0] == confirmed_block_signatures[0] {
+ let meta = meta.unwrap();
+ assert_eq!(meta.status, Ok(()));
+ assert_eq!(meta.err, None);
+ } else if decoded_transaction.signatures[0] == confirmed_block_signatures[1] {
+ let meta = meta.unwrap();
+ assert_eq!(
+ meta.err,
+ Some(TransactionError::InstructionError(
+ 0,
+ InstructionError::Custom(1)
+ ))
+ );
+ assert_eq!(
+ meta.status,
+ Err(TransactionError::InstructionError(
+ 0,
+ InstructionError::Custom(1)
+ ))
+ );
+ } else {
+ assert_eq!(meta, None);
+ }
+ }
+ }
+
+ // disable rpc-tx-history
+ rpc.meta.config.enable_rpc_transaction_history = false;
+ let request = create_test_request("getBlock", Some(json!([0u64])));
+ let response = parse_failure_response(rpc.handle_request_sync(request));
+ let expected = (
+ JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE,
+ String::from("Transaction history is not available from this node"),
+ );
+ assert_eq!(response, expected);
+ }
+
+ #[test]
+ fn test_get_block_config() {
+ let rpc = RpcHandler::start();
+ let confirmed_block_signatures = rpc.create_test_transactions_and_populate_blockstore();
+
+ let request = create_test_request(
+ "getBlock",
+ Some(json!([
+ 0u64,
+ RpcBlockConfig {
+ encoding: None,
+ transaction_details:
Some(TransactionDetails::Signatures),
+ rewards: Some(false),
+ commitment: None,
+ max_supported_transaction_version: None,
+ },
+ ])),
+ );
+ let result: Option<UiConfirmedBlock> =
+ parse_success_result(rpc.handle_request_sync(request));
+
+ let confirmed_block = result.unwrap();
+ assert!(confirmed_block.transactions.is_none());
+ assert!(confirmed_block.rewards.is_none());
+ for (i, signature) in confirmed_block.signatures.unwrap()[..2].iter().enumerate() {
+ assert_eq!(*signature, confirmed_block_signatures[i].to_string());
+ }
+
+ let request = create_test_request(
+ "getBlock",
+ Some(json!([
+ 0u64,
+ RpcBlockConfig {
+ encoding: None,
+ transaction_details: Some(TransactionDetails::None),
+ rewards: Some(true),
+ commitment: None,
+ max_supported_transaction_version: None,
+ },
+ ])),
+ );
+ let result: Option<UiConfirmedBlock> =
+ parse_success_result(rpc.handle_request_sync(request));
+ let confirmed_block = result.unwrap();
+ assert!(confirmed_block.transactions.is_none());
+ assert!(confirmed_block.signatures.is_none());
+ assert_eq!(confirmed_block.rewards.unwrap(), vec![]);
+ }
+
+ #[test]
+ fn test_get_blocks() {
+ let rpc = RpcHandler::start();
+ let _ = rpc.create_test_transactions_and_populate_blockstore();
+ rpc.add_roots_to_blockstore(vec![0, 1, 3, 4, 8]);
+
+ let request = create_test_request("getBlocks", Some(json!([0u64])));
+ let result: Vec<Slot> = parse_success_result(rpc.handle_request_sync(request));
+ assert_eq!(result, vec![0, 1, 3, 4, 8]);
+
+ let request = create_test_request("getBlocks", Some(json!([2u64])));
+ let result: Vec<Slot> = parse_success_result(rpc.handle_request_sync(request));
+ assert_eq!(result, vec![3, 4, 8]);
+
+ let request = create_test_request("getBlocks", Some(json!([0u64, 4u64])));
+ let result: Vec<Slot> = parse_success_result(rpc.handle_request_sync(request));
+ assert_eq!(result, vec![0, 1, 3, 4]);
+
+ let request = create_test_request("getBlocks", Some(json!([0u64, 7u64])));
+ let result: Vec<Slot> = parse_success_result(rpc.handle_request_sync(request));
+ assert_eq!(result, vec![0, 1, 3, 4]);
+
+ let request = create_test_request("getBlocks", Some(json!([9u64, 11u64])));
+ let result: Vec<Slot> = parse_success_result(rpc.handle_request_sync(request));
+ assert_eq!(result, Vec::<Slot>::new());
+
+ let request = create_test_request(
+ "getBlocks",
+ Some(json!([0u64, MAX_GET_CONFIRMED_BLOCKS_RANGE])),
+ );
+ let result: Vec<Slot> = parse_success_result(rpc.handle_request_sync(request));
+ assert_eq!(result, vec![0, 1, 3, 4, 8]);
+
+ // let request = create_test_request(
+ // "getBlocks",
+ // Some(json!([0u64, MAX_GET_CONFIRMED_BLOCKS_RANGE + 1])),
+ // );
+ // let response = parse_failure_response(rpc.handle_request_sync(request));
+ // let expected = (
+ // ErrorCode::InvalidParams.code(),
+ // String::from("Slot range too large; max 500000"),
+ // );
+ // assert_eq!(response, expected);
+ }
+
+ #[test]
+ fn test_get_blocks_with_limit() {
+ let rpc = RpcHandler::start();
+ rpc.add_roots_to_blockstore(vec![0, 1, 3, 4, 8]);
+
+ let request = create_test_request("getBlocksWithLimit", Some(json!([0u64, 500_001u64])));
+ let response = parse_failure_response(rpc.handle_request_sync(request));
+ let expected = (
+ ErrorCode::InvalidParams.code(),
+ String::from("Limit too large; max 500000"),
+ );
+ assert_eq!(response, expected);
+
+ let request = create_test_request("getBlocksWithLimit", Some(json!([0u64, 0u64])));
+ let result: Vec<Slot> = parse_success_result(rpc.handle_request_sync(request));
+ assert_eq!(result, Vec::<Slot>::new());
+
+ let request = create_test_request("getBlocksWithLimit", Some(json!([2u64,
2u64])));
+ let result: Vec<Slot> = parse_success_result(rpc.handle_request_sync(request));
+ assert_eq!(result, vec![3, 4]);
+
+ let request = create_test_request("getBlocksWithLimit", Some(json!([2u64, 3u64])));
+ let result: Vec<Slot> = parse_success_result(rpc.handle_request_sync(request));
+ assert_eq!(result, vec![3, 4, 8]);
+
+ let request = create_test_request("getBlocksWithLimit", Some(json!([2u64, 500_000u64])));
+ let result: Vec<Slot> = parse_success_result(rpc.handle_request_sync(request));
+ assert_eq!(result, vec![3, 4, 8]);
+
+ let request = create_test_request("getBlocksWithLimit", Some(json!([9u64, 500_000u64])));
+ let result: Vec<Slot> = parse_success_result(rpc.handle_request_sync(request));
+ assert_eq!(result, Vec::<Slot>::new());
+ }
+
+ #[test]
+ fn test_get_block_time() {
+ let rpc = RpcHandler::start();
+ rpc.add_roots_to_blockstore(vec![1, 2, 3, 4, 5, 6, 7]);
+
+ let base_timestamp = rpc
+ .bank_forks
+ .read()
+ .unwrap()
+ .get(0)
+ .unwrap()
+ .unix_timestamp_from_genesis();
+
+ let slot_duration = slot_duration_from_slots_per_year(rpc.working_bank().slots_per_year());
+
+ let request = create_test_request("getBlockTime", Some(json!([2u64])));
+ let result: Option<UnixTimestamp> = parse_success_result(rpc.handle_request_sync(request));
+ let expected = Some(base_timestamp);
+ assert_eq!(result, expected);
+
+ let request = create_test_request("getBlockTime", Some(json!([7u64])));
+ let result: Option<UnixTimestamp> = parse_success_result(rpc.handle_request_sync(request));
+ let expected = Some(base_timestamp + (7 * slot_duration).as_secs() as i64);
+ assert_eq!(result, expected);
+
+ let request = create_test_request("getBlockTime", Some(json!([12345u64])));
+ let response = parse_failure_response(rpc.handle_request_sync(request));
+ let expected = (
+ JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE,
+ String::from("Block not available for slot 12345"),
+ );
+ assert_eq!(response, expected);
+ }
+
+ #[test]
+ fn test_is_finalized() {
+ let bank = Arc::new(Bank::default_for_tests());
+ let ledger_path = get_tmp_ledger_path!();
+ let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
+ blockstore.set_roots([0, 1].iter()).unwrap();
+ // Build BlockCommitmentCache with rooted slots
+ let mut cache0 = BlockCommitment::default();
+ cache0.increase_rooted_stake(50);
+ let mut cache1 = BlockCommitment::default();
+ cache1.increase_rooted_stake(40);
+ let mut cache2 = BlockCommitment::default();
+ cache2.increase_rooted_stake(20);
+
+ let mut block_commitment = HashMap::new();
+ block_commitment.entry(1).or_insert(cache0);
+ block_commitment.entry(2).or_insert(cache1);
+ block_commitment.entry(3).or_insert(cache2);
+
+ assert!(is_finalized(&bank, &blockstore, 0));
+ assert!(is_finalized(&bank, &blockstore, 1));
+ assert!(!is_finalized(&bank, &blockstore, 2));
+ assert!(!is_finalized(&bank, &blockstore, 3));
+ }
+
+ #[test]
+ fn test_token_rpcs() {
+ for program_id in solana_account_decoder::parse_token::spl_token_ids() {
+ let rpc = RpcHandler::start();
+ let bank = rpc.working_bank();
+ let RpcHandler { io, meta, ..
} = rpc;
+ let mint = SplTokenPubkey::new_from_array([2; 32]);
+ let owner = SplTokenPubkey::new_from_array([3; 32]);
+ let delegate = SplTokenPubkey::new_from_array([4; 32]);
+ let token_account_pubkey = solana_sdk::pubkey::new_rand();
+ let token_with_different_mint_pubkey = solana_sdk::pubkey::new_rand();
+ let new_mint = SplTokenPubkey::new_from_array([5; 32]);
+ if program_id == token_2022::id() {
+ // Add the token account
+ let account_base = TokenAccount {
+ mint,
+ owner,
+ delegate: COption::Some(delegate),
+ amount: 420,
+ state: TokenAccountState::Initialized,
+ is_native: COption::None,
+ delegated_amount: 30,
+ close_authority: COption::Some(owner),
+ };
+ let account_size = ExtensionType::try_calculate_account_len::<TokenAccount>(&[
+ ExtensionType::ImmutableOwner,
+ ExtensionType::MemoTransfer,
+ ])
+ .unwrap();
+ let mut account_data = vec![0; account_size];
+ let mut account_state =
+ StateWithExtensionsMut::<TokenAccount>::unpack_uninitialized(&mut account_data)
+ .unwrap();
+
+ account_state.base = account_base;
+ account_state.pack_base();
+ account_state.init_account_type().unwrap();
+ account_state
+ .init_extension::<ImmutableOwner>(true)
+ .unwrap();
+ let memo_transfer = account_state.init_extension::<MemoTransfer>(true).unwrap();
+ memo_transfer.require_incoming_transfer_memos = true.into();
+
+ let token_account = AccountSharedData::from(Account {
+ lamports: 111,
+ data: account_data.to_vec(),
+ owner: program_id,
+ ..Account::default()
+ });
+ bank.store_account(&token_account_pubkey, &token_account);
+
+ // Add the mint
+ let mint_size = ExtensionType::try_calculate_account_len::<Mint>(&[
+ ExtensionType::MintCloseAuthority,
+ ])
+ .unwrap();
+ let mint_base = Mint {
+ mint_authority: COption::Some(owner),
+ supply: 500,
+ decimals: 2,
+ is_initialized: true,
+ freeze_authority: COption::Some(owner),
+ };
+ let mut mint_data = vec![0; mint_size];
+ let mut mint_state =
+ StateWithExtensionsMut::<Mint>::unpack_uninitialized(&mut mint_data).unwrap();
+
+ mint_state.base = mint_base;
+ mint_state.pack_base();
+ mint_state.init_account_type().unwrap();
+ let mint_close_authority = mint_state
+ .init_extension::<MintCloseAuthority>(true)
+ .unwrap();
+ mint_close_authority.close_authority =
+ OptionalNonZeroPubkey::try_from(Some(owner)).unwrap();
+
+ let mint_account = AccountSharedData::from(Account {
+ lamports: 111,
+ data: mint_data.to_vec(),
+ owner: program_id,
+ ..Account::default()
+ });
+ bank.store_account(&Pubkey::from_str(&mint.to_string()).unwrap(), &mint_account);
+
+ // Add another token account with the same owner, delegate, and mint
+ let other_token_account_pubkey = solana_sdk::pubkey::new_rand();
+ bank.store_account(&other_token_account_pubkey, &token_account);
+
+ // Add another token account with the same owner and delegate but different mint
+ let mut account_data = vec![0; TokenAccount::get_packed_len()];
+ let token_account = TokenAccount {
+ mint: new_mint,
+ owner,
+ delegate: COption::Some(delegate),
+ amount: 42,
+ state: TokenAccountState::Initialized,
+ is_native: COption::None,
+ delegated_amount: 30,
+ close_authority: COption::Some(owner),
+ };
+ TokenAccount::pack(token_account, &mut account_data).unwrap();
+ let token_account = AccountSharedData::from(Account {
+ lamports: 111,
+ data: account_data.to_vec(),
+ owner: program_id,
+ ..Account::default()
+ });
+ bank.store_account(&token_with_different_mint_pubkey, &token_account);
+ } else {
+ // Add the token account
+ let mut account_data = vec![0; TokenAccount::get_packed_len()];
+ let token_account = TokenAccount {
+ mint,
+ owner,
+ delegate:
COption::Some(delegate), + amount: 420, + state: TokenAccountState::Initialized, + is_native: COption::None, + delegated_amount: 30, + close_authority: COption::Some(owner), + }; + TokenAccount::pack(token_account, &mut account_data).unwrap(); + let token_account = AccountSharedData::from(Account { + lamports: 111, + data: account_data.to_vec(), + owner: program_id, + ..Account::default() + }); + bank.store_account(&token_account_pubkey, &token_account); + + // Add the mint + let mut mint_data = vec![0; Mint::get_packed_len()]; + let mint_state = Mint { + mint_authority: COption::Some(owner), + supply: 500, + decimals: 2, + is_initialized: true, + freeze_authority: COption::Some(owner), + }; + Mint::pack(mint_state, &mut mint_data).unwrap(); + let mint_account = AccountSharedData::from(Account { + lamports: 111, + data: mint_data.to_vec(), + owner: program_id, + ..Account::default() + }); + bank.store_account(&Pubkey::from_str(&mint.to_string()).unwrap(), &mint_account); + + // Add another token account with the same owner, delegate, and mint + let other_token_account_pubkey = solana_sdk::pubkey::new_rand(); + bank.store_account(&other_token_account_pubkey, &token_account); + + // Add another token account with the same owner and delegate but different mint + let mut account_data = vec![0; TokenAccount::get_packed_len()]; + let token_account = TokenAccount { + mint: new_mint, + owner, + delegate: COption::Some(delegate), + amount: 42, + state: TokenAccountState::Initialized, + is_native: COption::None, + delegated_amount: 30, + close_authority: COption::Some(owner), + }; + TokenAccount::pack(token_account, &mut account_data).unwrap(); + let token_account = AccountSharedData::from(Account { + lamports: 111, + data: account_data.to_vec(), + owner: program_id, + ..Account::default() + }); + bank.store_account(&token_with_different_mint_pubkey, &token_account); + } + + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenAccountBalance","params":["{token_account_pubkey}"]}}"#, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let result: Value = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + let balance: UiTokenAmount = + serde_json::from_value(result["result"]["value"].clone()).unwrap(); + let error = f64::EPSILON; + assert!((balance.ui_amount.unwrap() - 4.2).abs() < error); + assert_eq!(balance.amount, 420.to_string()); + assert_eq!(balance.decimals, 2); + assert_eq!(balance.ui_amount_string, "4.2".to_string()); + + // Test non-existent token account + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenAccountBalance","params":["{}"]}}"#, + solana_sdk::pubkey::new_rand(), + ); + let res = io.handle_request_sync(&req, meta.clone()); + let result: Value = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert!(result.get("error").is_some()); + + // Test get token supply, pulls supply from mint + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenSupply","params":["{mint}"]}}"#, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let result: Value = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + let supply: UiTokenAmount = + serde_json::from_value(result["result"]["value"].clone()).unwrap(); + let error = f64::EPSILON; + assert!((supply.ui_amount.unwrap() - 5.0).abs() < error); + assert_eq!(supply.amount, 500.to_string()); + assert_eq!(supply.decimals, 2); + 
assert_eq!(supply.ui_amount_string, "5".to_string());
+
+ // Test non-existent mint address
+ let req = format!(
+ r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenSupply","params":["{}"]}}"#,
+ solana_sdk::pubkey::new_rand(),
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ assert!(result.get("error").is_some());
+
+ // Test getTokenAccountsByOwner with Token program id returns all accounts, regardless of Mint address
+ let req = format!(
+ r#"{{
+ "jsonrpc":"2.0",
+ "id":1,
+ "method":"getTokenAccountsByOwner",
+ "params":["{owner}", {{"programId": "{program_id}"}}, {{"encoding":"base64"}}]
+ }}"#,
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ let accounts: Vec<RpcKeyedAccount> =
+ serde_json::from_value(result["result"]["value"].clone()).unwrap();
+ assert_eq!(accounts.len(), 3);
+
+ // Test getTokenAccountsByOwner with jsonParsed encoding doesn't return accounts with invalid mints
+ let req = format!(
+ r#"{{
+ "jsonrpc":"2.0",
+ "id":1,
+ "method":"getTokenAccountsByOwner",
+ "params":["{owner}", {{"programId": "{program_id}"}}, {{"encoding": "jsonParsed"}}]
+ }}"#,
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ let accounts: Vec<RpcKeyedAccount> =
+ serde_json::from_value(result["result"]["value"].clone()).unwrap();
+ assert_eq!(accounts.len(), 2);
+
+ // Test getProgramAccounts with jsonParsed encoding returns mints, but doesn't return accounts with invalid mints
+ let req = format!(
+ r#"{{
+ "jsonrpc":"2.0",
+ "id":1,
+ "method":"getProgramAccounts",
+ "params":["{program_id}", {{"encoding": "jsonParsed"}}]
+ }}"#,
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ let accounts: Vec<RpcKeyedAccount> =
+ serde_json::from_value(result["result"].clone()).unwrap();
+ if program_id == solana_inline_spl::token::id() {
+ // native mint is included for token-v3
+ assert_eq!(accounts.len(), 4);
+ } else {
+ assert_eq!(accounts.len(), 3);
+ }
+
+ // Test returns only mint accounts
+ let req = format!(
+ r#"{{
+ "jsonrpc":"2.0",
+ "id":1,"method":"getTokenAccountsByOwner",
+ "params":["{owner}", {{"mint": "{mint}"}}, {{"encoding":"base64"}}]
+ }}"#,
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ let accounts: Vec<RpcKeyedAccount> =
+ serde_json::from_value(result["result"]["value"].clone()).unwrap();
+ assert_eq!(accounts.len(), 2);
+
+ // Test non-existent Mint/program id
+ let req = format!(
+ r#"{{
+ "jsonrpc":"2.0",
+ "id":1,
+ "method":"getTokenAccountsByOwner",
+ "params":["{}", {{"programId": "{}"}}]
+ }}"#,
+ owner,
+ solana_sdk::pubkey::new_rand(),
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ assert!(result.get("error").is_some());
+ let req = format!(
+ r#"{{
+ "jsonrpc":"2.0",
+ "id":1,
+ "method":"getTokenAccountsByOwner",
+ "params":["{}", {{"mint": "{}"}}]
+ }}"#,
+ owner,
+ solana_sdk::pubkey::new_rand(),
+ );
+ let res =
io.handle_request_sync(&req, meta.clone());
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ assert!(result.get("error").is_some());
+
+ // Test non-existent Owner
+ let req = format!(
+ r#"{{
+ "jsonrpc":"2.0",
+ "id":1,
+ "method":"getTokenAccountsByOwner",
+ "params":["{}", {{"programId": "{}"}}]
+ }}"#,
+ solana_sdk::pubkey::new_rand(),
+ program_id,
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ let accounts: Vec<RpcKeyedAccount> =
+ serde_json::from_value(result["result"]["value"].clone()).unwrap();
+ assert!(accounts.is_empty());
+
+ // Test getTokenAccountsByDelegate with Token program id returns all accounts, regardless of Mint address
+ let req = format!(
+ r#"{{
+ "jsonrpc":"2.0",
+ "id":1,
+ "method":"getTokenAccountsByDelegate",
+ "params":["{delegate}", {{"programId": "{program_id}"}}, {{"encoding":"base64"}}]
+ }}"#,
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ let accounts: Vec<RpcKeyedAccount> =
+ serde_json::from_value(result["result"]["value"].clone()).unwrap();
+ assert_eq!(accounts.len(), 3);
+
+ // Test returns only mint accounts
+ let req = format!(
+ r#"{{
+ "jsonrpc":"2.0",
+ "id":1,"method":
+ "getTokenAccountsByDelegate",
+ "params":["{delegate}", {{"mint": "{mint}"}}, {{"encoding":"base64"}}]
+ }}"#,
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ let accounts: Vec<RpcKeyedAccount> =
+ serde_json::from_value(result["result"]["value"].clone()).unwrap();
+ assert_eq!(accounts.len(), 2);
+
+ // Test non-existent Mint/program id
+ let req = format!(
+ r#"{{
+ "jsonrpc":"2.0",
+ "id":1,
+ "method":"getTokenAccountsByDelegate",
+ "params":["{}", {{"programId": "{}"}}]
+ }}"#,
+ delegate,
+ solana_sdk::pubkey::new_rand(),
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ assert!(result.get("error").is_some());
+ let req = format!(
+ r#"{{
+ "jsonrpc":"2.0",
+ "id":1,
+ "method":"getTokenAccountsByDelegate",
+ "params":["{}", {{"mint": "{}"}}]
+ }}"#,
+ delegate,
+ solana_sdk::pubkey::new_rand(),
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ assert!(result.get("error").is_some());
+
+ // Test non-existent Delegate
+ let req = format!(
+ r#"{{
+ "jsonrpc":"2.0",
+ "id":1,
+ "method":"getTokenAccountsByDelegate",
+ "params":["{}", {{"programId": "{}"}}]
+ }}"#,
+ solana_sdk::pubkey::new_rand(),
+ program_id,
+ );
+ let res = io.handle_request_sync(&req, meta.clone());
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ let accounts: Vec<RpcKeyedAccount> =
+ serde_json::from_value(result["result"]["value"].clone()).unwrap();
+ assert!(accounts.is_empty());
+
+ // Add new_mint, and another token account on new_mint with different balance
+ let mut mint_data = vec![0; Mint::get_packed_len()];
+ let mint_state = Mint {
+ mint_authority: COption::Some(owner),
+ supply: 500,
+ decimals: 2,
+ is_initialized: true,
+ freeze_authority: COption::Some(owner),
+ };
+ Mint::pack(mint_state, &mut mint_data).unwrap();
+ let mint_account = AccountSharedData::from(Account {
+ lamports: 111,
+ data: mint_data.to_vec(),
+ owner: program_id,
+ ..Account::default()
+ });
+ bank.store_account(
+ &Pubkey::from_str(&new_mint.to_string()).unwrap(),
+ &mint_account,
+ );
+ let mut account_data = vec![0; TokenAccount::get_packed_len()];
+ let token_account = TokenAccount {
+ mint: new_mint,
+ owner,
+ delegate: COption::Some(delegate),
+ amount: 10,
+ state: TokenAccountState::Initialized,
+ is_native: COption::None,
+ delegated_amount: 30,
+ close_authority: COption::Some(owner),
+ };
+ TokenAccount::pack(token_account, &mut account_data).unwrap();
+ let token_account = AccountSharedData::from(Account {
+ lamports: 111,
+ data: account_data.to_vec(),
+ owner: program_id,
+ ..Account::default()
+ });
+ let token_with_smaller_balance = solana_sdk::pubkey::new_rand();
+ bank.store_account(&token_with_smaller_balance, &token_account);
+
+ // Test largest token accounts
+ let req = format!(
+ r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenLargestAccounts","params":["{new_mint}"]}}"#,
+ );
+ let res = io.handle_request_sync(&req, meta);
+ let result: Value = serde_json::from_str(&res.expect("actual response"))
+ .expect("actual response deserialization");
+ let largest_accounts: Vec<RpcTokenAccountBalance> =
+ serde_json::from_value(result["result"]["value"].clone()).unwrap();
+ assert_eq!(
+ largest_accounts,
+ vec![
+ RpcTokenAccountBalance {
+ address: token_with_different_mint_pubkey.to_string(),
+ amount: UiTokenAmount {
+ ui_amount: Some(0.42),
+ decimals: 2,
+ amount: "42".to_string(),
+ ui_amount_string: "0.42".to_string(),
+ }
+ },
+ RpcTokenAccountBalance {
+ address: token_with_smaller_balance.to_string(),
+ amount: UiTokenAmount {
+ ui_amount: Some(0.1),
+ decimals: 2,
+ amount: "10".to_string(),
+ ui_amount_string: "0.1".to_string(),
+ }
+ }
+ ]
+ );
+ }
+ }
+
+ #[test]
+ fn test_token_parsing() {
+ for program_id in solana_account_decoder::parse_token::spl_token_ids() {
+ let rpc = RpcHandler::start();
+ let bank = rpc.working_bank();
+ let RpcHandler { io, meta, ..
} = rpc;
+
+ let mint = SplTokenPubkey::new_from_array([2; 32]);
+ let owner = SplTokenPubkey::new_from_array([3; 32]);
+ let delegate = SplTokenPubkey::new_from_array([4; 32]);
+ let token_account_pubkey = solana_sdk::pubkey::new_rand();
+ let amount = 420;
+ let delegated_amount = 30;
+ let rent_exempt_amount = 10;
+ let supply = 500;
+ let decimals = 2;
+ let (program_name, account_size, mint_size, additional_data) = if program_id
+ == token_2022::id()
+ {
+ let account_base = TokenAccount {
+ mint,
+ owner,
+ delegate: COption::Some(delegate),
+ amount,
+ state: TokenAccountState::Initialized,
+ is_native: COption::Some(rent_exempt_amount),
+ delegated_amount,
+ close_authority: COption::Some(owner),
+ };
+ let account_size = ExtensionType::try_calculate_account_len::<TokenAccount>(&[
+ ExtensionType::ImmutableOwner,
+ ExtensionType::MemoTransfer,
+ ])
+ .unwrap();
+ let mut account_data = vec![0; account_size];
+ let mut account_state =
+ StateWithExtensionsMut::<TokenAccount>::unpack_uninitialized(&mut account_data)
+ .unwrap();
+
+ account_state.base = account_base;
+ account_state.pack_base();
+ account_state.init_account_type().unwrap();
+ account_state
+ .init_extension::<ImmutableOwner>(true)
+ .unwrap();
+ let memo_transfer = account_state.init_extension::<MemoTransfer>(true).unwrap();
+ memo_transfer.require_incoming_transfer_memos = true.into();
+
+ let token_account = AccountSharedData::from(Account {
+ lamports: 111,
+ data: account_data.to_vec(),
+ owner: program_id,
+ ..Account::default()
+ });
+ bank.store_account(&token_account_pubkey, &token_account);
+
+ let mint_size = ExtensionType::try_calculate_account_len::<Mint>(&[
+ ExtensionType::MintCloseAuthority,
+ ExtensionType::InterestBearingConfig,
+ ])
+ .unwrap();
+ let mint_base = Mint {
+ mint_authority: COption::Some(owner),
+ supply,
+ decimals,
+ is_initialized: true,
+ freeze_authority: COption::Some(owner),
+ };
+ let mut mint_data = vec![0; mint_size];
+ let mut mint_state =
+ StateWithExtensionsMut::<Mint>::unpack_uninitialized(&mut mint_data).unwrap();
+
+ mint_state.base = mint_base;
+ mint_state.pack_base();
+ mint_state.init_account_type().unwrap();
+ let mint_close_authority = mint_state
+ .init_extension::<MintCloseAuthority>(true)
+ .unwrap();
+ mint_close_authority.close_authority =
+ OptionalNonZeroPubkey::try_from(Some(owner)).unwrap();
+ let interest_bearing_config = mint_state
+ .init_extension::<InterestBearingConfig>(true)
+ .unwrap();
+ interest_bearing_config.initialization_timestamp =
+ bank.clock().unix_timestamp.saturating_sub(1_000_000).into();
+ interest_bearing_config.pre_update_average_rate = 500.into();
+ interest_bearing_config.last_update_timestamp = bank.clock().unix_timestamp.into();
+ interest_bearing_config.current_rate = 500.into();
+
+ let additional_data = SplTokenAdditionalData {
+ decimals,
+ interest_bearing_config: Some((
+ *interest_bearing_config,
+ bank.clock().unix_timestamp,
+ )),
+ };
+
+ let mint_account = AccountSharedData::from(Account {
+ lamports: 111,
+ data: mint_data.to_vec(),
+ owner: program_id,
+ ..Account::default()
+ });
+ bank.store_account(&Pubkey::from_str(&mint.to_string()).unwrap(), &mint_account);
+ ("spl-token-2022", account_size, mint_size, additional_data)
+ } else {
+ let account_size = TokenAccount::get_packed_len();
+ let mut account_data = vec![0; account_size];
+ let token_account = TokenAccount {
+ mint,
+ owner,
+ delegate: COption::Some(delegate),
+ amount,
+ state: TokenAccountState::Initialized,
+ is_native: COption::Some(rent_exempt_amount),
+ delegated_amount,
+ close_authority: COption::Some(owner),
+ };
+ TokenAccount::pack(token_account,
&mut account_data).unwrap(); + let token_account = AccountSharedData::from(Account { + lamports: 111, + data: account_data.to_vec(), + owner: program_id, + ..Account::default() + }); + bank.store_account(&token_account_pubkey, &token_account); + + // Add the mint + let mint_size = Mint::get_packed_len(); + let mut mint_data = vec![0; mint_size]; + let mint_state = Mint { + mint_authority: COption::Some(owner), + supply, + decimals, + is_initialized: true, + freeze_authority: COption::Some(owner), + }; + Mint::pack(mint_state, &mut mint_data).unwrap(); + let mint_account = AccountSharedData::from(Account { + lamports: 111, + data: mint_data.to_vec(), + owner: program_id, + ..Account::default() + }); + bank.store_account(&Pubkey::from_str(&mint.to_string()).unwrap(), &mint_account); + let additional_data = SplTokenAdditionalData { + decimals, + interest_bearing_config: None, + }; + ("spl-token", account_size, mint_size, additional_data) + }; + + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{token_account_pubkey}", {{"encoding": "jsonParsed"}}]}}"#, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let result: Value = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + let token_ui_amount = token_amount_to_ui_amount_v2(amount, &additional_data); + let delegated_ui_amount = + token_amount_to_ui_amount_v2(delegated_amount, &additional_data); + let rent_exempt_ui_amount = + token_amount_to_ui_amount_v2(rent_exempt_amount, &additional_data); + let mut expected_value = json!({ + "program": program_name, + "space": account_size, + "parsed": { + "type": "account", + "info": { + "mint": mint.to_string(), + "owner": owner.to_string(), + "tokenAmount": json!(token_ui_amount), + "delegate": delegate.to_string(), + "state": "initialized", + "isNative": true, + "rentExemptReserve": json!(rent_exempt_ui_amount), + "delegatedAmount": json!(delegated_ui_amount), + "closeAuthority": owner.to_string(), + } + } + }); + if program_id == token_2022::id() { + expected_value["parsed"]["info"]["extensions"] = json!([ + { + "extension": "immutableOwner" + }, + { + "extension": "memoTransfer", + "state": { + "requireIncomingTransferMemos": true + } + }, + ]); + } + assert_eq!(result["result"]["value"]["data"], expected_value); + + // Test Mint + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{mint}", {{"encoding": "jsonParsed"}}]}}"#, + ); + let res = io.handle_request_sync(&req, meta); + let result: Value = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + let mut expected_value = json!({ + "program": program_name, + "space": mint_size, + "parsed": { + "type": "mint", + "info": { + "mintAuthority": owner.to_string(), + "decimals": 2, + "supply": "500".to_string(), + "isInitialized": true, + "freezeAuthority": owner.to_string(), + } + } + }); + if program_id == token_2022::id() { + expected_value["parsed"]["info"]["extensions"] = json!([ + { + "extension": "mintCloseAuthority", + "state": { + "closeAuthority": owner.to_string(), + } + }, + { + "extension": "interestBearingConfig", + "state": { + "currentRate": 500, + "initializationTimestamp": bank.clock().unix_timestamp.saturating_sub(1_000_000), + "lastUpdateTimestamp": bank.clock().unix_timestamp, + "preUpdateAverageRate": 500, + "rateAuthority": null, + } + } + ]); + } + assert_eq!(result["result"]["value"]["data"], expected_value,); + } + } + + #[test] + fn 
test_get_spl_token_owner_filter() { + // Filtering on token-v3 length + let owner = Pubkey::new_unique(); + assert_eq!( + get_spl_token_owner_filter( + &spl_token::id(), + &[ + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), + RpcFilterType::DataSize(165) + ], + ) + .unwrap(), + owner + ); + + // Filtering on token-2022 account type + assert_eq!( + get_spl_token_owner_filter( + &token_2022::id(), + &[ + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), + ], + ) + .unwrap(), + owner + ); + + // Filtering on token account state + assert_eq!( + get_spl_token_owner_filter( + &token_2022::id(), + &[ + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), + RpcFilterType::TokenAccountState, + ], + ) + .unwrap(), + owner + ); + + // Can't filter on account type for token-v3 + assert!(get_spl_token_owner_filter( + &solana_inline_spl::token::id(), + &[ + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), + ], + ) + .is_none()); + + // Filtering on mint instead of owner + assert!(get_spl_token_owner_filter( + &solana_inline_spl::token::id(), + &[ + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, owner.to_bytes().to_vec())), + RpcFilterType::DataSize(165) + ], + ) + .is_none()); + + // Wrong program id + assert!(get_spl_token_owner_filter( + &Pubkey::new_unique(), + &[ + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), + RpcFilterType::DataSize(165) + ], + ) + .is_none()); + assert!(get_spl_token_owner_filter( + &Pubkey::new_unique(), + &[ + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), + ], + ) + .is_none()); + } + + #[test] + fn test_get_spl_token_mint_filter() { + // Filtering on token-v3 length + let mint = Pubkey::new_unique(); + assert_eq!( + get_spl_token_mint_filter( + &solana_inline_spl::token::id(), + &[ + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), + RpcFilterType::DataSize(165) + ], + ) + .unwrap(), + mint + ); + + // Filtering on token-2022 account type + assert_eq!( + get_spl_token_mint_filter( + &token_2022::id(), + &[ + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), + ], + ) + .unwrap(), + mint + ); + + // Filtering on token account state + assert_eq!( + get_spl_token_mint_filter( + &solana_inline_spl::token::id(), + &[ + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), + RpcFilterType::TokenAccountState, + ], + ) + .unwrap(), + mint + ); + + // Can't filter on account type for token-v3 + assert!(get_spl_token_mint_filter( + &solana_inline_spl::token::id(), + &[ + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), + ], + ) + .is_none()); + + // Filtering on owner instead of mint + assert!(get_spl_token_mint_filter( + &solana_inline_spl::token::id(), + &[ + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, mint.to_bytes().to_vec())), + RpcFilterType::DataSize(165) + ], + ) + .is_none()); + + // Wrong program id + assert!(get_spl_token_mint_filter( + &Pubkey::new_unique(), + &[ + RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, 
mint.to_bytes().to_vec())),
+ RpcFilterType::DataSize(165)
+ ],
+ )
+ .is_none());
+ assert!(get_spl_token_mint_filter(
+ &Pubkey::new_unique(),
+ &[
+ RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())),
+ RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])),
+ ],
+ )
+ .is_none());
+ }
+
+ #[test]
+ fn test_worst_case_encoded_tx_goldens() {
+ let ff_tx = vec![0xffu8; PACKET_DATA_SIZE];
+ let tx58 = bs58::encode(&ff_tx).into_string();
+ assert_eq!(tx58.len(), MAX_BASE58_SIZE);
+ let tx64 = BASE64_STANDARD.encode(&ff_tx);
+ assert_eq!(tx64.len(), MAX_BASE64_SIZE);
+ }
+
+ #[test]
+ fn test_decode_and_deserialize_too_large_payloads_fail() {
+ // +2 because +1 still fits in base64 encoded worst-case
+ let too_big = PACKET_DATA_SIZE + 2;
+ let tx_ser = vec![0xffu8; too_big];
+
+ let tx58 = bs58::encode(&tx_ser).into_string();
+ let tx58_len = tx58.len();
+ assert_eq!(
+ decode_and_deserialize::<Transaction>(tx58, TransactionBinaryEncoding::Base58)
+ .unwrap_err(),
+ Error::invalid_params(format!(
+ "base58 encoded solana_sdk::transaction::Transaction too large: {tx58_len} bytes (max: encoded/raw {MAX_BASE58_SIZE}/{PACKET_DATA_SIZE})",
+ )
+ ));
+
+ let tx64 = BASE64_STANDARD.encode(&tx_ser);
+ let tx64_len = tx64.len();
+ assert_eq!(
+ decode_and_deserialize::<Transaction>(tx64, TransactionBinaryEncoding::Base64)
+ .unwrap_err(),
+ Error::invalid_params(format!(
+ "base64 encoded solana_sdk::transaction::Transaction too large: {tx64_len} bytes (max: encoded/raw {MAX_BASE64_SIZE}/{PACKET_DATA_SIZE})",
+ )
+ ));
+
+ let too_big = PACKET_DATA_SIZE + 1;
+ let tx_ser = vec![0x00u8; too_big];
+ let tx58 = bs58::encode(&tx_ser).into_string();
+ assert_eq!(
+ decode_and_deserialize::<Transaction>(tx58, TransactionBinaryEncoding::Base58)
+ .unwrap_err(),
+ Error::invalid_params(format!(
+ "decoded solana_sdk::transaction::Transaction too large: {too_big} bytes (max: {PACKET_DATA_SIZE} bytes)"
+ ))
+ );
+
+ let tx64 = BASE64_STANDARD.encode(&tx_ser);
+ assert_eq!(
+ decode_and_deserialize::<Transaction>(tx64, TransactionBinaryEncoding::Base64)
+ .unwrap_err(),
+ Error::invalid_params(format!(
+ "decoded solana_sdk::transaction::Transaction too large: {too_big} bytes (max: {PACKET_DATA_SIZE} bytes)"
+ ))
+ );
+
+ let tx_ser = vec![0xffu8; PACKET_DATA_SIZE - 2];
+ let mut tx64 = BASE64_STANDARD.encode(&tx_ser);
+ assert_eq!(
+ decode_and_deserialize::<Transaction>(tx64.clone(), TransactionBinaryEncoding::Base64)
+ .unwrap_err(),
+ Error::invalid_params(
+ "failed to deserialize solana_sdk::transaction::Transaction: invalid value: \
+ continue signal on byte-three, expected a terminal signal on or before byte-three"
+ .to_string()
+ )
+ );
+
+ tx64.push('!');
+ assert_eq!(
+ decode_and_deserialize::<Transaction>(tx64, TransactionBinaryEncoding::Base64)
+ .unwrap_err(),
+ Error::invalid_params("invalid base64 encoding: InvalidByte(1640, 33)".to_string())
+ );
+
+ let mut tx58 = bs58::encode(&tx_ser).into_string();
+ assert_eq!(
+ decode_and_deserialize::<Transaction>(tx58.clone(), TransactionBinaryEncoding::Base58)
+ .unwrap_err(),
+ Error::invalid_params(
+ "failed to deserialize solana_sdk::transaction::Transaction: invalid value: \
+ continue signal on byte-three, expected a terminal signal on or before byte-three"
+ .to_string()
+ )
+ );
+
+ tx58.push('!');
+ assert_eq!(
+ decode_and_deserialize::<Transaction>(tx58, TransactionBinaryEncoding::Base58)
+ .unwrap_err(),
+ Error::invalid_params(
+ "invalid base58 encoding: InvalidCharacter { character: '!', index: 1680 }"
+ .to_string(),
+ )
+ );
+ }
+
+ #[test]
+ fn test_sanitize_unsanitary() {
+ let
unsanitary_tx58 = "ju9xZWuDBX4pRxX2oZkTjxU5jB4SSTgEGhX8bQ8PURNzyzqKMPPpNvWihx8zUe\
+ FfrbVNoAaEsNKZvGzAnTDy5bhNT9kt6KFCTBixpvrLCzg4M5UdFUQYrn1gdgjX\
+ pLHxcaShD81xBNaFDgnA2nkkdHnKtZt4hVSfKAmw3VRZbjrZ7L2fKZBx21CwsG\
+ hD6onjM2M3qZW5C8J6d1pj41MxKmZgPBSha3MyKkNLkAGFASK"
+ .to_string();
+
+ let unsanitary_versioned_tx = decode_and_deserialize::<VersionedTransaction>(
+ unsanitary_tx58,
+ TransactionBinaryEncoding::Base58,
+ )
+ .unwrap();
+ let expect58 = Error::invalid_params(
+ "invalid transaction: Transaction failed to sanitize accounts offsets correctly"
+ .to_string(),
+ );
+ assert_eq!(
+ sanitize_transaction(
+ unsanitary_versioned_tx,
+ SimpleAddressLoader::Disabled,
+ &ReservedAccountKeys::empty_key_set()
+ )
+ .unwrap_err(),
+ expect58
+ );
+ }
+
+ #[test]
+ fn test_sanitize_unsupported_transaction_version() {
+ let versioned_tx = VersionedTransaction {
+ signatures: vec![Signature::default()],
+ message: VersionedMessage::V0(v0::Message {
+ header: MessageHeader {
+ num_required_signatures: 1,
+ ..MessageHeader::default()
+ },
+ account_keys: vec![Pubkey::new_unique()],
+ ..v0::Message::default()
+ }),
+ };
+
+ assert_eq!(
+ sanitize_transaction(
+ versioned_tx,
+ SimpleAddressLoader::Disabled,
+ &ReservedAccountKeys::empty_key_set()
+ )
+ .unwrap_err(),
+ Error::invalid_params(
+ "invalid transaction: Transaction version is unsupported".to_string(),
+ )
+ );
+ }
+
+ #[test]
+ fn test_get_fee_for_message() {
+ let rpc = RpcHandler::start();
+ let bank = rpc.working_bank();
+ // Slot hashes is necessary for processing versioned txs.
+ bank.set_sysvar_for_tests(&SlotHashes::default());
+ // Correct blockhash is needed because fees are specific to blockhashes
+ let recent_blockhash = bank.last_blockhash();
+
+ {
+ let legacy_msg = VersionedMessage::Legacy(Message {
+ header: MessageHeader {
+ num_required_signatures: 1,
+ ..MessageHeader::default()
+ },
+ recent_blockhash,
+ account_keys: vec![Pubkey::new_unique()],
+ ..Message::default()
+ });
+
+ let request = create_test_request(
+ "getFeeForMessage",
+ Some(json!([
+ BASE64_STANDARD.encode(serialize(&legacy_msg).unwrap())
+ ])),
+ );
+ let response: RpcResponse<u64> = parse_success_result(rpc.handle_request_sync(request));
+ assert_eq!(response.value, TEST_SIGNATURE_FEE);
+ }
+
+ {
+ let v0_msg = VersionedMessage::V0(v0::Message {
+ header: MessageHeader {
+ num_required_signatures: 1,
+ ..MessageHeader::default()
+ },
+ recent_blockhash,
+ account_keys: vec![Pubkey::new_unique()],
+ ..v0::Message::default()
+ });
+
+ let request = create_test_request(
+ "getFeeForMessage",
+ Some(json!([BASE64_STANDARD.encode(serialize(&v0_msg).unwrap())])),
+ );
+ let response: RpcResponse<u64> = parse_success_result(rpc.handle_request_sync(request));
+ assert_eq!(response.value, TEST_SIGNATURE_FEE);
+ }
+ }
+
+ #[test]
+ fn test_rpc_get_recent_prioritization_fees() {
+ fn wait_for_cache_blocks(cache: &PrioritizationFeeCache, num_blocks: usize) {
+ while cache.available_block_count() < num_blocks {
+ std::thread::sleep(std::time::Duration::from_millis(100));
+ }
+ }
+
+ fn assert_fee_vec_eq(
+ expected: &mut Vec<RpcPrioritizationFee>,
+ actual: &mut Vec<RpcPrioritizationFee>,
+ ) {
+ expected.sort_by(|a, b| a.slot.partial_cmp(&b.slot).unwrap());
+ actual.sort_by(|a, b| a.slot.partial_cmp(&b.slot).unwrap());
+ assert_eq!(expected, actual);
+ }
+
+ let rpc = RpcHandler::start();
+ assert_eq!(
+ rpc.get_prioritization_fee_cache().available_block_count(),
+ 0
+ );
+ let slot0 = rpc.working_bank().slot();
+ let bank0_id = rpc.working_bank().bank_id();
+ let account0 = Pubkey::new_unique();
+ let account1 =
Pubkey::new_unique(); + let account2 = Pubkey::new_unique(); + let price0 = 42; + let transactions = vec![ + Transaction::new_unsigned(Message::new( + &[ + system_instruction::transfer(&account0, &account1, 1), + ComputeBudgetInstruction::set_compute_unit_price(price0), + ], + Some(&account0), + )), + Transaction::new_unsigned(Message::new( + &[system_instruction::transfer(&account0, &account2, 1)], + Some(&account0), + )), + ]; + rpc.update_prioritization_fee_cache(transactions); + let cache = rpc.get_prioritization_fee_cache(); + cache.finalize_priority_fee(slot0, bank0_id); + wait_for_cache_blocks(cache, 1); + + let request = create_test_request("getRecentPrioritizationFees", None); + let mut response: Vec = + parse_success_result(rpc.handle_request_sync(request)); + assert_fee_vec_eq( + &mut response, + &mut vec![RpcPrioritizationFee { + slot: slot0, + prioritization_fee: 0, + }], + ); + + let request = create_test_request( + "getRecentPrioritizationFees", + Some(json!([[account1.to_string()]])), + ); + let mut response: Vec = + parse_success_result(rpc.handle_request_sync(request)); + assert_fee_vec_eq( + &mut response, + &mut vec![RpcPrioritizationFee { + slot: slot0, + prioritization_fee: price0, + }], + ); + + let request = create_test_request( + "getRecentPrioritizationFees", + Some(json!([[account2.to_string()]])), + ); + let mut response: Vec = + parse_success_result(rpc.handle_request_sync(request)); + assert_fee_vec_eq( + &mut response, + &mut vec![RpcPrioritizationFee { + slot: slot0, + prioritization_fee: 0, + }], + ); + + rpc.advance_bank_to_confirmed_slot(1); + let slot1 = rpc.working_bank().slot(); + let bank1_id = rpc.working_bank().bank_id(); + let price1 = 11; + let transactions = vec![ + Transaction::new_unsigned(Message::new( + &[ + system_instruction::transfer(&account0, &account2, 1), + ComputeBudgetInstruction::set_compute_unit_price(price1), + ], + Some(&account0), + )), + Transaction::new_unsigned(Message::new( + &[system_instruction::transfer(&account0, &account1, 1)], + Some(&account0), + )), + ]; + rpc.update_prioritization_fee_cache(transactions); + let cache = rpc.get_prioritization_fee_cache(); + cache.finalize_priority_fee(slot1, bank1_id); + wait_for_cache_blocks(cache, 2); + + let request = create_test_request("getRecentPrioritizationFees", None); + let mut response: Vec = + parse_success_result(rpc.handle_request_sync(request)); + assert_fee_vec_eq( + &mut response, + &mut vec![ + RpcPrioritizationFee { + slot: slot0, + prioritization_fee: 0, + }, + RpcPrioritizationFee { + slot: slot1, + prioritization_fee: 0, + }, + ], + ); + + let request = create_test_request( + "getRecentPrioritizationFees", + Some(json!([[account1.to_string()]])), + ); + let mut response: Vec = + parse_success_result(rpc.handle_request_sync(request)); + assert_fee_vec_eq( + &mut response, + &mut vec![ + RpcPrioritizationFee { + slot: slot0, + prioritization_fee: price0, + }, + RpcPrioritizationFee { + slot: slot1, + prioritization_fee: 0, + }, + ], + ); + + let request = create_test_request( + "getRecentPrioritizationFees", + Some(json!([[account2.to_string()]])), + ); + let mut response: Vec = + parse_success_result(rpc.handle_request_sync(request)); + assert_fee_vec_eq( + &mut response, + &mut vec![ + RpcPrioritizationFee { + slot: slot0, + prioritization_fee: 0, + }, + RpcPrioritizationFee { + slot: slot1, + prioritization_fee: price1, + }, + ], + ); + } +} diff --git a/rpc/src/jsonrpc/mod.rs b/rpc/src/jsonrpc/mod.rs new file mode 100644 index 0000000..dd01182 --- /dev/null 
+++ b/rpc/src/jsonrpc/mod.rs @@ -0,0 +1,3 @@ +mod cache; +pub mod core; +pub mod service; diff --git a/rpc/src/jsonrpc/service.rs b/rpc/src/jsonrpc/service.rs new file mode 100644 index 0000000..504643b --- /dev/null +++ b/rpc/src/jsonrpc/service.rs @@ -0,0 +1,506 @@ +use super::cache::LargestAccountsCache; +use super::core::rpc_accounts::AccountsData; +use super::core::rpc_accounts_scan::AccountsScan; +use super::core::rpc_bank::BankData; +use super::core::rpc_full::Full; +use super::core::rpc_minimal::Minimal; +use super::core::{ + rpc_accounts, rpc_accounts_scan, rpc_bank, rpc_full, rpc_minimal, JsonRpcConfig, + JsonRpcRequestProcessor, MAX_REQUEST_BODY_SIZE, +}; +use crossbeam_channel::{unbounded, Receiver, Sender}; +use jsonrpc_core::futures_util::TryStreamExt; +use jsonrpc_core::MetaIoHandler; +use jsonrpc_http_server::{ + hyper, AccessControlAllowOrigin, CloseHandle, DomainsValidation, RequestMiddleware, + RequestMiddlewareAction, ServerBuilder, +}; +use regex::Regex; +// use solana_ledger::bigtable_upload::ConfirmedBlockUploadConfig; +// use solana_ledger::bigtable_upload_service::BigTableUploadService; +use solana_ledger::blockstore::Blockstore; +use solana_perf::thread::renice_this_thread; +use solana_runtime::bank_forks::BankForks; +use solana_runtime::prioritization_fee_cache::PrioritizationFeeCache; +use solana_runtime::snapshot_archive_info::SnapshotArchiveInfoGetter; +use solana_runtime::snapshot_config::SnapshotConfig; +use solana_runtime::snapshot_utils; +use solana_sdk::exit::Exit; +use solana_sdk::genesis_config::DEFAULT_GENESIS_DOWNLOAD_PATH; +use solana_sdk::hash::Hash; +use solana_sdk::native_token::lamports_to_sol; +use solana_sdk::transaction::SanitizedTransaction; +// use solana_storage_bigtable::CredentialType; +use std::net::SocketAddr; +use std::path::{Path, PathBuf}; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; +use std::sync::{Arc, RwLock}; +use std::thread; +use std::thread::{Builder, JoinHandle}; +use tokio_util::codec::{BytesCodec, FramedRead}; + +const FULL_SNAPSHOT_REQUEST_PATH: &str = "/snapshot.tar.bz2"; +const INCREMENTAL_SNAPSHOT_REQUEST_PATH: &str = "/incremental-snapshot.tar.bz2"; +const LARGEST_ACCOUNTS_CACHE_DURATION: u64 = 60 * 60 * 2; + +pub struct JsonRpcService { + thread_hdl: JoinHandle<()>, + close_handle: Option, +} + +struct RpcRequestMiddleware { + ledger_path: PathBuf, + full_snapshot_archive_path_regex: Regex, + incremental_snapshot_archive_path_regex: Regex, + snapshot_config: Option, + bank_forks: Arc>, +} + +impl RpcRequestMiddleware { + pub fn new( + ledger_path: PathBuf, + snapshot_config: Option, + bank_forks: Arc>, + ) -> Self { + Self { + ledger_path, + full_snapshot_archive_path_regex: Regex::new( + snapshot_utils::FULL_SNAPSHOT_ARCHIVE_FILENAME_REGEX, + ) + .unwrap(), + incremental_snapshot_archive_path_regex: Regex::new( + snapshot_utils::INCREMENTAL_SNAPSHOT_ARCHIVE_FILENAME_REGEX, + ) + .unwrap(), + snapshot_config, + bank_forks, + } + } + + fn redirect(location: &str) -> hyper::Response { + hyper::Response::builder() + .status(hyper::StatusCode::SEE_OTHER) + .header(hyper::header::LOCATION, location) + .body(hyper::Body::from(String::from(location))) + .unwrap() + } + + fn not_found() -> hyper::Response { + hyper::Response::builder() + .status(hyper::StatusCode::NOT_FOUND) + .body(hyper::Body::empty()) + .unwrap() + } + + fn internal_server_error() -> hyper::Response { + hyper::Response::builder() + .status(hyper::StatusCode::INTERNAL_SERVER_ERROR) + .body(hyper::Body::empty()) + .unwrap() + } + + fn 
strip_leading_slash(path: &str) -> Option<&str> { + path.strip_prefix('/') + } + + fn is_file_get_path(&self, path: &str) -> bool { + if path == DEFAULT_GENESIS_DOWNLOAD_PATH { + return true; + } + + if self.snapshot_config.is_none() { + return false; + } + + let Some(path) = Self::strip_leading_slash(path) else { + return false; + }; + + self.full_snapshot_archive_path_regex.is_match(path) + || self.incremental_snapshot_archive_path_regex.is_match(path) + } + + #[cfg(unix)] + async fn open_no_follow(path: impl AsRef<Path>) -> std::io::Result<tokio::fs::File> { + tokio::fs::OpenOptions::new() + .read(true) + .write(false) + .create(false) + .custom_flags(libc::O_NOFOLLOW) + .open(path) + .await + } + + #[cfg(not(unix))] + async fn open_no_follow(path: impl AsRef<Path>) -> std::io::Result<tokio::fs::File> { + // TODO: Is there any way to achieve the same on Windows? + tokio::fs::File::open(path).await + } + + fn find_snapshot_file
<P>
(&self, stem: P) -> PathBuf + where + P: AsRef, + { + let root = if self + .full_snapshot_archive_path_regex + .is_match(Path::new("").join(&stem).to_str().unwrap()) + { + &self + .snapshot_config + .as_ref() + .unwrap() + .full_snapshot_archives_dir + } else { + &self + .snapshot_config + .as_ref() + .unwrap() + .incremental_snapshot_archives_dir + }; + let local_path = root.join(&stem); + if local_path.exists() { + local_path + } else { + // remote snapshot archive path + snapshot_utils::build_snapshot_archives_remote_dir(root).join(stem) + } + } + + fn process_file_get(&self, path: &str) -> RequestMiddlewareAction { + let filename = { + let stem = Self::strip_leading_slash(path).expect("path already verified"); + match path { + DEFAULT_GENESIS_DOWNLOAD_PATH => { + inc_new_counter_info!("rpc-get_genesis", 1); + self.ledger_path.join(stem) + } + _ => { + inc_new_counter_info!("rpc-get_snapshot", 1); + self.find_snapshot_file(stem) + } + } + }; + + let file_length = std::fs::metadata(&filename) + .map(|m| m.len()) + .unwrap_or(0) + .to_string(); + info!("get {} -> {:?} ({} bytes)", path, filename, file_length); + RequestMiddlewareAction::Respond { + should_validate_hosts: true, + response: Box::pin(async { + match Self::open_no_follow(filename).await { + Err(err) => Ok(if err.kind() == std::io::ErrorKind::NotFound { + Self::not_found() + } else { + Self::internal_server_error() + }), + Ok(file) => { + let stream = + FramedRead::new(file, BytesCodec::new()).map_ok(|b| b.freeze()); + let body = hyper::Body::wrap_stream(stream); + + Ok(hyper::Response::builder() + .header(hyper::header::CONTENT_LENGTH, file_length) + .body(body) + .unwrap()) + } + } + }), + } + } + + fn health_check(&self) -> &'static str { + // always health + "ok" + } +} + +impl RequestMiddleware for RpcRequestMiddleware { + fn on_request(&self, request: hyper::Request) -> RequestMiddlewareAction { + trace!("request uri: {}", request.uri()); + + if let Some(ref snapshot_config) = self.snapshot_config { + if request.uri().path() == FULL_SNAPSHOT_REQUEST_PATH + || request.uri().path() == INCREMENTAL_SNAPSHOT_REQUEST_PATH + { + // Convenience redirect to the latest snapshot + let full_snapshot_archive_info = + snapshot_utils::get_highest_full_snapshot_archive_info( + &snapshot_config.full_snapshot_archives_dir, + ); + let snapshot_archive_info = + if let Some(full_snapshot_archive_info) = full_snapshot_archive_info { + if request.uri().path() == FULL_SNAPSHOT_REQUEST_PATH { + Some(full_snapshot_archive_info.snapshot_archive_info().clone()) + } else { + snapshot_utils::get_highest_incremental_snapshot_archive_info( + &snapshot_config.incremental_snapshot_archives_dir, + full_snapshot_archive_info.slot(), + ) + .map(|incremental_snapshot_archive_info| { + incremental_snapshot_archive_info + .snapshot_archive_info() + .clone() + }) + } + } else { + None + }; + return if let Some(snapshot_archive_info) = snapshot_archive_info { + RpcRequestMiddleware::redirect(&format!( + "/{}", + snapshot_archive_info + .path + .file_name() + .unwrap_or_else(|| std::ffi::OsStr::new("")) + .to_str() + .unwrap_or("") + )) + } else { + RpcRequestMiddleware::not_found() + } + .into(); + } + } + + if let Some(result) = process_rest(&self.bank_forks, request.uri().path()) { + hyper::Response::builder() + .status(hyper::StatusCode::OK) + .body(hyper::Body::from(result)) + .unwrap() + .into() + } else if self.is_file_get_path(request.uri().path()) { + self.process_file_get(request.uri().path()) + } else if request.uri().path() == "/health" { + 
hyper::Response::builder() + .status(hyper::StatusCode::OK) + .body(hyper::Body::from(self.health_check())) + .unwrap() + .into() + } else { + request.into() + } + } +} + +fn process_rest(bank_forks: &Arc>, path: &str) -> Option { + match path { + "/v0/circulating-supply" => { + let bank = bank_forks.read().unwrap().root_bank(); + let total_supply = bank.capitalization(); + let non_circulating_supply = + solana_runtime::non_circulating_supply::calculate_non_circulating_supply(&bank) + .expect("Scan should not error on root banks") + .lamports; + Some(format!( + "{}", + lamports_to_sol(total_supply - non_circulating_supply) + )) + } + "/v0/total-supply" => { + let bank = bank_forks.read().unwrap().root_bank(); + let total_supply = bank.capitalization(); + Some(format!("{}", lamports_to_sol(total_supply))) + } + _ => None, + } +} + +impl JsonRpcService { + #[allow(clippy::too_many_arguments)] + pub fn new( + rpc_addr: SocketAddr, + config: JsonRpcConfig, + snapshot_config: Option, + bank_forks: Arc>, + blockstore: Arc, + genesis_hash: Hash, + tx_channel: (Sender, Receiver), + ledger_path: &Path, + node_exit: Arc>, + max_complete_transaction_status_slot: Arc, + prioritization_fee_cache: Arc, + ) -> Result { + info!("rpc bound to {:?}", rpc_addr); + info!("rpc configuration: {:?}", config); + let rpc_threads = 1.max(config.rpc_threads); + let rpc_niceness_adj = config.rpc_niceness_adj; + + let largest_accounts_cache = Arc::new(RwLock::new(LargestAccountsCache::new( + LARGEST_ACCOUNTS_CACHE_DURATION, + ))); + + // sadly, some parts of our current rpc implemention block the jsonrpc's + // _socket-listening_ event loop for too long, due to (blocking) long IO or intesive CPU, + // causing no further processing of incoming requests and ultimatily innocent clients timing-out. + // So create a (shared) multi-threaded event_loop for jsonrpc and set its .threads() to 1, + // so that we avoid the single-threaded event loops from being created automatically by + // jsonrpc for threads when .threads(N > 1) is given. + let runtime = Arc::new( + tokio::runtime::Builder::new_multi_thread() + .worker_threads(rpc_threads) + .on_thread_start(move || renice_this_thread(rpc_niceness_adj).unwrap()) + .thread_name("solRpcEl") + .enable_all() + .build() + .expect("Runtime"), + ); + + let exit_bigtable_ledger_upload_service = Arc::new(AtomicBool::new(false)); + + // Note: Since block_commitment_cache is not used, so we can not construct + // bigtable_ledger_upload_service now. 
+ // TODO: support bigtable ledger storage in future + // support bigtable ledger storage in future + // let (bigtable_ledger_storage, _bigtable_ledger_upload_service) = + // if let Some(RpcBigtableConfig { + // enable_bigtable_ledger_upload, + // ref bigtable_instance_name, + // ref bigtable_app_profile_id, + // timeout, + // max_message_size, + // }) = config.rpc_bigtable_config + // { + // let bigtable_config = solana_storage_bigtable::LedgerStorageConfig { + // read_only: !enable_bigtable_ledger_upload, + // timeout, + // credential_type: CredentialType::Filepath(None), + // instance_name: bigtable_instance_name.clone(), + // app_profile_id: bigtable_app_profile_id.clone(), + // max_message_size, + // }; + // runtime + // .block_on(solana_storage_bigtable::LedgerStorage::new_with_config( + // bigtable_config, + // )) + // .map(|bigtable_ledger_storage| { + // info!("BigTable ledger storage initialized"); + // + // let bigtable_ledger_upload_service = if enable_bigtable_ledger_upload { + // Some(Arc::new(BigTableUploadService::new_with_config( + // runtime.clone(), + // bigtable_ledger_storage.clone(), + // blockstore.clone(), + // block_commitment_cache.clone(), + // max_complete_transaction_status_slot.clone(), + // Arc::new(AtomicU64::new(u64::MAX)), // Actually we do not need this + // ConfirmedBlockUploadConfig::default(), + // exit_bigtable_ledger_upload_service.clone(), + // ))) + // } else { + // None + // }; + // + // ( + // Some(bigtable_ledger_storage), + // bigtable_ledger_upload_service, + // ) + // }) + // .unwrap_or_else(|err| { + // error!("Failed to initialize BigTable ledger storage: {:?}", err); + // (None, None) + // }) + // } else { + // (None, None) + // }; + + let full_api = config.full_api; + let max_request_body_size = config + .max_request_body_size + .unwrap_or(MAX_REQUEST_BODY_SIZE); + let request_processor = JsonRpcRequestProcessor::new( + config, + snapshot_config.clone(), + bank_forks.clone(), + blockstore, + node_exit.clone(), + genesis_hash, + tx_channel, + None, // bigtable_ledger_storage, + largest_accounts_cache, + max_complete_transaction_status_slot, + prioritization_fee_cache, + ); + + let ledger_path = ledger_path.to_path_buf(); + + let (close_handle_sender, close_handle_receiver) = unbounded(); + let thread_hdl = Builder::new() + .name("solJsonRpcSvc".to_string()) + .spawn(move || { + renice_this_thread(rpc_niceness_adj).unwrap(); + + let mut io = MetaIoHandler::default(); + + io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); + if full_api { + io.extend_with(rpc_bank::BankDataImpl.to_delegate()); + io.extend_with(rpc_accounts::AccountsDataImpl.to_delegate()); + io.extend_with(rpc_accounts_scan::AccountsScanImpl.to_delegate()); + io.extend_with(rpc_full::FullImpl.to_delegate()); + } + + let request_middleware = + RpcRequestMiddleware::new(ledger_path, snapshot_config, bank_forks.clone()); + let server = ServerBuilder::with_meta_extractor( + io, + move |req: &hyper::Request| { + let xbigtable = req.headers().get("x-bigtable"); + if xbigtable.is_some_and(|v| v == "disabled") { + request_processor.clone_without_bigtable() + } else { + request_processor.clone() + } + }, + ) + .event_loop_executor(runtime.handle().clone()) + .threads(1) + .cors(DomainsValidation::AllowOnly(vec![ + AccessControlAllowOrigin::Any, + ])) + .cors_max_age(86400) + .request_middleware(request_middleware) + .max_request_body_size(max_request_body_size) + .start_http(&rpc_addr); + + if let Err(e) = server { + warn!( + "JSON RPC service unavailable error: {:?}. 
\n\ + Also, check that port {} is not already in use by another application", + e, + rpc_addr.port() + ); + close_handle_sender.send(Err(e.to_string())).unwrap(); + return; + } + + let server = server.unwrap(); + close_handle_sender.send(Ok(server.close_handle())).unwrap(); + server.wait(); + exit_bigtable_ledger_upload_service.store(true, Ordering::Relaxed); + }) + .unwrap(); + + let close_handle = close_handle_receiver.recv().unwrap()?; + let close_handle_ = close_handle.clone(); + node_exit.write().unwrap().register_exit(Box::new(move || { + close_handle_.close(); + })); + Ok(Self { + thread_hdl, + close_handle: Some(close_handle), + }) + } + + pub fn exit(&mut self) { + if let Some(c) = self.close_handle.take() { + c.close() + } + } + + pub fn join(mut self) -> thread::Result<()> { + self.exit(); + self.thread_hdl.join() + } +} diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs new file mode 100644 index 0000000..e80da9f --- /dev/null +++ b/rpc/src/lib.rs @@ -0,0 +1,21 @@ +#![allow(dead_code)] + +#[macro_use] +extern crate log; + +#[cfg(test)] +#[macro_use] +extern crate serde_json; + +#[macro_use] +extern crate solana_metrics; + +mod account_resolver; +mod error; +mod filter; +mod parsed_token_accounts; + +pub mod jsonrpc; +pub mod service; + +use error::{Error, Result}; diff --git a/rpc/src/parsed_token_accounts.rs b/rpc/src/parsed_token_accounts.rs new file mode 100644 index 0000000..bca7fd8 --- /dev/null +++ b/rpc/src/parsed_token_accounts.rs @@ -0,0 +1,127 @@ +use crate::account_resolver; +use { + jsonrpc_core::{Error, Result}, + solana_account_decoder::{ + parse_account_data::{AccountAdditionalDataV2, SplTokenAdditionalData}, + parse_token::get_token_account_mint, + UiAccount, UiAccountData, UiAccountEncoding, + }, + solana_rpc_client_api::response::RpcKeyedAccount, + solana_runtime::bank::Bank, + solana_sdk::{ + account::{AccountSharedData, ReadableAccount}, + pubkey::Pubkey, + }, + spl_token_2022::{ + extension::{ + interest_bearing_mint::InterestBearingConfig, BaseStateWithExtensions, + StateWithExtensions, + }, + state::Mint, + }, + std::{collections::HashMap, sync::Arc}, +}; + +pub fn get_parsed_token_account( + bank: &Bank, + pubkey: &Pubkey, + account: AccountSharedData, + // only used for simulation results + overwrite_accounts: Option<&HashMap>, +) -> UiAccount { + let additional_data = get_token_account_mint(account.data()) + .and_then(|mint_pubkey| { + account_resolver::get_account_from_overwrites_or_bank( + &mint_pubkey, + bank, + overwrite_accounts, + ) + }) + .and_then(|mint_account| get_additional_mint_data(bank, mint_account.data()).ok()) + .map(|data| AccountAdditionalDataV2 { + spl_token_additional_data: Some(data), + }); + + UiAccount::encode( + pubkey, + &account, + UiAccountEncoding::JsonParsed, + additional_data, + None, + ) +} + +pub fn get_parsed_token_accounts( + bank: Arc, + keyed_accounts: I, +) -> impl Iterator +where + I: Iterator, +{ + let mut mint_data: HashMap = HashMap::new(); + keyed_accounts.filter_map(move |(pubkey, account)| { + let additional_data = get_token_account_mint(account.data()).and_then(|mint_pubkey| { + mint_data.get(&mint_pubkey).cloned().or_else(|| { + let (_, data) = get_mint_owner_and_additional_data(&bank, &mint_pubkey).ok()?; + let data = AccountAdditionalDataV2 { + spl_token_additional_data: Some(data), + }; + mint_data.insert(mint_pubkey, data); + Some(data) + }) + }); + + let maybe_encoded_account = UiAccount::encode( + &pubkey, + &account, + UiAccountEncoding::JsonParsed, + additional_data, + None, + ); + if let 
UiAccountData::Json(_) = &maybe_encoded_account.data { + Some(RpcKeyedAccount { + pubkey: pubkey.to_string(), + account: maybe_encoded_account, + }) + } else { + None + } + }) +} + +/// Analyze a mint Pubkey that may be the native_mint and get the mint-account owner (token +/// program_id) and decimals +pub(crate) fn get_mint_owner_and_additional_data( + bank: &Bank, + mint: &Pubkey, +) -> Result<(Pubkey, SplTokenAdditionalData)> { + if mint == &spl_token::native_mint::id() { + Ok(( + spl_token::id(), + SplTokenAdditionalData::with_decimals(spl_token::native_mint::DECIMALS), + )) + } else { + let mint_account = bank.get_account(mint).ok_or_else(|| { + Error::invalid_params("Invalid param: could not find mint".to_string()) + })?; + let mint_data = get_additional_mint_data(bank, mint_account.data())?; + Ok((*mint_account.owner(), mint_data)) + } +} + +fn get_additional_mint_data(bank: &Bank, data: &[u8]) -> Result { + StateWithExtensions::::unpack(data) + .map_err(|_| { + Error::invalid_params("Invalid param: Token mint could not be unpacked".to_string()) + }) + .map(|mint| { + let interest_bearing_config = mint + .get_extension::() + .map(|x| (*x, bank.clock().unix_timestamp)) + .ok(); + SplTokenAdditionalData { + decimals: mint.base.decimals, + interest_bearing_config, + } + }) +} diff --git a/rpc/src/service.rs b/rpc/src/service.rs new file mode 100644 index 0000000..aafef1f --- /dev/null +++ b/rpc/src/service.rs @@ -0,0 +1,153 @@ +use crate::jsonrpc::core::JsonRpcConfig; +use crate::jsonrpc::service::JsonRpcService; +use crate::Result; +use crossbeam_channel::{Receiver, Sender}; +use igloo_storage::RollupStorage; +use solana_ledger::blockstore::Blockstore; +use solana_runtime::bank_forks::BankForks; +use solana_runtime::prioritization_fee_cache::PrioritizationFeeCache; +use solana_runtime::snapshot_config::SnapshotConfig; +use solana_sdk::exit::Exit; +use solana_sdk::hash::Hash; +use solana_sdk::transaction::SanitizedTransaction; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use std::path::Path; +use std::sync::atomic::AtomicU64; +use std::sync::{Arc, RwLock}; + +#[derive(Debug, Clone)] +pub struct RpcConfig { + pub rpc_addr: SocketAddr, + pub jsonrpc_config: JsonRpcConfig, +} + +impl Default for RpcConfig { + fn default() -> Self { + Self { + rpc_addr: SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 8899)), + jsonrpc_config: JsonRpcConfig::default(), + } + } +} + +pub struct RpcService { + // Note: In current implementation, we removed `solana_send_transaction_service`, which sent + // transactions to TPU. We do not consider to make the rpc service independently right now, the + // exposed transaction receiver below should be used by transaction stream module directly. + pub jsonrpc: JsonRpcService, +} + +impl RpcService { + #[allow(clippy::too_many_arguments)] + pub fn new( + rpc_config: RpcConfig, + tx_channel: (Sender, Receiver), + node_exit: Arc>, + storage: &RollupStorage, + ) -> Result { + let storage_config = storage.config(); + let jsonrpc = Self::new_rpc_service( + &rpc_config, + storage_config.storage.snapshot_config.clone(), + storage.bank_forks(), + storage.blockstore(), + storage_config + .storage + .expected_genesis_hash + .unwrap_or(storage_config.genesis.hash()), + tx_channel, + storage_config.ledger_path.as_path(), + node_exit.clone(), + storage + .history_services() + .max_complete_transaction_status_slot + .clone(), + )?; + + if storage_config.storage.halt_at_slot.is_some() { + // Park with the RPC service running, ready for inspection! 
+ warn!("Validator halted"); + std::thread::park(); + } + + Ok(Self { jsonrpc }) + } + + pub fn join(self) { + self.jsonrpc.join().expect("jsonrpc_service"); + } + + #[allow(clippy::too_many_arguments)] + fn new_rpc_service( + rpc_config: &RpcConfig, + snapshot_config: SnapshotConfig, + bank_forks: Arc>, + blockstore: Arc, + genesis_hash: Hash, + tx_channel: (Sender, Receiver), + ledger_path: &Path, + node_exit: Arc>, + max_complete_transaction_status_slot: Arc, + ) -> Result { + // block min prioritization fee cache should be readable by RPC, and writable by validator + // (by both replay stage and banking stage) + let prioritization_fee_cache = Arc::new(PrioritizationFeeCache::default()); + + JsonRpcService::new( + rpc_config.rpc_addr, + rpc_config.jsonrpc_config.clone(), + Some(snapshot_config), + bank_forks, + blockstore, + genesis_hash, + tx_channel, + ledger_path, + node_exit, + max_complete_transaction_status_slot.clone(), + prioritization_fee_cache, + ) + .map_err(crate::Error::InitJsonRpc) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use anyhow::Result; + use igloo_storage::config::GlobalConfig; + use solana_client::rpc_client::RpcClient; + + fn new_rpc_service(storage: &mut RollupStorage, rpc_config: RpcConfig) -> Result { + let node_exit = Arc::new(RwLock::new(Exit::default())); + + let rpc_service = RpcService::new( + rpc_config, + crossbeam_channel::unbounded(), + node_exit, + storage, + )?; + + Ok(rpc_service) + } + + #[test] + fn test_json_rpc_service() -> Result<()> { + let ledger_path = tempfile::tempdir()?.into_path(); + let config = GlobalConfig::new_dev(&ledger_path)?; + let mut storage = RollupStorage::new(config)?; + storage.init()?; + + let rpc_config = RpcConfig::default(); + let rpc_service = new_rpc_service(&mut storage, rpc_config.clone())?; + + // Test the JSON RPC service + let client = RpcClient::new_socket(rpc_config.rpc_addr); + let hash = client + .get_genesis_hash() + .expect("Failed to get genesis hash"); + assert_eq!(hash, storage.config().genesis.hash()); + + rpc_service.join(); + Ok(()) + } +}
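The filter tests near the top of core.rs exercise the fast paths that recognize SPL Token owner/mint filters. For context, here is a minimal client-side sketch (not part of this patch) of a getProgramAccounts request whose filters match that optimized token-account shape; the endpoint URL and mint are placeholders, and the standard solana-client config/filter types plus the spl-token crate are assumed to be available:

use solana_account_decoder::UiAccountEncoding;
use solana_client::{
    rpc_client::RpcClient,
    rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
    rpc_filter::{Memcmp, RpcFilterType},
};
use solana_sdk::pubkey::Pubkey;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = RpcClient::new("http://127.0.0.1:8899".to_string());
    let mint = Pubkey::new_unique(); // placeholder mint address

    // DataSize(165) plus a memcmp on the mint at offset 0 is the classic
    // "all token accounts of a mint" query that the filter helpers special-case.
    let config = RpcProgramAccountsConfig {
        filters: Some(vec![
            RpcFilterType::DataSize(165),
            RpcFilterType::Memcmp(Memcmp::new_base58_encoded(0, &mint.to_bytes())),
        ]),
        account_config: RpcAccountInfoConfig {
            encoding: Some(UiAccountEncoding::Base64),
            ..RpcAccountInfoConfig::default()
        },
        ..RpcProgramAccountsConfig::default()
    };

    let accounts = client.get_program_accounts_with_config(&spl_token::id(), config)?;
    println!("found {} token accounts", accounts.len());
    Ok(())
}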
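The encoded-size goldens and the too-large-payload tests pin MAX_BASE58_SIZE and MAX_BASE64_SIZE to the worst-case encodings of a PACKET_DATA_SIZE-byte transaction: the handler first bounds the encoded length, then the decoded length, then deserializes. A standalone sketch of how such bounds can be derived; the printed values are only correct under the assumption that PACKET_DATA_SIZE is 1232 bytes, and the crate's actual constants are the ones referenced by the tests in core.rs:

/// Worst-case base64 length: every 3 raw bytes become 4 output characters (with padding).
fn worst_case_base64_len(raw_len: usize) -> usize {
    raw_len.div_ceil(3) * 4
}

/// Worst-case base58 length: each raw byte carries log2(256) / log2(58) ~ 1.366 digits.
fn worst_case_base58_len(raw_len: usize) -> usize {
    ((raw_len * 8) as f64 / 58f64.log2()).ceil() as usize
}

fn main() {
    let packet_data_size = 1232; // assumed value of solana_sdk::packet::PACKET_DATA_SIZE
    println!("base64 worst case: {}", worst_case_base64_len(packet_data_size)); // 1644
    println!("base58 worst case: {}", worst_case_base58_len(packet_data_size)); // 1683
}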
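test_get_fee_for_message drives the getFeeForMessage method with both legacy and v0 messages. A hedged client-side sketch against a locally running instance, using the standard solana-client helpers (the address, payer, and transfer amount are placeholders):

use solana_client::rpc_client::RpcClient;
use solana_sdk::{message::Message, pubkey::Pubkey, system_instruction};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = RpcClient::new("http://127.0.0.1:8899".to_string());

    // Fees are blockhash-specific, so build the message against the latest blockhash.
    let blockhash = client.get_latest_blockhash()?;
    let payer = Pubkey::new_unique(); // placeholder; no signature is needed to quote a fee
    let recipient = Pubkey::new_unique();
    let message = Message::new_with_blockhash(
        &[system_instruction::transfer(&payer, &recipient, 1)],
        Some(&payer),
        &blockhash,
    );

    let fee = client.get_fee_for_message(&message)?;
    println!("fee for message: {fee} lamports");
    Ok(())
}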
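Similarly, the getRecentPrioritizationFees test checks the per-slot minimum fees with and without an account scope. A hedged sketch of the corresponding client call (the scoping account is a placeholder; in practice you pass accounts your transaction will write-lock):

use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = RpcClient::new("http://127.0.0.1:8899".to_string());
    let account = Pubkey::new_unique(); // placeholder writable account

    // Passing an empty slice returns the per-slot minimum over all transactions instead.
    for fee in client.get_recent_prioritization_fees(&[account])? {
        println!("slot {}: min prioritization fee {}", fee.slot, fee.prioritization_fee);
    }
    Ok(())
}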
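Besides JSON-RPC, RpcRequestMiddleware and process_rest serve a few plain HTTP paths: /health, /v0/total-supply, /v0/circulating-supply, the genesis archive, and redirects from /snapshot.tar.bz2 and /incremental-snapshot.tar.bz2 to the newest local archives. A hedged sketch of hitting the text endpoints; it assumes reqwest with the "blocking" feature and the default port from RpcConfig:

// Cargo.toml (assumption): reqwest = { version = "0.11", features = ["blocking"] }
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let base = "http://127.0.0.1:8899";
    // Plain GET endpoints handled by the request middleware before the JSON-RPC router.
    for path in ["/health", "/v0/total-supply", "/v0/circulating-supply"] {
        let body = reqwest::blocking::get(format!("{base}{path}"))?.text()?;
        println!("GET {path} -> {body}");
    }
    Ok(())
}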
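parsed_token_accounts.rs only emits accounts whose data could be rendered as jsonParsed (note the UiAccountData::Json check), enriching them with mint decimals and interest-bearing config. A hedged sketch of what that looks like from the client side via getTokenAccountsByOwner; the owner is a placeholder and the spl-token crate is assumed for the program id:

use solana_account_decoder::UiAccountData;
use solana_client::{rpc_client::RpcClient, rpc_request::TokenAccountsFilter};
use solana_sdk::pubkey::Pubkey;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = RpcClient::new("http://127.0.0.1:8899".to_string());
    let owner = Pubkey::new_unique(); // placeholder wallet
    let filter = TokenAccountsFilter::ProgramId(spl_token::id());

    for keyed in client.get_token_accounts_by_owner(&owner, filter)? {
        // Accounts whose data failed to parse are dropped server-side, so in practice
        // everything returned here is already in the Json variant.
        if let UiAccountData::Json(parsed) = keyed.account.data {
            println!("{}: {}", keyed.pubkey, parsed.parsed);
        }
    }
    Ok(())
}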
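Finally, wiring the service into an embedding node follows the crate's own test in service.rs: build a RollupStorage, hand RpcService the sanitized-transaction channel (its receiver is meant to be drained by a downstream transaction stream module, since send-transaction-service was dropped), and join. A hedged sketch; the crate is assumed to be importable as igloo_rpc, and tempfile/anyhow are assumed to be available:

use igloo_rpc::service::{RpcConfig, RpcService};
use igloo_storage::{config::GlobalConfig, RollupStorage};
use solana_sdk::exit::Exit;
use std::sync::{Arc, RwLock};

fn main() -> anyhow::Result<()> {
    // Storage setup mirrors the crate's own test; paths and config are illustrative.
    let ledger_path = tempfile::tempdir()?.into_path();
    let config = GlobalConfig::new_dev(&ledger_path)?;
    let mut storage = RollupStorage::new(config)?;
    storage.init()?;

    // Sender feeds sanitized transactions onward; the RPC service only hands them off.
    let tx_channel = crossbeam_channel::unbounded();
    let node_exit = Arc::new(RwLock::new(Exit::default()));

    let rpc = RpcService::new(RpcConfig::default(), tx_channel, node_exit, &storage)?;
    // join() blocks until the registered exit handle closes the HTTP server.
    rpc.join();
    Ok(())
}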