From 6a56a2ec86316645c6349ba922428e414d7641d9 Mon Sep 17 00:00:00 2001
From: Lucas B
Date: Thu, 25 Aug 2022 17:18:46 -0500
Subject: [PATCH] Jito Patch

Squashed commits:
- Use cluster info functions for tpu (#345) (#347)
- Use git rev-parse for git sha
- Remove blacklisted tx from message_hash_to_transaction (backport #374) (#376)
- Update scripts for easy local setup (#383)
- Backport sim_bundle improvements (#407)
- Backport clone derivation #416 (#417)
- Backport #419: add upsert to AccountOverrides (#420)
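
This patch squashes Jito's validator changes onto upstream v1.16.18. The
bulk of the diff adds the bundle pipeline (bundle/, core/src/bundle_stage/),
the block engine and relayer proxy stages (core/src/proxy/), tip handling
(core/src/tip_manager.rs and the tip-distributor workspace member), bundle
simulation support in the RPC stack, and the anchor, jito-programs, and
jito-protos/protos submodules.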
---
.dockerignore | 9 +
.github/workflows/client-targets.yml | 4 +
.github/workflows/crate-check.yml | 1 +
.github/workflows/docs.yml | 3 +
.github/workflows/downstream-project-spl.yml | 6 +
.../increment-cargo-version-on-release.yml | 2 +
.github/workflows/release-artifacts.yml | 1 +
.gitignore | 6 +-
.gitmodules | 9 +
Cargo.lock | 443 ++++-
Cargo.toml | 10 +
README.md | 141 +-
SECURITY.md | 167 --
anchor | 1 +
banking-bench/src/main.rs | 14 +-
banks-server/Cargo.toml | 5 +
banks-server/src/banks_server.rs | 5 +-
bootstrap | 28 +
bundle/Cargo.toml | 34 +
bundle/src/bundle_execution.rs | 1186 ++++++++++++
bundle/src/lib.rs | 60 +
ci/buildkite-pipeline-in-disk.sh | 4 +-
ci/buildkite-pipeline.sh | 4 +-
ci/channel-info.sh | 2 +-
ci/check-crates.sh | 3 +
core/Cargo.toml | 11 +
core/benches/banking_stage.rs | 26 +-
core/benches/cluster_info.rs | 1 +
core/benches/consumer.rs | 24 +-
core/benches/proto_to_packet.rs | 56 +
core/benches/retransmit_stage.rs | 1 +
core/src/accounts_hash_verifier.rs | 7 +-
core/src/admin_rpc_post_init.rs | 7 +-
core/src/banking_stage.rs | 66 +-
core/src/banking_stage/committer.rs | 17 +-
core/src/banking_stage/consume_worker.rs | 11 +-
core/src/banking_stage/consumer.rs | 158 +-
core/src/banking_trace.rs | 1 +
core/src/broadcast_stage.rs | 49 +-
.../broadcast_duplicates_run.rs | 1 +
.../broadcast_fake_shreds_run.rs | 1 +
core/src/broadcast_stage/broadcast_utils.rs | 60 +-
.../fail_entry_verification_broadcast_run.rs | 4 +-
.../broadcast_stage/standard_broadcast_run.rs | 30 +-
core/src/bundle_stage.rs | 435 +++++
.../src/bundle_stage/bundle_account_locker.rs | 328 ++++
core/src/bundle_stage/bundle_consumer.rs | 1589 +++++++++++++++++
.../bundle_packet_deserializer.rs | 286 +++
.../bundle_stage/bundle_packet_receiver.rs | 848 +++++++++
.../bundle_reserved_space_manager.rs | 189 ++
.../bundle_stage_leader_metrics.rs | 502 ++++++
core/src/bundle_stage/committer.rs | 221 +++
core/src/bundle_stage/result.rs | 41 +
core/src/consensus_cache_updater.rs | 52 +
core/src/immutable_deserialized_bundle.rs | 483 +++++
core/src/latest_unprocessed_votes.rs | 2 +-
core/src/lib.rs | 44 +
core/src/packet_bundle.rs | 7 +
core/src/proxy/auth.rs | 185 ++
core/src/proxy/block_engine_stage.rs | 533 ++++++
core/src/proxy/fetch_stage_manager.rs | 170 ++
core/src/proxy/mod.rs | 100 ++
core/src/proxy/relayer_stage.rs | 495 +++++
core/src/qos_service.rs | 45 +-
core/src/replay_stage.rs | 4 +-
core/src/retransmit_stage.rs | 16 +-
core/src/snapshot_packager_service.rs | 19 +-
core/src/tip_manager.rs | 583 ++++++
core/src/tpu.rs | 113 +-
core/src/tpu_entry_notifier.rs | 60 +-
core/src/tvu.rs | 5 +-
core/src/unprocessed_transaction_storage.rs | 782 ++++----
core/src/validator.rs | 25 +-
core/tests/epoch_accounts_hash.rs | 2 +
core/tests/snapshots.rs | 5 +
deploy_programs | 17 +
dev/Dockerfile | 48 +
entry/src/entry.rs | 2 +-
entry/src/poh.rs | 29 +-
f | 30 +
fetch-spl.sh | 41 +-
frozen-abi/macro/src/lib.rs | 2 +-
.../src/geyser_plugin_manager.rs | 36 +-
gossip/src/cluster_info.rs | 4 +
jito-programs | 1 +
jito-protos/Cargo.toml | 19 +
jito-protos/build.rs | 38 +
jito-protos/protos | 1 +
jito-protos/src/lib.rs | 25 +
ledger-tool/src/ledger_utils.rs | 16 +-
ledger-tool/src/main.rs | 31 +-
ledger-tool/src/program.rs | 1 +
ledger/src/bank_forks_utils.rs | 21 +-
ledger/src/blockstore_processor.rs | 5 +-
ledger/src/token_balances.rs | 58 +-
.../src/local_cluster_snapshot_utils.rs | 6 +-
local-cluster/src/validator_configs.rs | 5 +
local-cluster/tests/local_cluster.rs | 12 +-
merkle-tree/src/merkle_tree.rs | 46 +-
multinode-demo/bootstrap-validator.sh | 34 +
multinode-demo/validator.sh | 40 +
perf/src/sigverify.rs | 2 +-
poh/src/poh_recorder.rs | 138 +-
poh/src/poh_service.rs | 34 +-
program-runtime/src/timings.rs | 23 +-
program-test/src/programs.rs | 17 +
.../programs/jito_tip_distribution-0.1.3.so | Bin 0 -> 439968 bytes
.../src/programs/jito_tip_payment-0.1.3.so | Bin 0 -> 433224 bytes
programs/sbf/Cargo.lock | 301 +++-
programs/sbf/tests/programs.rs | 4 +-
rpc-client-api/Cargo.toml | 2 +
rpc-client-api/src/bundles.rs | 166 ++
rpc-client-api/src/config.rs | 2 +-
rpc-client-api/src/lib.rs | 1 +
rpc-client-api/src/request.rs | 3 +
rpc-client-api/src/response.rs | 16 +
rpc-client/src/http_sender.rs | 209 ++-
rpc-client/src/mock_sender.rs | 7 +
rpc-client/src/nonblocking/rpc_client.rs | 131 +-
rpc-client/src/rpc_client.rs | 30 +
rpc-client/src/rpc_sender.rs | 4 +
rpc-test/Cargo.toml | 1 +
rpc-test/tests/rpc.rs | 2 +
rpc/Cargo.toml | 2 +
rpc/src/rpc.rs | 489 ++++-
rpc/src/rpc_service.rs | 9 +-
runtime/src/account_overrides.rs | 6 +-
runtime/src/accounts.rs | 98 +-
runtime/src/bank.rs | 169 +-
runtime/src/cost_tracker.rs | 8 +
runtime/src/snapshot_package.rs | 8 +-
runtime/src/snapshot_utils.rs | 30 +-
runtime/src/stake_account.rs | 4 +-
runtime/src/stakes.rs | 12 +-
runtime/src/transaction_batch.rs | 24 +-
rustfmt.toml | 5 +
s | 15 +
scripts/increment-cargo-version.sh | 2 +
scripts/run.sh | 4 +
sdk/Cargo.toml | 1 +
sdk/src/bundle/mod.rs | 33 +
sdk/src/lib.rs | 1 +
send-transaction-service/Cargo.toml | 2 +
.../src/send_transaction_service.rs | 47 +-
start | 9 +
start_multi | 30 +
tip-distributor/Cargo.toml | 52 +
tip-distributor/README.md | 43 +
tip-distributor/src/bin/claim-mev-tips.rs | 52 +
.../src/bin/merkle-root-generator.rs | 34 +
.../src/bin/merkle-root-uploader.rs | 50 +
tip-distributor/src/bin/reclaim-rent.rs | 62 +
.../src/bin/stake-meta-generator.rs | 67 +
tip-distributor/src/claim_mev_workflow.rs | 152 ++
tip-distributor/src/lib.rs | 887 +++++++++
.../src/merkle_root_generator_workflow.rs | 54 +
.../src/merkle_root_upload_workflow.rs | 134 ++
tip-distributor/src/reclaim_rent_workflow.rs | 167 ++
.../src/stake_meta_generator_workflow.rs | 952 ++++++++++
transaction-status/src/lib.rs | 9 +-
validator/Cargo.toml | 1 +
validator/src/admin_rpc_service.rs | 116 +-
validator/src/bootstrap.rs | 9 +-
validator/src/cli.rs | 179 ++
validator/src/dashboard.rs | 1 +
validator/src/main.rs | 150 +-
version/src/lib.rs | 2 +-
167 files changed, 15426 insertions(+), 1264 deletions(-)
create mode 100644 .dockerignore
create mode 100644 .gitmodules
delete mode 100644 SECURITY.md
create mode 160000 anchor
create mode 100755 bootstrap
create mode 100644 bundle/Cargo.toml
create mode 100644 bundle/src/bundle_execution.rs
create mode 100644 bundle/src/lib.rs
create mode 100644 core/benches/proto_to_packet.rs
create mode 100644 core/src/bundle_stage.rs
create mode 100644 core/src/bundle_stage/bundle_account_locker.rs
create mode 100644 core/src/bundle_stage/bundle_consumer.rs
create mode 100644 core/src/bundle_stage/bundle_packet_deserializer.rs
create mode 100644 core/src/bundle_stage/bundle_packet_receiver.rs
create mode 100644 core/src/bundle_stage/bundle_reserved_space_manager.rs
create mode 100644 core/src/bundle_stage/bundle_stage_leader_metrics.rs
create mode 100644 core/src/bundle_stage/committer.rs
create mode 100644 core/src/bundle_stage/result.rs
create mode 100644 core/src/consensus_cache_updater.rs
create mode 100644 core/src/immutable_deserialized_bundle.rs
create mode 100644 core/src/packet_bundle.rs
create mode 100644 core/src/proxy/auth.rs
create mode 100644 core/src/proxy/block_engine_stage.rs
create mode 100644 core/src/proxy/fetch_stage_manager.rs
create mode 100644 core/src/proxy/mod.rs
create mode 100644 core/src/proxy/relayer_stage.rs
create mode 100644 core/src/tip_manager.rs
create mode 100755 deploy_programs
create mode 100644 dev/Dockerfile
create mode 100755 f
create mode 160000 jito-programs
create mode 100644 jito-protos/Cargo.toml
create mode 100644 jito-protos/build.rs
create mode 160000 jito-protos/protos
create mode 100644 jito-protos/src/lib.rs
create mode 100644 program-test/src/programs/jito_tip_distribution-0.1.3.so
create mode 100644 program-test/src/programs/jito_tip_payment-0.1.3.so
create mode 100644 rpc-client-api/src/bundles.rs
create mode 100755 s
create mode 100644 sdk/src/bundle/mod.rs
create mode 100755 start
create mode 100755 start_multi
create mode 100644 tip-distributor/Cargo.toml
create mode 100644 tip-distributor/README.md
create mode 100644 tip-distributor/src/bin/claim-mev-tips.rs
create mode 100644 tip-distributor/src/bin/merkle-root-generator.rs
create mode 100644 tip-distributor/src/bin/merkle-root-uploader.rs
create mode 100644 tip-distributor/src/bin/reclaim-rent.rs
create mode 100644 tip-distributor/src/bin/stake-meta-generator.rs
create mode 100644 tip-distributor/src/claim_mev_workflow.rs
create mode 100644 tip-distributor/src/lib.rs
create mode 100644 tip-distributor/src/merkle_root_generator_workflow.rs
create mode 100644 tip-distributor/src/merkle_root_upload_workflow.rs
create mode 100644 tip-distributor/src/reclaim_rent_workflow.rs
create mode 100644 tip-distributor/src/stake_meta_generator_workflow.rs
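
This file is a standard git format-patch mbox, and the three
"create mode 160000" entries above are submodule gitlinks, so the
submodules have to be initialized once the patch is applied. A minimal
sketch, assuming the mbox is saved locally as jito-patch.mbox (the file
name is illustrative):

    # apply the patch onto a matching upstream checkout
    git checkout v1.16.18
    git am jito-patch.mbox

    # fetch the anchor, jito-programs, and jito-protos/protos submodules
    git submodule update --init --recursive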
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000..99262ca894
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,9 @@
+.dockerignore
+.git/
+.github/
+.gitignore
+.idea/
+README.md
+Dockerfile
+f
+target/
diff --git a/.github/workflows/client-targets.yml b/.github/workflows/client-targets.yml
index 3b3d1779a1..fd1971e894 100644
--- a/.github/workflows/client-targets.yml
+++ b/.github/workflows/client-targets.yml
@@ -30,6 +30,8 @@ jobs:
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
- run: cargo install cargo-ndk@2.12.2
@@ -54,6 +56,8 @@ jobs:
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
- name: Setup Rust
run: |
diff --git a/.github/workflows/crate-check.yml b/.github/workflows/crate-check.yml
index a47e7cde5f..9b57d633ad 100644
--- a/.github/workflows/crate-check.yml
+++ b/.github/workflows/crate-check.yml
@@ -18,6 +18,7 @@ jobs:
- uses: actions/checkout@v3
with:
fetch-depth: 0
+ submodules: 'recursive'
- name: Get commit range (push)
if: ${{ github.event_name == 'push' }}
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index fb2096bd33..e5ac907ea1 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -22,6 +22,7 @@ jobs:
uses: actions/checkout@v3
with:
fetch-depth: 0
+ submodules: 'recursive'
- name: Get commit range (push)
if: ${{ github.event_name == 'push' }}
@@ -77,6 +78,8 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
- name: Setup Node
uses: actions/setup-node@v3
diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml
index 534a3190ff..26eef053cd 100644
--- a/.github/workflows/downstream-project-spl.yml
+++ b/.github/workflows/downstream-project-spl.yml
@@ -36,6 +36,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
- shell: bash
run: |
@@ -84,6 +86,8 @@ jobs:
]
steps:
- uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
- shell: bash
run: |
@@ -135,6 +139,8 @@ jobs:
steps:
- uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
- shell: bash
run: |
diff --git a/.github/workflows/increment-cargo-version-on-release.yml b/.github/workflows/increment-cargo-version-on-release.yml
index 5592d76ca5..ca55af2155 100644
--- a/.github/workflows/increment-cargo-version-on-release.yml
+++ b/.github/workflows/increment-cargo-version-on-release.yml
@@ -11,6 +11,8 @@ jobs:
steps:
- name: Checkout Repository
uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
# This script confirms two assumptions:
# 1) Tag should be branch.
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 3e5ab89fe3..7090907550 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -26,6 +26,7 @@ jobs:
with:
ref: master
fetch-depth: 0
+ submodules: 'recursive'
- name: Setup Rust
shell: bash
diff --git a/.gitignore b/.gitignore
index 3167a9d720..f891833b15 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,7 @@
/solana-release.tar.bz2
/solana-metrics/
/solana-metrics.tar.bz2
-/target/
+**/target/
/test-ledger/
**/*.rs.bk
@@ -27,7 +27,11 @@ log-*/
# fetch-spl.sh artifacts
/spl-genesis-args.sh
/spl_*.so
+/jito_*.so
.DS_Store
# scripts that may be generated by cargo *-bpf commands
**/cargo-*-bpf-child-script-*.sh
+
+.env
+docker-output/
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000000..e31fc7fccd
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,9 @@
+[submodule "anchor"]
+ path = anchor
+ url = https://github.com/jito-foundation/anchor.git
+[submodule "jito-programs"]
+ path = jito-programs
+ url = https://github.com/jito-foundation/jito-programs.git
+[submodule "jito-protos/protos"]
+ path = jito-protos/protos
+ url = https://github.com/jito-labs/mev-protos.git
diff --git a/Cargo.lock b/Cargo.lock
index 57dfd08a00..32b44617ed 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -116,6 +116,145 @@ dependencies = [
"alloc-no-stdlib",
]
+[[package]]
+name = "anchor-attribute-access-control"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.60",
+ "quote 1.0.28",
+ "regex",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-account"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "bs58 0.4.0",
+ "proc-macro2 1.0.60",
+ "quote 1.0.28",
+ "rustversion",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-constant"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "proc-macro2 1.0.60",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-error"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "proc-macro2 1.0.60",
+ "quote 1.0.28",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-event"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.60",
+ "quote 1.0.28",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-interface"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "heck 0.3.3",
+ "proc-macro2 1.0.60",
+ "quote 1.0.28",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-program"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.60",
+ "quote 1.0.28",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-state"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.60",
+ "quote 1.0.28",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-derive-accounts"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.60",
+ "quote 1.0.28",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-lang"
+version = "0.24.2"
+dependencies = [
+ "anchor-attribute-access-control",
+ "anchor-attribute-account",
+ "anchor-attribute-constant",
+ "anchor-attribute-error",
+ "anchor-attribute-event",
+ "anchor-attribute-interface",
+ "anchor-attribute-program",
+ "anchor-attribute-state",
+ "anchor-derive-accounts",
+ "arrayref",
+ "base64 0.13.1",
+ "bincode",
+ "borsh 0.10.3",
+ "bytemuck",
+ "solana-program",
+ "thiserror",
+]
+
+[[package]]
+name = "anchor-syn"
+version = "0.24.2"
+dependencies = [
+ "anyhow",
+ "bs58 0.3.1",
+ "heck 0.3.3",
+ "proc-macro2 1.0.60",
+ "proc-macro2-diagnostics",
+ "quote 1.0.28",
+ "serde",
+ "serde_json",
+ "sha2 0.9.9",
+ "syn 1.0.109",
+ "thiserror",
+]
+
[[package]]
name = "android_system_properties"
version = "0.1.4"
@@ -277,9 +416,9 @@ checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545"
[[package]]
name = "arrayvec"
-version = "0.7.2"
+version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
+checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
[[package]]
name = "ascii"
@@ -446,7 +585,7 @@ checksum = "e5694b64066a2459918d8074c2ce0d5a88f409431994c2356617c8ae0c4721fc"
dependencies = [
"async-trait",
"axum-core",
- "bitflags",
+ "bitflags 1.3.2",
"bytes",
"futures-util",
"http",
@@ -537,7 +676,7 @@ version = "0.65.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5"
dependencies = [
- "bitflags",
+ "bitflags 1.3.2",
"cexpr",
"clang-sys",
"lazy_static",
@@ -573,6 +712,12 @@ version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+[[package]]
+name = "bitflags"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635"
+
[[package]]
name = "bitmaps"
version = "2.1.0"
@@ -584,9 +729,9 @@ dependencies = [
[[package]]
name = "blake3"
-version = "1.3.3"
+version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "42ae2468a89544a466886840aa467a25b766499f4f04bf7d9fcd10ecee9fccef"
+checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87"
dependencies = [
"arrayref",
"arrayvec",
@@ -659,7 +804,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b"
dependencies = [
"borsh-derive 0.10.3",
- "hashbrown 0.11.2",
+ "hashbrown 0.13.2",
]
[[package]]
@@ -753,6 +898,12 @@ dependencies = [
"alloc-stdlib",
]
+[[package]]
+name = "bs58"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb"
+
[[package]]
name = "bs58"
version = "0.4.0"
@@ -1011,7 +1162,7 @@ checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
dependencies = [
"ansi_term",
"atty",
- "bitflags",
+ "bitflags 1.3.2",
"strsim 0.8.0",
"textwrap 0.11.0",
"unicode-width",
@@ -1025,9 +1176,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5"
dependencies = [
"atty",
- "bitflags",
- "clap_derive",
- "clap_lex",
+ "bitflags 1.3.2",
+ "clap_derive 3.2.18",
+ "clap_lex 0.2.4",
"indexmap",
"once_cell",
"strsim 0.10.0",
@@ -1035,6 +1186,21 @@ dependencies = [
"textwrap 0.16.0",
]
+[[package]]
+name = "clap"
+version = "4.1.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42dfd32784433290c51d92c438bb72ea5063797fc3cc9a21a8c4346bebbb2098"
+dependencies = [
+ "bitflags 2.4.0",
+ "clap_derive 4.1.9",
+ "clap_lex 0.3.3",
+ "is-terminal",
+ "once_cell",
+ "strsim 0.10.0",
+ "termcolor",
+]
+
[[package]]
name = "clap_derive"
version = "3.2.18"
@@ -1048,6 +1214,19 @@ dependencies = [
"syn 1.0.109",
]
+[[package]]
+name = "clap_derive"
+version = "4.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fddf67631444a3a3e3e5ac51c36a5e01335302de677bd78759eaa90ab1f46644"
+dependencies = [
+ "heck 0.4.0",
+ "proc-macro-error",
+ "proc-macro2 1.0.60",
+ "quote 1.0.28",
+ "syn 1.0.109",
+]
+
[[package]]
name = "clap_lex"
version = "0.2.4"
@@ -1057,6 +1236,15 @@ dependencies = [
"os_str_bytes",
]
+[[package]]
+name = "clap_lex"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "033f6b7a4acb1f358c742aaca805c939ee73b4c6209ae4318ec7aca81c42e646"
+dependencies = [
+ "os_str_bytes",
+]
+
[[package]]
name = "combine"
version = "3.8.1"
@@ -1140,9 +1328,9 @@ dependencies = [
[[package]]
name = "constant_time_eq"
-version = "0.2.5"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13418e745008f7349ec7e449155f419a61b92b58a99cc3616942b926825ec76b"
+checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2"
[[package]]
name = "convert_case"
@@ -1394,6 +1582,17 @@ version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57"
+[[package]]
+name = "default-env"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f753eb82d29277e79efc625e84aecacfd4851ee50e05a8573a4740239a77bfd3"
+dependencies = [
+ "proc-macro2 0.4.30",
+ "quote 0.6.13",
+ "syn 0.15.44",
+]
+
[[package]]
name = "der"
version = "0.5.1"
@@ -1970,7 +2169,7 @@ version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32c95766e0414f8bfc1d07055574c621b67739466d6ba516c4fef8e99d30d2e6"
dependencies = [
- "bitflags",
+ "bitflags 1.3.2",
"cfg-if 1.0.0",
"log",
"managed",
@@ -2160,7 +2359,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d"
dependencies = [
"base64 0.13.1",
- "bitflags",
+ "bitflags 1.3.2",
"bytes",
"headers-core",
"http",
@@ -2515,6 +2714,18 @@ version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9"
+[[package]]
+name = "is-terminal"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f"
+dependencies = [
+ "hermit-abi 0.3.1",
+ "io-lifetimes",
+ "rustix",
+ "windows-sys 0.48.0",
+]
+
[[package]]
name = "itertools"
version = "0.10.5"
@@ -2530,6 +2741,49 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35"
+[[package]]
+name = "jito-programs-vote-state"
+version = "0.1.4"
+dependencies = [
+ "anchor-lang",
+ "bincode",
+ "serde",
+ "serde_derive",
+ "solana-program",
+]
+
+[[package]]
+name = "jito-protos"
+version = "1.16.18"
+dependencies = [
+ "bytes",
+ "prost 0.11.9",
+ "prost-types 0.11.9",
+ "protobuf-src",
+ "tonic 0.8.3",
+ "tonic-build 0.8.4",
+]
+
+[[package]]
+name = "jito-tip-distribution"
+version = "0.1.4"
+dependencies = [
+ "anchor-lang",
+ "default-env",
+ "jito-programs-vote-state",
+ "solana-program",
+ "solana-security-txt",
+]
+
+[[package]]
+name = "jito-tip-payment"
+version = "0.1.4"
+dependencies = [
+ "anchor-lang",
+ "default-env",
+ "solana-security-txt",
+]
+
[[package]]
name = "jobserver"
version = "0.1.24"
@@ -3051,7 +3305,7 @@ version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a"
dependencies = [
- "bitflags",
+ "bitflags 1.3.2",
"cfg-if 1.0.0",
"libc",
"memoffset 0.7.1",
@@ -3316,7 +3570,7 @@ version = "0.10.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d"
dependencies = [
- "bitflags",
+ "bitflags 1.3.2",
"cfg-if 1.0.0",
"foreign-types",
"libc",
@@ -3793,6 +4047,19 @@ dependencies = [
"unicode-ident",
]
+[[package]]
+name = "proc-macro2-diagnostics"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada"
+dependencies = [
+ "proc-macro2 1.0.60",
+ "quote 1.0.28",
+ "syn 1.0.109",
+ "version_check",
+ "yansi",
+]
+
[[package]]
name = "proptest"
version = "1.2.0"
@@ -3800,7 +4067,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65"
dependencies = [
"bit-set",
- "bitflags",
+ "bitflags 1.3.2",
"byteorder",
"lazy_static",
"num-traits",
@@ -4196,7 +4463,7 @@ version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff"
dependencies = [
- "bitflags",
+ "bitflags 1.3.2",
]
[[package]]
@@ -4205,7 +4472,7 @@ version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29"
dependencies = [
- "bitflags",
+ "bitflags 1.3.2",
]
[[package]]
@@ -4406,7 +4673,7 @@ version = "0.37.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8bbfc1d1c7c40c01715f47d71444744a81669ca84e8b63e25a55e169b1f86433"
dependencies = [
- "bitflags",
+ "bitflags 1.3.2",
"errno",
"io-lifetimes",
"libc",
@@ -4564,7 +4831,7 @@ version = "2.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87"
dependencies = [
- "bitflags",
+ "bitflags 1.3.2",
"core-foundation",
"core-foundation-sys",
"libc",
@@ -4937,7 +5204,7 @@ dependencies = [
"Inflector",
"base64 0.21.2",
"bincode",
- "bs58",
+ "bs58 0.4.0",
"bv",
"lazy_static",
"serde",
@@ -5086,12 +5353,15 @@ dependencies = [
"futures 0.3.28",
"solana-banks-interface",
"solana-client",
+ "solana-gossip",
"solana-runtime",
"solana-sdk",
"solana-send-transaction-service",
+ "solana-streamer",
"tarpc",
"tokio",
"tokio-serde",
+ "tokio-stream",
]
[[package]]
@@ -5210,6 +5480,26 @@ dependencies = [
"tempfile",
]
+[[package]]
+name = "solana-bundle"
+version = "1.16.18"
+dependencies = [
+ "anchor-lang",
+ "assert_matches",
+ "itertools",
+ "log",
+ "serde",
+ "solana-ledger",
+ "solana-logger",
+ "solana-measure",
+ "solana-poh",
+ "solana-program-runtime",
+ "solana-runtime",
+ "solana-sdk",
+ "solana-transaction-status",
+ "thiserror",
+]
+
[[package]]
name = "solana-cargo-build-bpf"
version = "1.16.18"
@@ -5299,7 +5589,7 @@ name = "solana-cli"
version = "1.16.18"
dependencies = [
"bincode",
- "bs58",
+ "bs58 0.4.0",
"clap 2.33.3",
"console",
"const_format",
@@ -5496,9 +5786,10 @@ dependencies = [
name = "solana-core"
version = "1.16.18"
dependencies = [
+ "anchor-lang",
"base64 0.21.2",
"bincode",
- "bs58",
+ "bs58 0.4.0",
"chrono",
"crossbeam-channel",
"dashmap 4.0.2",
@@ -5507,12 +5798,17 @@ dependencies = [
"fs_extra",
"histogram",
"itertools",
+ "jito-protos",
+ "jito-tip-distribution",
+ "jito-tip-payment",
"lazy_static",
"log",
"lru",
"matches",
"min-max-heap",
"num_enum 0.6.1",
+ "prost 0.11.9",
+ "prost-types 0.11.9",
"rand 0.7.3",
"rand_chacha 0.2.2",
"raptorq",
@@ -5525,6 +5821,7 @@ dependencies = [
"serial_test",
"solana-address-lookup-table-program",
"solana-bloom",
+ "solana-bundle",
"solana-client",
"solana-entry",
"solana-frozen-abi",
@@ -5539,6 +5836,7 @@ dependencies = [
"solana-perf",
"solana-poh",
"solana-program-runtime",
+ "solana-program-test",
"solana-rayon-threadlimit",
"solana-rpc",
"solana-rpc-client-api",
@@ -5561,6 +5859,8 @@ dependencies = [
"test-case",
"thiserror",
"tokio",
+ "tonic 0.8.3",
+ "tonic-build 0.8.4",
"trees",
]
@@ -5669,7 +5969,7 @@ dependencies = [
"ahash 0.8.3",
"blake3",
"block-buffer 0.10.4",
- "bs58",
+ "bs58 0.4.0",
"bv",
"byteorder",
"cc",
@@ -5753,7 +6053,7 @@ dependencies = [
name = "solana-geyser-plugin-manager"
version = "1.16.18"
dependencies = [
- "bs58",
+ "bs58 0.4.0",
"crossbeam-channel",
"json5",
"jsonrpc-core",
@@ -5861,7 +6161,7 @@ dependencies = [
name = "solana-keygen"
version = "1.16.18"
dependencies = [
- "bs58",
+ "bs58 0.4.0",
"clap 3.2.23",
"dirs-next",
"num_cpus",
@@ -5880,8 +6180,8 @@ version = "1.16.18"
dependencies = [
"assert_matches",
"bincode",
- "bitflags",
- "bs58",
+ "bitflags 1.3.2",
+ "bs58 0.4.0",
"byteorder",
"chrono",
"chrono-humanize",
@@ -5943,7 +6243,7 @@ name = "solana-ledger-tool"
version = "1.16.18"
dependencies = [
"assert_cmd",
- "bs58",
+ "bs58 0.4.0",
"bytecount",
"chrono",
"clap 2.33.3",
@@ -6224,11 +6524,11 @@ dependencies = [
"assert_matches",
"base64 0.21.2",
"bincode",
- "bitflags",
+ "bitflags 1.3.2",
"blake3",
"borsh 0.10.3",
"borsh 0.9.3",
- "bs58",
+ "bs58 0.4.0",
"bv",
"bytemuck",
"cc",
@@ -6406,7 +6706,7 @@ version = "1.16.18"
dependencies = [
"base64 0.21.2",
"bincode",
- "bs58",
+ "bs58 0.4.0",
"crossbeam-channel",
"dashmap 4.0.2",
"itertools",
@@ -6426,6 +6726,7 @@ dependencies = [
"soketto",
"solana-account-decoder",
"solana-address-lookup-table-program",
+ "solana-bundle",
"solana-client",
"solana-entry",
"solana-faucet",
@@ -6436,6 +6737,7 @@ dependencies = [
"solana-net-utils",
"solana-perf",
"solana-poh",
+ "solana-program-runtime",
"solana-rayon-threadlimit",
"solana-rpc-client-api",
"solana-runtime",
@@ -6466,7 +6768,7 @@ dependencies = [
"async-trait",
"base64 0.21.2",
"bincode",
- "bs58",
+ "bs58 0.4.0",
"crossbeam-channel",
"futures 0.3.28",
"indicatif",
@@ -6492,7 +6794,7 @@ name = "solana-rpc-client-api"
version = "1.16.18"
dependencies = [
"base64 0.21.2",
- "bs58",
+ "bs58 0.4.0",
"jsonrpc-core",
"reqwest",
"semver 1.0.17",
@@ -6500,6 +6802,8 @@ dependencies = [
"serde_derive",
"serde_json",
"solana-account-decoder",
+ "solana-bundle",
+ "solana-runtime",
"solana-sdk",
"solana-transaction-status",
"solana-version",
@@ -6529,13 +6833,14 @@ name = "solana-rpc-test"
version = "1.16.18"
dependencies = [
"bincode",
- "bs58",
+ "bs58 0.4.0",
"crossbeam-channel",
"futures-util",
"log",
"reqwest",
"serde",
"serde_json",
+ "serial_test",
"solana-account-decoder",
"solana-client",
"solana-logger",
@@ -6629,13 +6934,14 @@ dependencies = [
name = "solana-sdk"
version = "1.16.18"
dependencies = [
+ "anchor-lang",
"anyhow",
"assert_matches",
"base64 0.21.2",
"bincode",
- "bitflags",
+ "bitflags 1.3.2",
"borsh 0.10.3",
- "bs58",
+ "bs58 0.4.0",
"bytemuck",
"byteorder",
"chrono",
@@ -6685,13 +6991,19 @@ dependencies = [
name = "solana-sdk-macro"
version = "1.16.18"
dependencies = [
- "bs58",
+ "bs58 0.4.0",
"proc-macro2 1.0.60",
"quote 1.0.28",
"rustversion",
"syn 2.0.18",
]
+[[package]]
+name = "solana-security-txt"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "468aa43b7edb1f9b7b7b686d5c3aeb6630dc1708e86e31343499dd5c4d775183"
+
[[package]]
name = "solana-send-transaction-service"
version = "1.16.18"
@@ -6699,11 +7011,13 @@ dependencies = [
"crossbeam-channel",
"log",
"solana-client",
+ "solana-gossip",
"solana-logger",
"solana-measure",
"solana-metrics",
"solana-runtime",
"solana-sdk",
+ "solana-streamer",
"solana-tpu-client",
]
@@ -6777,7 +7091,7 @@ name = "solana-storage-proto"
version = "1.16.18"
dependencies = [
"bincode",
- "bs58",
+ "bs58 0.4.0",
"enum-iterator",
"prost 0.11.9",
"protobuf-src",
@@ -6889,6 +7203,36 @@ dependencies = [
"solana-sdk",
]
+[[package]]
+name = "solana-tip-distributor"
+version = "1.16.18"
+dependencies = [
+ "anchor-lang",
+ "clap 4.1.11",
+ "env_logger",
+ "futures 0.3.28",
+ "im",
+ "itertools",
+ "jito-tip-distribution",
+ "jito-tip-payment",
+ "log",
+ "num-traits",
+ "serde",
+ "serde_json",
+ "solana-client",
+ "solana-genesis-utils",
+ "solana-ledger",
+ "solana-merkle-tree",
+ "solana-metrics",
+ "solana-program",
+ "solana-rpc-client-api",
+ "solana-runtime",
+ "solana-sdk",
+ "solana-stake-program",
+ "thiserror",
+ "tokio",
+]
+
[[package]]
name = "solana-tokens"
version = "1.16.18"
@@ -6980,7 +7324,7 @@ dependencies = [
"base64 0.21.2",
"bincode",
"borsh 0.10.3",
- "bs58",
+ "bs58 0.4.0",
"lazy_static",
"log",
"serde",
@@ -7078,6 +7422,7 @@ dependencies = [
"symlink",
"thiserror",
"tikv-jemallocator",
+ "tonic 0.8.3",
]
[[package]]
@@ -7139,7 +7484,7 @@ dependencies = [
name = "solana-zk-keygen"
version = "1.16.18"
dependencies = [
- "bs58",
+ "bs58 0.4.0",
"clap 3.2.23",
"dirs-next",
"num_cpus",
@@ -7581,7 +7926,7 @@ version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "225e483f02d0ad107168dc57381a8a40c3aeea6abe47f37506931f861643cfa8"
dependencies = [
- "bitflags",
+ "bitflags 1.3.2",
"byteorder",
"libc",
"thiserror",
@@ -8040,6 +8385,7 @@ dependencies = [
"pin-project",
"prost 0.11.9",
"prost-derive 0.11.9",
+ "rustls-native-certs",
"rustls-pemfile 1.0.0",
"tokio",
"tokio-rustls 0.23.3",
@@ -8050,6 +8396,7 @@ dependencies = [
"tower-service",
"tracing",
"tracing-futures",
+ "webpki-roots",
]
[[package]]
@@ -8103,7 +8450,7 @@ version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858"
dependencies = [
- "bitflags",
+ "bitflags 1.3.2",
"bytes",
"futures-core",
"futures-util",
@@ -8835,6 +9182,12 @@ dependencies = [
"linked-hash-map",
]
+[[package]]
+name = "yansi"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
+
[[package]]
name = "yasna"
version = "0.5.0"
diff --git a/Cargo.toml b/Cargo.toml
index a1981fe76f..546e76d7e7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -11,6 +11,7 @@ members = [
"bench-tps",
"bloom",
"bucket_map",
+ "bundle",
"clap-utils",
"clap-v3-utils",
"cli",
@@ -32,6 +33,7 @@ members = [
"geyser-plugin-manager",
"gossip",
"install",
+ "jito-protos",
"keygen",
"ledger",
"ledger-tool",
@@ -93,6 +95,7 @@ members = [
"streamer",
"test-validator",
"thin-client",
+ "tip-distributor",
"tokens",
"tpu-client",
"transaction-dos",
@@ -107,6 +110,8 @@ members = [
]
exclude = [
+ "anchor",
+ "jito-programs",
"programs/sbf",
]
@@ -125,6 +130,7 @@ edition = "2021"
aes-gcm-siv = "0.10.3"
ahash = "0.8.3"
anyhow = "1.0.71"
+anchor-lang = { path = "anchor/lang" }
ark-bn254 = "0.4.0"
ark-ec = "0.4.0"
ark-ff = "0.4.0"
@@ -210,6 +216,9 @@ indicatif = "0.17.4"
Inflector = "0.11.4"
itertools = "0.10.5"
jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"] }
+jito-protos = { path = "jito-protos", version = "=1.16.18" }
+jito-tip-distribution = { path = "jito-programs/mev-programs/programs/tip-distribution", features = ["no-entrypoint"] }
+jito-tip-payment = { path = "jito-programs/mev-programs/programs/tip-payment", features = ["no-entrypoint"] }
js-sys = "0.3.63"
json5 = "0.4.1"
jsonrpc-core = "18.0.0"
@@ -298,6 +307,7 @@ solana-bench-tps = { path = "bench-tps", version = "=1.16.18" }
solana-bloom = { path = "bloom", version = "=1.16.18" }
solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=1.16.18" }
solana-bucket-map = { path = "bucket_map", version = "=1.16.18" }
+solana-bundle = { path = "bundle", version = "=1.16.18" }
solana-connection-cache = { path = "connection-cache", version = "=1.16.18", default-features = false }
solana-clap-utils = { path = "clap-utils", version = "=1.16.18" }
solana-clap-v3-utils = { path = "clap-v3-utils", version = "=1.16.18" }
diff --git a/README.md b/README.md
index 4fccacf2ba..750e797895 100644
--- a/README.md
+++ b/README.md
@@ -4,142 +4,9 @@
-[![Solana crate](https://img.shields.io/crates/v/solana-core.svg)](https://crates.io/crates/solana-core)
-[![Solana documentation](https://docs.rs/solana-core/badge.svg)](https://docs.rs/solana-core)
-[![Build status](https://badge.buildkite.com/8cc350de251d61483db98bdfc895b9ea0ac8ffa4a32ee850ed.svg?branch=master)](https://buildkite.com/solana-labs/solana/builds?branch=master)
-[![codecov](https://codecov.io/gh/solana-labs/solana/branch/master/graph/badge.svg)](https://codecov.io/gh/solana-labs/solana)
+[![Build status](https://badge.buildkite.com/3a7c88c0f777e1a0fddacc190823565271ae4c251ef78d83a8.svg)](https://buildkite.com/jito/jito-solana)
-# Building
+# About
+This repository contains Jito's fork of the Solana validator.
-## **1. Install rustc, cargo and rustfmt.**
-
-```bash
-$ curl https://sh.rustup.rs -sSf | sh
-$ source $HOME/.cargo/env
-$ rustup component add rustfmt
-```
-
-When building the master branch, please make sure you are using the latest stable rust version by running:
-
-```bash
-$ rustup update
-```
-
-When building a specific release branch, you should check the rust version in `ci/rust-version.sh` and if necessary, install that version by running:
-```bash
-$ rustup install VERSION
-```
-Note that if this is not the latest rust version on your machine, cargo commands may require an [override](https://rust-lang.github.io/rustup/overrides.html) in order to use the correct version.
-
-On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, protobuf etc.
-
-On Ubuntu:
-```bash
-$ sudo apt-get update
-$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang cmake make libprotobuf-dev protobuf-compiler
-```
-
-On Fedora:
-```bash
-$ sudo dnf install openssl-devel systemd-devel pkg-config zlib-devel llvm clang cmake make protobuf-devel protobuf-compiler perl-core
-```
-
-## **2. Download the source code.**
-
-```bash
-$ git clone https://github.com/solana-labs/solana.git
-$ cd solana
-```
-
-## **3. Build.**
-
-```bash
-$ ./cargo build
-```
-
-# Testing
-
-**Run the test suite:**
-
-```bash
-$ ./cargo test
-```
-
-### Starting a local testnet
-Start your own testnet locally, instructions are in the [online docs](https://docs.solana.com/cluster/bench-tps).
-
-### Accessing the remote development cluster
-* `devnet` - stable public cluster for development accessible via
-devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://docs.solana.com/clusters)
-
-# Benchmarking
-
-First, install the nightly build of rustc. `cargo bench` requires the use of the
-unstable features only available in the nightly build.
-
-```bash
-$ rustup install nightly
-```
-
-Run the benchmarks:
-
-```bash
-$ cargo +nightly bench
-```
-
-# Release Process
-
-The release process for this project is described [here](RELEASE.md).
-
-# Code coverage
-
-To generate code coverage statistics:
-
-```bash
-$ scripts/coverage.sh
-$ open target/cov/lcov-local/index.html
-```
-
-Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
-productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
-some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running
-the test suite should indicate that your change didn't *infringe* on anyone else's solutions. Adding a
-test *protects* your solution from future changes. Say you don't understand why a line of code exists,
-try deleting it and running the unit-tests. The nearest test failure should tell you what problem
-was solved by that code. If no test fails, go ahead and submit a Pull Request that asks, "what
-problem is solved by this code?" On the other hand, if a test does fail and you can think of a
-better way to solve the same problem, a Pull Request with your solution would most certainly be
-welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
-send us that patch!
-
-# Disclaimer
-
-All claims, content, designs, algorithms, estimates, roadmaps,
-specifications, and performance measurements described in this project
-are done with the Solana Labs, Inc. (“SL”) good faith efforts. It is up to
-the reader to check and validate their accuracy and truthfulness.
-Furthermore, nothing in this project constitutes a solicitation for
-investment.
-
-Any content produced by SL or developer resources that SL provides are
-for educational and inspirational purposes only. SL does not encourage,
-induce or sanction the deployment, integration or use of any such
-applications (including the code comprising the Solana blockchain
-protocol) in violation of applicable laws or regulations and hereby
-prohibits any such deployment, integration or use. This includes the use of
-any such applications by the reader (a) in violation of export control
-or sanctions laws of the United States or any other applicable
-jurisdiction, (b) if the reader is located in or ordinarily resident in
-a country or territory subject to comprehensive sanctions administered
-by the U.S. Office of Foreign Assets Control (OFAC), or (c) if the
-reader is or is working on behalf of a Specially Designated National
-(SDN) or a person subject to similar blocking or denied party
-prohibitions.
-
-The reader should be aware that U.S. export control and sanctions laws prohibit
-U.S. persons (and other persons that are subject to such laws) from transacting
-with persons in certain countries and territories or that are on the SDN list.
-Accordingly, there is a risk to individuals that other persons using any of the
-code contained in this repo, or a derivation thereof, may be sanctioned persons
-and that transactions with such persons would be a violation of U.S. export
-controls and sanctions law.
+We recommend checking out our [Gitbook](https://jito-foundation.gitbook.io/mev/jito-solana/building-the-software) for more detailed instructions on building and running Jito-Solana.
diff --git a/SECURITY.md b/SECURITY.md
deleted file mode 100644
index 48326f1497..0000000000
--- a/SECURITY.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# Security Policy
-
-1. [Reporting security problems](#reporting)
-4. [Security Bug Bounties](#bounty)
-2. [Incident Response Process](#process)
-
-
-## Reporting security problems in the Solana Labs Validator Client
-
-**DO NOT CREATE A GITHUB ISSUE** to report a security problem.
-
-Instead please use this [Report a Vulnerability](https://github.com/solana-labs/solana/security/advisories/new) link.
-Provide a helpful title, detailed description of the vulnerability and an exploit
-proof-of-concept. Speculative submissions without proof-of-concept will be closed
-with no further consideration.
-
-If you haven't done so already, please **enable two-factor auth** in your GitHub account.
-
-Expect a response as fast as possible in the advisory, typically within 72 hours.
-
---
-
-If you do not receive a response in the advisory, send an email to
-security@solanalabs.com with the full URL of the advisory you have created. DO NOT
-include attachments or provide detail sufficient for exploitation regarding the
-security issue in this email. **Only provide such details in the advisory**.
-
-If you do not receive a response from security@solanalabs.com please followup with
-the team directly. You can do this in the `#core-technology` channel of the
-[Solana Tech discord server](https://solana.com/discord), by pinging the `Solana Labs`
-role in the channel and referencing the fact that you submitted a security problem.
-
-
-## Incident Response Process
-
-In case an incident is discovered or reported, the following process will be
-followed to contain, respond and remediate:
-
-### 1. Accept the new report
-In response a newly reported security problem, a member of the
-`solana-labs/admins` group will accept the report to turn it into a draft
-advisory. The `solana-labs/security-incident-response` group should be added to
-the draft security advisory, and create a private fork of the repository (grey
-button towards the bottom of the page) if necessary.
-
-If the advisory is the result of an audit finding, follow the same process as above but add the auditor's github user(s) and begin the title with "[Audit]".
-
-If the report is out of scope, a member of the `solana-labs/admins` group will
-comment as such and then close the report.
-
-### 2. Triage
-Within the draft security advisory, discuss and determine the severity of the issue. If necessary, members of the solana-labs/security-incident-response group may add other github users to the advisory to assist.
-If it is determined that this not a critical network issue then the advisory should be closed and if more follow-up is required a normal Solana public github issue should be created.
-
-### 3. Prepare Fixes
-For the affected branches, typically all three (edge, beta and stable), prepare a fix for the issue and push them to the corresponding branch in the private repository associated with the draft security advisory.
-There is no CI available in the private repository so you must build from source and manually verify fixes.
-Code review from the reporter is ideal, as well as from multiple members of the core development team.
-
-### 4. Notify Security Group Validators
-Once an ETA is available for the fix, a member of the solana-labs/security-incident-response group should notify the validators so they can prepare for an update using the "Solana Red Alert" notification system.
-The teams are all over the world and it's critical to provide actionable information at the right time. Don't be the person that wakes everybody up at 2am when a fix won't be available for hours.
-
-### 5. Ship the patch
-Once the fix is accepted, a member of the solana-labs/security-incident-response group should prepare a single patch file for each affected branch. The commit title for the patch should only contain the advisory id, and not disclose any further details about the incident.
-Copy the patches to https://release.solana.com/ under a subdirectory named after the advisory id (example: https://release.solana.com/GHSA-hx59-f5g4-jghh/v1.4.patch). Contact a member of the solana-labs/admins group if you require access to release.solana.com
-Using the "Solana Red Alert" channel:
- a) Notify validators that there's an issue and a patch will be provided in X minutes
- b) If X minutes expires and there's no patch, notify of the delay and provide a new ETA
- c) Provide links to patches of https://release.solana.com/ for each affected branch
-Validators can be expected to build the patch from source against the latest release for the affected branch.
-Since the software version will not change after the patch is applied, request that each validator notify in the existing channel once they've updated. Manually monitor the roll out until a sufficient amount of stake has updated - typically at least 33.3% or 66.6% depending on the issue.
-
-### 6. Public Disclosure and Release
-Once the fix has been deployed to the security group validators, the patches from the security advisory may be merged into the main source repository. A new official release for each affected branch should be shipped and all validators requested to upgrade as quickly as possible.
-
-### 7. Security Advisory Bounty Accounting and Cleanup
-If this issue is [eligible](#eligibility) for a bounty, prefix the title of the
-security advisory with one of the following, depending on the severity:
-- [Bounty Category: Critical: Loss of Funds]
-- [Bounty Category: Critical: Consensus / Safety Violations]
-- [Bounty Category: Critical: Liveness / Loss of Availability]
-- [Bounty Category: Critical: DoS Attacks]
-- [Bounty Category: Supply Chain Attacks]
-- [Bounty Category: RPC]
-
-Confirm with the reporter that they agree with the severity assessment, and discuss as required to reach a conclusion.
-
-We currently do not use the Github workflow to publish security advisories. Once the issue and fix have been disclosed, and a bounty category is assessed if appropriate, the GitHub security advisory is no longer needed and can be closed.
-
-
-## Security Bug Bounties
-The Solana Foundation offer bounties for critical Solana security issues. Please
-see below for more details. Either a demonstration or a valid bug report is all
-that's necessary to submit a bug bounty. A patch to fix the issue isn't
-required.
-
-#### Loss of Funds:
-$2,000,000 USD in locked SOL tokens (locked for 12 months)
-* Theft of funds without users signature from any account
-* Theft of funds without users interaction in system, token, stake, vote programs
-* Theft of funds that requires users signature - creating a vote program that drains the delegated stakes.
-
-#### Consensus/Safety Violations:
-$1,000,000 USD in locked SOL tokens (locked for 12 months)
-* Consensus safety violation
-* Tricking a validator to accept an optimistic confirmation or rooted slot without a double vote, etc.
-
-#### Liveness / Loss of Availability:
-$400,000 USD in locked SOL tokens (locked for 12 months)
-* Whereby consensus halts and requires human intervention
-* Eclipse attacks,
-* Remote attacks that partition the network,
-
-#### DoS Attacks:
-$100,000 USD in locked SOL tokens (locked for 12 months)
-* Remote resource exaustion via Non-RPC protocols
-
-#### Supply Chain Attacks:
-$100,000 USD in locked SOL tokens (locked for 12 months)
-* Non-social attacks against source code change management, automated testing, release build, release publication and release hosting infrastructure of the monorepo.
-
-#### RPC DoS/Crashes:
-$5,000 USD in locked SOL tokens (locked for 12 months)
-* RPC attacks
-
-### Out of Scope:
-The following components are out of scope for the bounty program
-* Metrics: `/metrics` in the monorepo as well as https://metrics.solana.com
-* Any encrypted credentials, auth tokens, etc. checked into the repo
-* Bugs in dependencies. Please take them upstream!
-* Attacks that require social engineering
-* Any undeveloped automated tooling (scanners, etc) results. (OK with developed PoC)
-* Any asset whose source code does not exist in this repository (including, but not limited
-to, any and all web properties not explicitly listed on this page)
-
-### Eligibility:
-* Submissions _MUST_ include an exploit proof-of-concept to be considered eligible
-* The participant submitting the bug report shall follow the process outlined within this document
-* Valid exploits can be eligible even if they are not successfully executed on a public cluster
-* Multiple submissions for the same class of exploit are still eligible for compensation, though may be compensated at a lower rate, however these will be assessed on a case-by-case basis
-* Participants must complete KYC and sign the participation agreement here when the registrations are open https://solana.foundation/kyc. Security exploits will still be assessed and open for submission at all times. This needs only be done prior to distribution of tokens.
-
-### Duplicate Reports
-Compensation for duplicative reports will be split among reporters with first to report taking priority using the following equation
-```
-R: total reports
-ri: report priority
-bi: bounty share
-
-bi = 2 ^ (R - ri) / ((2^R) - 1)
-```
-#### Bounty Split Examples
-| total reports | priority | share | | total reports | priority | share | | total reports | priority | share |
-| ------------- | -------- | -----: | - | ------------- | -------- | -----: | - | ------------- | -------- | -----: |
-| 1 | 1 | 100% | | 2 | 1 | 66.67% | | 5 | 1 | 51.61% |
-| | | | | 2 | 2 | 33.33% | | 5 | 2 | 25.81% |
-| 4 | 1 | 53.33% | | | | | | 5 | 3 | 12.90% |
-| 4 | 2 | 26.67% | | 3 | 1 | 57.14% | | 5 | 4 | 6.45% |
-| 4 | 3 | 13.33% | | 3 | 2 | 28.57% | | 5 | 5 | 3.23% |
-| 4 | 4 | 6.67% | | 3 | 3 | 14.29% | | | | |
-
-### Payment of Bug Bounties:
-* Bounties are currently awarded on a rolling/weekly basis and paid out within 30 days upon receipt of an invoice.
-* The SOL/USD conversion rate used for payments is the market price of SOL (denominated in USD) at the end of the day the invoice is submitted by the researcher.
-* The reference for this price is the Closing Price given by Coingecko.com on that date given here: https://www.coingecko.com/en/coins/solana/historical_data/usd#panel
-* Bug bounties that are paid out in SOL are paid to stake accounts with a lockup expiring 12 months from the date of delivery of SOL.
diff --git a/anchor b/anchor
new file mode 160000
index 0000000000..4f52f41cbe
--- /dev/null
+++ b/anchor
@@ -0,0 +1 @@
+Subproject commit 4f52f41cbeafb77d85c7b712516dfbeb5b86dd5f
diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs
index f817df75e5..5ac679bf16 100644
--- a/banking-bench/src/main.rs
+++ b/banking-bench/src/main.rs
@@ -9,6 +9,7 @@ use {
solana_core::{
banking_stage::BankingStage,
banking_trace::{BankingPacketBatch, BankingTracer, BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT},
+ bundle_stage::bundle_account_locker::BundleAccountLocker,
},
solana_gossip::cluster_info::{ClusterInfo, Node},
solana_ledger::{
@@ -36,6 +37,7 @@ use {
solana_streamer::socket::SocketAddrSpace,
solana_tpu_client::tpu_client::DEFAULT_TPU_CONNECTION_POOL_SIZE,
std::{
+ collections::HashSet,
sync::{atomic::Ordering, Arc, RwLock},
thread::sleep,
time::{Duration, Instant},
@@ -57,9 +59,15 @@ fn check_txs(
let now = Instant::now();
let mut no_bank = false;
loop {
- if let Ok((_bank, (entry, _tick_height))) = receiver.recv_timeout(Duration::from_millis(10))
+ if let Ok(WorkingBankEntry {
+ bank: _,
+ entries_ticks,
+ }) = receiver.recv_timeout(Duration::from_millis(10))
{
- total += entry.transactions.len();
+ total += entries_ticks
+ .iter()
+ .map(|e| e.0.transactions.len())
+                .sum::<usize>();
}
if total >= ref_tx_count {
break;
@@ -459,6 +467,8 @@ fn main() {
Arc::new(connection_cache),
bank_forks.clone(),
&Arc::new(PrioritizationFeeCache::new(0u64)),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
// This is so that the signal_receiver does not go out of scope after the closure.
diff --git a/banks-server/Cargo.toml b/banks-server/Cargo.toml
index 4c29dcd30d..ffa0ac7ceb 100644
--- a/banks-server/Cargo.toml
+++ b/banks-server/Cargo.toml
@@ -15,12 +15,17 @@ crossbeam-channel = { workspace = true }
futures = { workspace = true }
solana-banks-interface = { workspace = true }
solana-client = { workspace = true }
+solana-gossip = { workspace = true }
solana-runtime = { workspace = true }
solana-sdk = { workspace = true }
solana-send-transaction-service = { workspace = true }
tarpc = { workspace = true, features = ["full"] }
tokio = { workspace = true, features = ["full"] }
tokio-serde = { workspace = true, features = ["bincode"] }
+tokio-stream = { workspace = true }
+
+[dev-dependencies]
+solana-streamer = { workspace = true }
[lib]
crate-type = ["lib"]
diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs
index cee47e8108..0eb057b2fc 100644
--- a/banks-server/src/banks_server.rs
+++ b/banks-server/src/banks_server.rs
@@ -8,6 +8,7 @@ use {
TransactionSimulationDetails, TransactionStatus,
},
solana_client::connection_cache::ConnectionCache,
+ solana_gossip::cluster_info::ClusterInfo,
solana_runtime::{
bank::{Bank, TransactionExecutionResult, TransactionSimulationResult},
bank_forks::BankForks,
@@ -439,7 +440,7 @@ pub async fn start_local_server(
pub async fn start_tcp_server(
listen_addr: SocketAddr,
- tpu_addr: SocketAddr,
+    cluster_info: Arc<ClusterInfo>,
     bank_forks: Arc<RwLock<BankForks>>,
     block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
     connection_cache: Arc<ConnectionCache>,
@@ -464,7 +465,7 @@ pub async fn start_tcp_server(
let (sender, receiver) = unbounded();
     SendTransactionService::new::<NullTpuInfo>(
- tpu_addr,
+ cluster_info.clone(),
&bank_forks,
None,
receiver,
diff --git a/bootstrap b/bootstrap
new file mode 100755
index 0000000000..4f8955e5ca
--- /dev/null
+++ b/bootstrap
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+set -eu
+
+BANK_HASH=$(cargo run --release --bin solana-ledger-tool -- -l config/bootstrap-validator bank-hash)
+
+# increase max file handle limit
+ulimit -Hn 1000000
+
+# if above fails, run:
+# sudo bash -c 'echo "* hard nofile 1000000" >> /etc/security/limits.conf'
+
+# NOTE: make sure tip-payment and tip-distribution program are deployed using the correct pubkeys
+RUST_LOG=INFO,solana_core::bundle_stage=DEBUG \
+ NDEBUG=1 ./multinode-demo/bootstrap-validator.sh \
+ --wait-for-supermajority 0 \
+ --expected-bank-hash "$BANK_HASH" \
+ --block-engine-address http://127.0.0.1:1003 \
+ --block-engine-auth-service-address http://127.0.0.1:1005 \
+ --relayer-auth-service-address http://127.0.0.1:11226 \
+ --relayer-address http://127.0.0.1:11226 \
+ --rpc-pubsub-enable-block-subscription \
+ --enable-rpc-transaction-history \
+ --tip-payment-program-pubkey T1pyyaTNZsKv2WcRAB8oVnk93mLJw2XzjtVYqCsaHqt \
+ --tip-distribution-program-pubkey 4R3gSG8BpU4t19KYj8CfnbtRpnT8gtk4dvTHxVRwc2r7 \
+ --commission-bps 0 \
+ --shred-receiver-address 127.0.0.1:1002 \
+ --trust-relayer-packets \
+ --trust-block-engine-packets
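
The bootstrap script above assumes a ledger already exists at
config/bootstrap-validator (it reads the bank hash from it first) and that
the tip-payment and tip-distribution programs are deployed at the pubkeys
passed in. Once the validator is up, a quick sanity check against the
default local RPC port might be:

    # confirm the local bootstrap validator is serving RPC
    solana cluster-version --url http://127.0.0.1:8899
    solana validators --url http://127.0.0.1:8899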
diff --git a/bundle/Cargo.toml b/bundle/Cargo.toml
new file mode 100644
index 0000000000..e8bdd2cb4d
--- /dev/null
+++ b/bundle/Cargo.toml
@@ -0,0 +1,34 @@
+[package]
+name = "solana-bundle"
+description = "Library related to handling bundles"
+documentation = "https://docs.rs/solana-bundle"
+readme = "../README.md"
+version = { workspace = true }
+authors = { workspace = true }
+repository = { workspace = true }
+homepage = { workspace = true }
+license = { workspace = true }
+edition = { workspace = true }
+
+[dependencies]
+anchor-lang = { workspace = true }
+itertools = { workspace = true }
+log = { workspace = true }
+serde = { workspace = true }
+solana-ledger = { workspace = true }
+solana-logger = { workspace = true }
+solana-measure = { workspace = true }
+solana-poh = { workspace = true }
+solana-program-runtime = { workspace = true }
+solana-runtime = { workspace = true }
+solana-sdk = { workspace = true }
+solana-transaction-status = { workspace = true }
+thiserror = { workspace = true }
+
+[dev-dependencies]
+assert_matches = { workspace = true }
+solana-logger = { workspace = true }
+
+[lib]
+crate-type = ["lib"]
+name = "solana_bundle"
diff --git a/bundle/src/bundle_execution.rs b/bundle/src/bundle_execution.rs
new file mode 100644
index 0000000000..48a78194c2
--- /dev/null
+++ b/bundle/src/bundle_execution.rs
@@ -0,0 +1,1186 @@
+use {
+ itertools::izip,
+ log::*,
+ solana_ledger::token_balances::collect_token_balances,
+ solana_measure::{measure::Measure, measure_us},
+ solana_program_runtime::timings::ExecuteTimings,
+ solana_runtime::{
+ account_overrides::AccountOverrides,
+ accounts::TransactionLoadResult,
+ bank::{
+ Bank, LoadAndExecuteTransactionsOutput, TransactionBalances, TransactionExecutionResult,
+ },
+ transaction_batch::TransactionBatch,
+ },
+ solana_sdk::{
+ account::AccountSharedData,
+ bundle::SanitizedBundle,
+ pubkey::Pubkey,
+ saturating_add_assign,
+ signature::Signature,
+ transaction::{SanitizedTransaction, TransactionError, VersionedTransaction},
+ },
+ solana_transaction_status::{token_balances::TransactionTokenBalances, PreBalanceInfo},
+ std::{
+ cmp::{max, min},
+ time::{Duration, Instant},
+ },
+ thiserror::Error,
+};
+
+#[derive(Clone, Default)]
+pub struct BundleExecutionMetrics {
+ pub num_retries: u64,
+ pub collect_balances_us: u64,
+ pub load_execute_us: u64,
+ pub collect_pre_post_accounts_us: u64,
+ pub cache_accounts_us: u64,
+ pub execute_timings: ExecuteTimings,
+}
+
+/// Contains the results from executing each TransactionBatch with a final result associated with it
+/// Note that if !result.is_ok(), bundle_transaction_results will not contain the output for every transaction.
+pub struct LoadAndExecuteBundleOutput<'a> {
bundle_transaction_results: Vec<BundleTransactionsOutput<'a>>,
+ result: LoadAndExecuteBundleResult<()>,
+ metrics: BundleExecutionMetrics,
+}
+
+impl<'a> LoadAndExecuteBundleOutput<'a> {
+ pub fn executed_ok(&self) -> bool {
+ self.result.is_ok()
+ }
+
+ pub fn result(&self) -> &LoadAndExecuteBundleResult<()> {
+ &self.result
+ }
+
+ pub fn bundle_transaction_results_mut(&mut self) -> &mut [BundleTransactionsOutput<'a>] {
+ &mut self.bundle_transaction_results
+ }
+
+ pub fn bundle_transaction_results(&self) -> &[BundleTransactionsOutput<'a>] {
+ &self.bundle_transaction_results
+ }
+
+ pub fn executed_transaction_batches(&self) -> Vec<Vec<VersionedTransaction>> {
+ self.bundle_transaction_results
+ .iter()
+ .map(|br| br.executed_versioned_transactions())
+ .collect()
+ }
+
+ pub fn metrics(&self) -> BundleExecutionMetrics {
+ self.metrics.clone()
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum LoadAndExecuteBundleError {
+ #[error("Bundle execution timed out")]
+ ProcessingTimeExceeded(Duration),
+
+ #[error(
+ "A transaction in the bundle encountered a lock error: [signature={:?}, transaction_error={:?}]",
+ signature,
+ transaction_error
+ )]
+ LockError {
+ signature: Signature,
+ transaction_error: TransactionError,
+ },
+
+ #[error(
+ "A transaction in the bundle failed to execute: [signature={:?}, execution_result={:?}",
+ signature,
+ execution_result
+ )]
+ TransactionError {
+ signature: Signature,
+ // Box reduces the size between variants in the Error
+ execution_result: Box<TransactionExecutionResult>,
+ },
+
+ #[error("Invalid pre or post accounts")]
+ InvalidPreOrPostAccounts,
+}
+
+pub struct BundleTransactionsOutput<'a> {
+ transactions: &'a [SanitizedTransaction],
+ load_and_execute_transactions_output: LoadAndExecuteTransactionsOutput,
+ pre_balance_info: PreBalanceInfo,
+ post_balance_info: (TransactionBalances, TransactionTokenBalances),
+ // the length of the outer vector should be the same as transactions.len()
+ // for indices that didn't get executed, expect a None.
+ pre_tx_execution_accounts: Vec<Option<Vec<(Pubkey, AccountSharedData)>>>,
+ post_tx_execution_accounts: Vec<Option<Vec<(Pubkey, AccountSharedData)>>>,
+}
+
+impl<'a> BundleTransactionsOutput<'a> {
+ pub fn executed_versioned_transactions(&self) -> Vec<VersionedTransaction> {
+ self.transactions
+ .iter()
+ .zip(
+ self.load_and_execute_transactions_output
+ .execution_results
+ .iter(),
+ )
+ .filter_map(|(tx, exec_result)| {
+ exec_result
+ .was_executed()
+ .then_some(tx.to_versioned_transaction())
+ })
+ .collect()
+ }
+
+ pub fn executed_transactions(&self) -> Vec<&'a SanitizedTransaction> {
+ self.transactions
+ .iter()
+ .zip(
+ self.load_and_execute_transactions_output
+ .execution_results
+ .iter(),
+ )
+ .filter_map(|(tx, exec_result)| exec_result.was_executed().then_some(tx))
+ .collect()
+ }
+
+ pub fn load_and_execute_transactions_output(&self) -> &LoadAndExecuteTransactionsOutput {
+ &self.load_and_execute_transactions_output
+ }
+
+ pub fn transactions(&self) -> &[SanitizedTransaction] {
+ self.transactions
+ }
+
+ pub fn loaded_transactions_mut(&mut self) -> &mut [TransactionLoadResult] {
+ &mut self
+ .load_and_execute_transactions_output
+ .loaded_transactions
+ }
+
+ pub fn execution_results(&self) -> &[TransactionExecutionResult] {
+ &self.load_and_execute_transactions_output.execution_results
+ }
+
+ pub fn pre_balance_info(&mut self) -> &mut PreBalanceInfo {
+ &mut self.pre_balance_info
+ }
+
+ pub fn post_balance_info(&self) -> &(TransactionBalances, TransactionTokenBalances) {
+ &self.post_balance_info
+ }
+
+ pub fn pre_tx_execution_accounts(&self) -> &Vec<Option<Vec<(Pubkey, AccountSharedData)>>> {
+ &self.pre_tx_execution_accounts
+ }
+
+ pub fn post_tx_execution_accounts(&self) -> &Vec<Option<Vec<(Pubkey, AccountSharedData)>>> {
+ &self.post_tx_execution_accounts
+ }
+}
+
+pub type LoadAndExecuteBundleResult<T> = Result<T, LoadAndExecuteBundleError>;
+
+/// Return an Error if a transaction was executed and reverted
+/// NOTE: `execution_results` are zipped with `sanitized_txs` so it's expected a sanitized tx at
+/// position i has a corresponding execution result at position i within the `execution_results`
+/// slice
+pub fn check_bundle_execution_results<'a>(
+ execution_results: &'a [TransactionExecutionResult],
+ sanitized_txs: &'a [SanitizedTransaction],
+) -> Result<(), (&'a SanitizedTransaction, &'a TransactionExecutionResult)> {
+ for (exec_results, sanitized_tx) in execution_results.iter().zip(sanitized_txs) {
+ match exec_results {
+ TransactionExecutionResult::Executed { details, .. } => {
+ if details.status.is_err() {
+ return Err((sanitized_tx, exec_results));
+ }
+ }
+ TransactionExecutionResult::NotExecuted(e) => {
+ if !matches!(e, TransactionError::AccountInUse) {
+ return Err((sanitized_tx, exec_results));
+ }
+ }
+ }
+ }
+ Ok(())
+}
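To make the tolerance rule above concrete, here is a self-contained sketch (simplified stand-in enum, not the runtime's TransactionExecutionResult) of which outcomes poison a bundle:

// Simplified stand-ins for the four outcome shapes; illustration only.
enum Outcome {
    ExecutedOk,              // executed with Ok status
    ExecutedErr,             // executed but reverted
    NotExecutedAccountInUse, // lock conflict: retryable
    NotExecutedOther,        // any other TransactionError
}

// Mirrors check_bundle_execution_results: only a clean execution or an
// AccountInUse lock conflict lets the bundle keep going.
fn poisons_bundle(outcome: &Outcome) -> bool {
    !matches!(
        outcome,
        Outcome::ExecutedOk | Outcome::NotExecutedAccountInUse
    )
}

fn main() {
    assert!(!poisons_bundle(&Outcome::ExecutedOk));
    assert!(!poisons_bundle(&Outcome::NotExecutedAccountInUse)); // retried in a later batch
    assert!(poisons_bundle(&Outcome::ExecutedErr)); // one reverted tx fails the whole bundle
    assert!(poisons_bundle(&Outcome::NotExecutedOther));
}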
+
+/// Executing a bundle is somewhat complicated compared to executing single transactions. In order to
+/// avoid duplicate logic for execution and simulation, this function can be leveraged.
+///
+/// Assumptions for the caller:
+/// - all transactions were signed properly
+/// - user has deduplicated transactions inside the bundle
+///
+/// TODO (LB):
+/// - given a bundle with 3 transactions that write lock the following accounts: [A, B, C], on failure of B
+/// we should add in the BundleTransactionsOutput of A and C and return the error for B.
+#[allow(clippy::too_many_arguments)]
+pub fn load_and_execute_bundle<'a>(
+ bank: &Bank,
+ bundle: &'a SanitizedBundle,
+ // Max blockhash age
+ max_age: usize,
+ // Upper bound on execution time for a bundle
+ max_processing_time: &Duration,
+ // Execution data logging
+ enable_cpi_recording: bool,
+ enable_log_recording: bool,
+ enable_return_data_recording: bool,
+ enable_balance_recording: bool,
+ log_messages_bytes_limit: &Option<usize>,
+ // simulation will not use the Bank's account locks when building the TransactionBatch
+ // if simulating on an unfrozen bank, this is helpful to avoid stalling replay and use whatever
+ // state the accounts are in at the current time
+ is_simulation: bool,
+ account_overrides: Option<&mut AccountOverrides>,
+ // these must be the same length as the bundle's transactions
+ // allows one to read account state before and after execution of each transaction in the bundle
+ // will use AccountsOverride + Bank
+ pre_execution_accounts: &Vec<Option<Vec<Pubkey>>>,
+ post_execution_accounts: &Vec<Option<Vec<Pubkey>>>,
+) -> LoadAndExecuteBundleOutput<'a> {
+ if pre_execution_accounts.len() != post_execution_accounts.len()
+ || post_execution_accounts.len() != bundle.transactions.len()
+ {
+ return LoadAndExecuteBundleOutput {
+ bundle_transaction_results: vec![],
+ result: Err(LoadAndExecuteBundleError::InvalidPreOrPostAccounts),
+ metrics: BundleExecutionMetrics::default(),
+ };
+ }
+ let mut binding = AccountOverrides::default();
+ let account_overrides = account_overrides.unwrap_or(&mut binding);
+
+ let mut chunk_start = 0;
+ let start_time = Instant::now();
+
+ let mut bundle_transaction_results = vec![];
+ let mut metrics = BundleExecutionMetrics::default();
+
+ while chunk_start != bundle.transactions.len() {
+ if start_time.elapsed() > *max_processing_time {
+ trace!("bundle: {} took too long to execute", bundle.bundle_id);
+ return LoadAndExecuteBundleOutput {
+ bundle_transaction_results,
+ metrics,
+ result: Err(LoadAndExecuteBundleError::ProcessingTimeExceeded(
+ start_time.elapsed(),
+ )),
+ };
+ }
+
+ let chunk_end = min(bundle.transactions.len(), chunk_start.saturating_add(128));
+ let chunk = &bundle.transactions[chunk_start..chunk_end];
+
+ // Note: these batches are dropped after execution and before record/commit, which is atypical
+ // compared to BankingStage which holds account locks until record + commit to avoid race conditions with
+ // other BankingStage threads. However, the caller of this method, BundleConsumer, will use BundleAccountLocks
+ // to hold RW locks across all transactions in a bundle until it's processed.
+ let batch = if is_simulation {
+ bank.prepare_sequential_sanitized_batch_with_results_for_simulation(chunk)
+ } else {
+ bank.prepare_sequential_sanitized_batch_with_results(chunk)
+ };
+
+ debug!(
+ "bundle: {} batch num locks ok: {}",
+ bundle.bundle_id,
+ batch.lock_results().iter().filter(|lr| lr.is_ok()).count()
+ );
+
+ // Ensures that bundle lock results only return either:
+ // Ok(()) | Err(TransactionError::AccountInUse)
+ // If the error isn't one of those, bail out of bundle execution with a LockError.
+ if let Some((transaction, lock_failure)) = batch.check_bundle_lock_results() {
+ debug!(
+ "bundle: {} lock error; signature: {} error: {}",
+ bundle.bundle_id,
+ transaction.signature(),
+ lock_failure
+ );
+ return LoadAndExecuteBundleOutput {
+ bundle_transaction_results,
+ metrics,
+ result: Err(LoadAndExecuteBundleError::LockError {
+ signature: *transaction.signature(),
+ transaction_error: lock_failure.clone(),
+ }),
+ };
+ }
+
+ let mut pre_balance_info = PreBalanceInfo::default();
+ let (_, collect_balances_us) = measure_us!({
+ if enable_balance_recording {
+ pre_balance_info.native =
+ bank.collect_balances_with_cache(&batch, Some(account_overrides));
+ pre_balance_info.token = collect_token_balances(
+ bank,
+ &batch,
+ &mut pre_balance_info.mint_decimals,
+ Some(account_overrides),
+ );
+ }
+ });
+ saturating_add_assign!(metrics.collect_balances_us, collect_balances_us);
+
+ let end = min(
+ chunk_start.saturating_add(batch.sanitized_transactions().len()),
+ pre_execution_accounts.len(),
+ );
+
+ let m = Measure::start("accounts");
+ let accounts_requested = &pre_execution_accounts[chunk_start..end];
+ let pre_tx_execution_accounts =
+ get_account_transactions(bank, account_overrides, accounts_requested, &batch);
+ saturating_add_assign!(metrics.collect_pre_post_accounts_us, m.end_as_us());
+
+ let (mut load_and_execute_transactions_output, load_execute_us) = measure_us!(bank
+ .load_and_execute_transactions(
+ &batch,
+ max_age,
+ enable_cpi_recording,
+ enable_log_recording,
+ enable_return_data_recording,
+ &mut metrics.execute_timings,
+ Some(account_overrides),
+ *log_messages_bytes_limit,
+ ));
+ debug!(
+ "bundle id: {} loaded_transactions: {:?}",
+ bundle.bundle_id, load_and_execute_transactions_output.loaded_transactions
+ );
+ saturating_add_assign!(metrics.load_execute_us, load_execute_us);
+
+ // All transactions within a bundle are expected to be executable + not fail
+ // If any transactions executed and failed, or didn't execute due to
+ // unexpected failures (not locking related), bail out of bundle execution early.
+ if let Err((failing_tx, exec_result)) = check_bundle_execution_results(
+ load_and_execute_transactions_output
+ .execution_results
+ .as_slice(),
+ batch.sanitized_transactions(),
+ ) {
+ // TODO (LB): we should try to return partial results here for successful bundles in a parallel batch.
+ // given a bundle that write locks the following accounts [[A], [B], [C]]
+ // when B fails, we could return the execution results for A and C, but leave B out.
+ // however, if we have bundle that write locks accounts [[A_1], [A_2], [B], [C]] and B fails
+ // we'll get the results for A_1 but not [A_2], [B], [C] due to the way this loop executes.
+ debug!(
+ "bundle: {} execution error; signature: {} error: {:?}",
+ bundle.bundle_id,
+ failing_tx.signature(),
+ exec_result
+ );
+ return LoadAndExecuteBundleOutput {
+ bundle_transaction_results,
+ metrics,
+ result: Err(LoadAndExecuteBundleError::TransactionError {
+ signature: *failing_tx.signature(),
+ execution_result: Box::new(exec_result.clone()),
+ }),
+ };
+ }
+
+ // If none of the transactions were executed, it was most likely due to an AccountInUse error;
+ // retry so that all transactions in the bundle eventually execute.
+ if !load_and_execute_transactions_output
+ .execution_results
+ .iter()
+ .any(|r| r.was_executed())
+ {
+ saturating_add_assign!(metrics.num_retries, 1);
+ debug!(
+ "bundle: {} no transaction executed, retrying",
+ bundle.bundle_id
+ );
+ continue;
+ }
+
+ // Cache accounts so next iterations of loop can load cached state instead of using
+ // AccountsDB, which will contain stale account state because results aren't committed
+ // to the bank yet.
+ // NOTE: Bank::collect_accounts_to_store does not handle any state changes related to
+ // failed, non-nonce transactions.
+ let m = Measure::start("cache");
+ let accounts = bank.collect_accounts_to_store(
+ batch.sanitized_transactions(),
+ &load_and_execute_transactions_output.execution_results,
+ &mut load_and_execute_transactions_output.loaded_transactions,
+ );
+ for (pubkey, data) in accounts {
+ account_overrides.set_account(pubkey, Some(data.clone()));
+ }
+ saturating_add_assign!(metrics.cache_accounts_us, m.end_as_us());
+
+ let end = max(
+ chunk_start.saturating_add(batch.sanitized_transactions().len()),
+ post_execution_accounts.len(),
+ );
+
+ let m = Measure::start("accounts");
+ let accounts_requested = &post_execution_accounts[chunk_start..end];
+ let post_tx_execution_accounts =
+ get_account_transactions(bank, account_overrides, accounts_requested, &batch);
+ saturating_add_assign!(metrics.collect_pre_post_accounts_us, m.end_as_us());
+
+ let ((post_balances, post_token_balances), collect_balances_us) =
+ measure_us!(if enable_balance_recording {
+ let post_balances =
+ bank.collect_balances_with_cache(&batch, Some(account_overrides));
+ let post_token_balances = collect_token_balances(
+ bank,
+ &batch,
+ &mut pre_balance_info.mint_decimals,
+ Some(account_overrides),
+ );
+ (post_balances, post_token_balances)
+ } else {
+ (
+ TransactionBalances::default(),
+ TransactionTokenBalances::default(),
+ )
+ });
+ saturating_add_assign!(metrics.collect_balances_us, collect_balances_us);
+
+ let processing_end = batch.lock_results().iter().position(|lr| lr.is_err());
+ if let Some(end) = processing_end {
+ chunk_start = chunk_start.saturating_add(end);
+ } else {
+ chunk_start = chunk_end;
+ }
+
+ bundle_transaction_results.push(BundleTransactionsOutput {
+ transactions: chunk,
+ load_and_execute_transactions_output,
+ pre_balance_info,
+ post_balance_info: (post_balances, post_token_balances),
+ pre_tx_execution_accounts,
+ post_tx_execution_accounts,
+ });
+ }
+
+ LoadAndExecuteBundleOutput {
+ bundle_transaction_results,
+ metrics,
+ result: Ok(()),
+ }
+}
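A worked example of the loop above: take a bundle [t1, t2, t3] in which each transaction write-locks an account also written by the next. The first sequential batch locks all three, but only t1 executes; t2 and t3 come back with AccountInUse. t1's resulting account states are upserted into AccountOverrides, chunk_start advances to the first lock failure, and the next iteration batches [t2, t3]: t2 now executes against the cached (uncommitted) state, then a final iteration executes t3. Three BundleTransactionsOutput entries are returned even though the bundle is only committed atomically later by the caller.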
+
+fn get_account_transactions(
+ bank: &Bank,
+ account_overrides: &mut AccountOverrides,
+ accounts: &[Option<Vec<Pubkey>>],
+ batch: &TransactionBatch,
+) -> Vec<Option<Vec<(Pubkey, AccountSharedData)>>> {
+ let iter = izip!(batch.lock_results().iter(), accounts.iter());
+
+ iter.map(|(lock_result, accounts_requested)| {
+ if lock_result.is_ok() {
+ accounts_requested.as_ref().map(|accounts_requested| {
+ accounts_requested
+ .iter()
+ .map(|a| match account_overrides.get(a) {
+ None => (*a, bank.get_account(a).unwrap_or_default()),
+ Some(data) => (*a, data.clone()),
+ })
+ .collect()
+ })
+ } else {
+ None
+ }
+ })
+ .collect()
+}
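One subtlety: izip! stops at the shorter of its two iterators, so when the requested-accounts slice handed in here is longer than the batch (possible at the post-execution call site because of the max(...) bound above), the excess entries are simply never visited rather than tripping an index error.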
+
+#[cfg(test)]
+mod tests {
+ use {
+ crate::bundle_execution::{load_and_execute_bundle, LoadAndExecuteBundleError},
+ assert_matches::assert_matches,
+ solana_ledger::genesis_utils::create_genesis_config,
+ solana_runtime::{bank::Bank, genesis_utils::GenesisConfigInfo},
+ solana_sdk::{
+ bundle::{derive_bundle_id_from_sanitized_transactions, SanitizedBundle},
+ clock::MAX_PROCESSING_AGE,
+ pubkey::Pubkey,
+ signature::{Keypair, Signer},
+ system_transaction::transfer,
+ transaction::{SanitizedTransaction, Transaction, TransactionError},
+ },
+ std::{
+ sync::{Arc, Barrier},
+ thread::{sleep, spawn},
+ time::Duration,
+ },
+ };
+
+ const MAX_PROCESSING_TIME: Duration = Duration::from_secs(1);
+ const LOG_MESSAGE_BYTES_LIMITS: Option<usize> = Some(100_000);
+ const MINT_AMOUNT_LAMPORTS: u64 = 1_000_000;
+
+ fn create_simple_test_bank(lamports: u64) -> (GenesisConfigInfo, Arc<Bank>) {
+ let genesis_config_info = create_genesis_config(lamports);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config));
+ (genesis_config_info, bank)
+ }
+
+ fn make_bundle(txs: &[Transaction]) -> SanitizedBundle {
+ let transactions: Vec<_> = txs
+ .iter()
+ .map(|tx| SanitizedTransaction::try_from_legacy_transaction(tx.clone()).unwrap())
+ .collect();
+
+ let bundle_id = derive_bundle_id_from_sanitized_transactions(&transactions);
+
+ SanitizedBundle {
+ transactions,
+ bundle_id,
+ }
+ }
+
+ fn find_account_index(tx: &Transaction, account: &Pubkey) -> Option<usize> {
+ tx.message
+ .account_keys
+ .iter()
+ .position(|pubkey| account == pubkey)
+ }
+
+ /// A single, valid bundle shall execute successfully and return the correct BundleTransactionsOutput content
+ #[test]
+ fn test_single_transaction_bundle_success() {
+ const TRANSFER_AMOUNT: u64 = 1_000;
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+ let lamports_per_signature = bank
+ .get_lamports_per_signature_for_blockhash(&genesis_config_info.genesis_config.hash())
+ .unwrap();
+
+ let kp = Keypair::new();
+ let transactions = vec![transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ TRANSFER_AMOUNT,
+ genesis_config_info.genesis_config.hash(),
+ )];
+ let bundle = make_bundle(&transactions);
+ let default_accounts = vec![None; bundle.transactions.len()];
+
+ let execution_result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &MAX_PROCESSING_TIME,
+ true,
+ true,
+ true,
+ true,
+ &LOG_MESSAGE_BYTES_LIMITS,
+ false,
+ None,
+ &default_accounts,
+ &default_accounts,
+ );
+
+ // make sure the bundle succeeded
+ assert!(execution_result.result.is_ok());
+
+ // check that one batch was returned and that it contains the same transaction that was put in
+ assert_eq!(execution_result.bundle_transaction_results.len(), 1);
+ let tx_result = execution_result.bundle_transaction_results.get(0).unwrap();
+ assert_eq!(tx_result.transactions.len(), 1);
+ assert_eq!(tx_result.transactions[0], bundle.transactions[0]);
+
+ // make sure the transaction executed successfully
+ assert_eq!(
+ tx_result
+ .load_and_execute_transactions_output
+ .execution_results
+ .len(),
+ 1
+ );
+ let execution_result = tx_result
+ .load_and_execute_transactions_output
+ .execution_results
+ .get(0)
+ .unwrap();
+ assert!(execution_result.was_executed());
+ assert!(execution_result.was_executed_successfully());
+
+ // Make sure the post-balances are correct
+ assert_eq!(tx_result.pre_balance_info.native.len(), 1);
+ let post_tx_sol_balances = tx_result.post_balance_info.0.get(0).unwrap();
+
+ let minter_message_index =
+ find_account_index(&transactions[0], &genesis_config_info.mint_keypair.pubkey())
+ .unwrap();
+ let receiver_message_index = find_account_index(&transactions[0], &kp.pubkey()).unwrap();
+
+ assert_eq!(
+ post_tx_sol_balances[minter_message_index],
+ MINT_AMOUNT_LAMPORTS - lamports_per_signature - TRANSFER_AMOUNT
+ );
+ assert_eq!(
+ post_tx_sol_balances[receiver_message_index],
+ TRANSFER_AMOUNT
+ );
+ }
+
+ /// Test a simple failure
+ #[test]
+ fn test_single_transaction_bundle_fail() {
+ const TRANSFER_AMOUNT: u64 = 1_000;
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+
+ // kp has no funds, transfer will fail
+ let kp = Keypair::new();
+ let transactions = vec![transfer(
+ &kp,
+ &kp.pubkey(),
+ TRANSFER_AMOUNT,
+ genesis_config_info.genesis_config.hash(),
+ )];
+ let bundle = make_bundle(&transactions);
+
+ let default_accounts = vec![None; bundle.transactions.len()];
+ let execution_result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &MAX_PROCESSING_TIME,
+ true,
+ true,
+ true,
+ true,
+ &LOG_MESSAGE_BYTES_LIMITS,
+ false,
+ None,
+ &default_accounts,
+ &default_accounts,
+ );
+
+ assert_eq!(execution_result.bundle_transaction_results.len(), 0);
+
+ assert!(execution_result.result.is_err());
+
+ match execution_result.result.unwrap_err() {
+ LoadAndExecuteBundleError::ProcessingTimeExceeded(_)
+ | LoadAndExecuteBundleError::LockError { .. }
+ | LoadAndExecuteBundleError::InvalidPreOrPostAccounts => {
+ unreachable!();
+ }
+ LoadAndExecuteBundleError::TransactionError {
+ signature,
+ execution_result,
+ } => {
+ assert_eq!(signature, *bundle.transactions[0].signature());
+ assert!(!execution_result.was_executed());
+ }
+ }
+ }
+
+ /// Tests a multi-tx bundle that succeeds. Checks the returned results
+ #[test]
+ fn test_multi_transaction_bundle_success() {
+ const TRANSFER_AMOUNT_1: u64 = 100_000;
+ const TRANSFER_AMOUNT_2: u64 = 50_000;
+ const TRANSFER_AMOUNT_3: u64 = 10_000;
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+ let lamports_per_signature = bank
+ .get_lamports_per_signature_for_blockhash(&genesis_config_info.genesis_config.hash())
+ .unwrap();
+
+ // mint transfers 100k to 1
+ // 1 transfers 50k to 2
+ // 2 transfers 10k to 3
+ // should get executed in 3 batches [[1], [2], [3]]
+ let kp1 = Keypair::new();
+ let kp2 = Keypair::new();
+ let kp3 = Keypair::new();
+ let transactions = vec![
+ transfer(
+ &genesis_config_info.mint_keypair,
+ &kp1.pubkey(),
+ TRANSFER_AMOUNT_1,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ transfer(
+ &kp1,
+ &kp2.pubkey(),
+ TRANSFER_AMOUNT_2,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ transfer(
+ &kp2,
+ &kp3.pubkey(),
+ TRANSFER_AMOUNT_3,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ ];
+ let bundle = make_bundle(&transactions);
+
+ let default_accounts = vec![None; bundle.transactions.len()];
+ let execution_result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &MAX_PROCESSING_TIME,
+ true,
+ true,
+ true,
+ true,
+ &LOG_MESSAGE_BYTES_LIMITS,
+ false,
+ None,
+ &default_accounts,
+ &default_accounts,
+ );
+
+ assert!(execution_result.result.is_ok());
+ assert_eq!(execution_result.bundle_transaction_results.len(), 3);
+
+ // first batch contains the first tx that was executed
+ assert_eq!(
+ execution_result.bundle_transaction_results[0].transactions,
+ bundle.transactions
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .load_and_execute_transactions_output
+ .execution_results
+ .len(),
+ 3
+ );
+ assert!(execution_result.bundle_transaction_results[0]
+ .load_and_execute_transactions_output
+ .execution_results[0]
+ .was_executed_successfully());
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .load_and_execute_transactions_output
+ .execution_results[1]
+ .flattened_result(),
+ Err(TransactionError::AccountInUse)
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .load_and_execute_transactions_output
+ .execution_results[2]
+ .flattened_result(),
+ Err(TransactionError::AccountInUse)
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .pre_balance_info
+ .native
+ .len(),
+ 3
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .post_balance_info
+ .0
+ .len(),
+ 3
+ );
+
+ let minter_index =
+ find_account_index(&transactions[0], &genesis_config_info.mint_keypair.pubkey())
+ .unwrap();
+ let kp1_index = find_account_index(&transactions[0], &kp1.pubkey()).unwrap();
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .post_balance_info
+ .0[0][minter_index],
+ MINT_AMOUNT_LAMPORTS - lamports_per_signature - TRANSFER_AMOUNT_1
+ );
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .post_balance_info
+ .0[0][kp1_index],
+ TRANSFER_AMOUNT_1
+ );
+
+ // in the second batch, the second transaction was executed
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .transactions
+ .to_owned(),
+ bundle.transactions[1..]
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .load_and_execute_transactions_output
+ .execution_results
+ .len(),
+ 2
+ );
+ assert!(execution_result.bundle_transaction_results[1]
+ .load_and_execute_transactions_output
+ .execution_results[0]
+ .was_executed_successfully());
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .load_and_execute_transactions_output
+ .execution_results[1]
+ .flattened_result(),
+ Err(TransactionError::AccountInUse)
+ );
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .pre_balance_info
+ .native
+ .len(),
+ 2
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .post_balance_info
+ .0
+ .len(),
+ 2
+ );
+
+ let kp1_index = find_account_index(&transactions[1], &kp1.pubkey()).unwrap();
+ let kp2_index = find_account_index(&transactions[1], &kp2.pubkey()).unwrap();
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .post_balance_info
+ .0[0][kp1_index],
+ TRANSFER_AMOUNT_1 - lamports_per_signature - TRANSFER_AMOUNT_2
+ );
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .post_balance_info
+ .0[0][kp2_index],
+ TRANSFER_AMOUNT_2
+ );
+
+ // in the third batch, the third transaction was executed
+ assert_eq!(
+ execution_result.bundle_transaction_results[2]
+ .transactions
+ .to_owned(),
+ bundle.transactions[2..]
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[2]
+ .load_and_execute_transactions_output
+ .execution_results
+ .len(),
+ 1
+ );
+ assert!(execution_result.bundle_transaction_results[2]
+ .load_and_execute_transactions_output
+ .execution_results[0]
+ .was_executed_successfully());
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[2]
+ .pre_balance_info
+ .native
+ .len(),
+ 1
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[2]
+ .post_balance_info
+ .0
+ .len(),
+ 1
+ );
+
+ let kp2_index = find_account_index(&transactions[2], &kp2.pubkey()).unwrap();
+ let kp3_index = find_account_index(&transactions[2], &kp3.pubkey()).unwrap();
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[2]
+ .post_balance_info
+ .0[0][kp2_index],
+ TRANSFER_AMOUNT_2 - lamports_per_signature - TRANSFER_AMOUNT_3
+ );
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[2]
+ .post_balance_info
+ .0[0][kp3_index],
+ TRANSFER_AMOUNT_3
+ );
+ }
+
+ /// Tests a multi-tx bundle with the middle transaction failing.
+ #[test]
+ fn test_multi_transaction_bundle_fails() {
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+
+ let kp1 = Keypair::new();
+ let kp2 = Keypair::new();
+ let kp3 = Keypair::new();
+ let transactions = vec![
+ transfer(
+ &genesis_config_info.mint_keypair,
+ &kp1.pubkey(),
+ 100_000,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ transfer(
+ &kp2,
+ &kp3.pubkey(),
+ 100_000,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ transfer(
+ &kp1,
+ &kp2.pubkey(),
+ 100_000,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ ];
+ let bundle = make_bundle(&transactions);
+
+ let default_accounts = vec![None; bundle.transactions.len()];
+ let execution_result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &MAX_PROCESSING_TIME,
+ true,
+ true,
+ true,
+ true,
+ &LOG_MESSAGE_BYTES_LIMITS,
+ false,
+ None,
+ &default_accounts,
+ &default_accounts,
+ );
+ match execution_result.result.as_ref().unwrap_err() {
+ LoadAndExecuteBundleError::ProcessingTimeExceeded(_)
+ | LoadAndExecuteBundleError::LockError { .. }
+ | LoadAndExecuteBundleError::InvalidPreOrPostAccounts => {
+ unreachable!();
+ }
+
+ LoadAndExecuteBundleError::TransactionError {
+ signature,
+ execution_result: tx_failure,
+ } => {
+ assert_eq!(signature, bundle.transactions[1].signature());
+ assert_eq!(
+ tx_failure.flattened_result(),
+ Err(TransactionError::AccountNotFound)
+ );
+ assert_eq!(execution_result.bundle_transaction_results().len(), 0);
+ }
+ }
+ }
+
+ /// Tests that when the max processing time is exceeded, the bundle is an error
+ #[test]
+ fn test_bundle_max_processing_time_exceeded() {
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+
+ let kp = Keypair::new();
+ let transactions = vec![transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 1,
+ genesis_config_info.genesis_config.hash(),
+ )];
+ let bundle = make_bundle(&transactions);
+
+ let locked_transfer = vec![SanitizedTransaction::from_transaction_for_tests(transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 2,
+ genesis_config_info.genesis_config.hash(),
+ ))];
+
+ // locks the accounts and prevents execution because the write locks on genesis_config_info.mint_keypair and kp.pubkey() are held
+ let _batch = bank.prepare_sanitized_batch(&locked_transfer);
+
+ let default = vec![None; bundle.transactions.len()];
+ let result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &Duration::from_millis(100),
+ false,
+ false,
+ false,
+ false,
+ &None,
+ false,
+ None,
+ &default,
+ &default,
+ );
+ assert_matches!(
+ result.result,
+ Err(LoadAndExecuteBundleError::ProcessingTimeExceeded(_))
+ );
+ }
+
+ #[test]
+ fn test_simulate_bundle_with_locked_account_works() {
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+
+ let kp = Keypair::new();
+ let transactions = vec![transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 1,
+ genesis_config_info.genesis_config.hash(),
+ )];
+ let bundle = make_bundle(&transactions);
+
+ let locked_transfer = vec![SanitizedTransaction::from_transaction_for_tests(transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 2,
+ genesis_config_info.genesis_config.hash(),
+ ))];
+
+ let _batch = bank.prepare_sanitized_batch(&locked_transfer);
+
+ // simulation ignores account locks so you can simulate bundles on unfrozen banks
+ let default = vec![None; bundle.transactions.len()];
+ let result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &Duration::from_millis(100),
+ false,
+ false,
+ false,
+ false,
+ &None,
+ true,
+ None,
+ &default,
+ &default,
+ );
+ assert!(result.result.is_ok());
+ }
+
+ /// Creates a multi-tx bundle and temporarily locks the accounts for one of the transactions in a bundle.
+ /// Ensures the result is what's expected
+ #[test]
+ fn test_bundle_works_with_released_account_locks() {
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+ let barrier = Arc::new(Barrier::new(2));
+
+ let kp = Keypair::new();
+
+ let transactions = vec![transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 1,
+ genesis_config_info.genesis_config.hash(),
+ )];
+ let bundle = Arc::new(make_bundle(&transactions));
+
+ let locked_transfer = vec![SanitizedTransaction::from_transaction_for_tests(transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 2,
+ genesis_config_info.genesis_config.hash(),
+ ))];
+
+ // background thread locks the accounts for a bit then unlocks them
+ let thread = {
+ let barrier = barrier.clone();
+ let bank = bank.clone();
+ spawn(move || {
+ let batch = bank.prepare_sanitized_batch(&locked_transfer);
+ barrier.wait();
+ sleep(Duration::from_millis(500));
+ drop(batch);
+ })
+ };
+
+ let _ = barrier.wait();
+
+ // load_and_execute_bundle should spin for a bit then process after the 500ms sleep is over
+ let default = vec![None; bundle.transactions.len()];
+ let result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &Duration::from_secs(2),
+ false,
+ false,
+ false,
+ false,
+ &None,
+ false,
+ None,
+ &default,
+ &default,
+ );
+ assert!(result.result.is_ok());
+
+ thread.join().unwrap();
+ }
+
+ /// Tests that mismatched pre/post account list lengths return an InvalidPreOrPostAccounts error
+ #[test]
+ fn test_bundle_bad_pre_post_accounts() {
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+
+ let kp = Keypair::new();
+ let transactions = vec![transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 1,
+ genesis_config_info.genesis_config.hash(),
+ )];
+ let bundle = make_bundle(&transactions);
+
+ let result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &Duration::from_millis(100),
+ false,
+ false,
+ false,
+ false,
+ &None,
+ false,
+ None,
+ &vec![None; 2],
+ &vec![None; bundle.transactions.len()],
+ );
+ assert_matches!(
+ result.result,
+ Err(LoadAndExecuteBundleError::InvalidPreOrPostAccounts)
+ );
+
+ let result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &Duration::from_millis(100),
+ false,
+ false,
+ false,
+ false,
+ &None,
+ false,
+ None,
+ &vec![None; bundle.transactions.len()],
+ &vec![None; 2],
+ );
+ assert_matches!(
+ result.result,
+ Err(LoadAndExecuteBundleError::InvalidPreOrPostAccounts)
+ );
+ }
+}
diff --git a/bundle/src/lib.rs b/bundle/src/lib.rs
new file mode 100644
index 0000000000..a93e0d3d17
--- /dev/null
+++ b/bundle/src/lib.rs
@@ -0,0 +1,60 @@
+use {
+ crate::bundle_execution::LoadAndExecuteBundleError,
+ anchor_lang::error::Error,
+ serde::{Deserialize, Serialize},
+ solana_poh::poh_recorder::PohRecorderError,
+ solana_sdk::pubkey::Pubkey,
+ thiserror::Error,
+};
+
+pub mod bundle_execution;
+
+#[derive(Error, Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub enum TipError {
+ #[error("account is missing from bank: {0}")]
+ AccountMissing(Pubkey),
+
+ #[error("Anchor error: {0}")]
+ AnchorError(String),
+
+ #[error("Lock error")]
+ LockError,
+
+ #[error("Error executing initialize programs")]
+ InitializeProgramsError,
+
+ #[error("Error cranking tip programs")]
+ CrankTipError,
+}
+
+impl From<Error> for TipError {
+ fn from(anchor_err: Error) -> Self {
+ match anchor_err {
+ Error::AnchorError(e) => Self::AnchorError(e.error_msg),
+ Error::ProgramError(e) => Self::AnchorError(e.to_string()),
+ }
+ }
+}
+
+pub type BundleExecutionResult<T> = Result<T, BundleExecutionError>;
+
+#[derive(Error, Debug, Clone)]
+pub enum BundleExecutionError {
+ #[error("The bank has hit the max allotted time for processing transactions")]
+ BankProcessingTimeLimitReached,
+
+ #[error("The bundle exceeds the cost model")]
+ ExceedsCostModel,
+
+ #[error("Runtime error while executing the bundle: {0}")]
+ TransactionFailure(#[from] LoadAndExecuteBundleError),
+
+ #[error("Error locking bundle because a transaction is malformed")]
+ LockError,
+
+ #[error("PoH record error: {0}")]
+ PohRecordError(#[from] PohRecorderError),
+
+ #[error("Tip payment error {0}")]
+ TipError(#[from] TipError),
+}
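A minimal sketch of how the #[from] conversions above are meant to be used (hypothetical caller; only this crate's types are assumed): the ? operator lifts a TipError or LoadAndExecuteBundleError into BundleExecutionError automatically.

use solana_bundle::{BundleExecutionError, BundleExecutionResult, TipError};

// Hypothetical helper standing in for the real tip-cranking path.
fn crank_tip_programs() -> Result<(), TipError> {
    Err(TipError::CrankTipError)
}

fn process_bundle() -> BundleExecutionResult<()> {
    // `?` converts TipError into BundleExecutionError::TipError via #[from].
    crank_tip_programs()?;
    Ok(())
}

fn main() {
    assert!(matches!(
        process_bundle(),
        Err(BundleExecutionError::TipError(TipError::CrankTipError))
    ));
}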
diff --git a/ci/buildkite-pipeline-in-disk.sh b/ci/buildkite-pipeline-in-disk.sh
index 113b009aa4..6b41beda32 100644
--- a/ci/buildkite-pipeline-in-disk.sh
+++ b/ci/buildkite-pipeline-in-disk.sh
@@ -292,7 +292,7 @@ if [[ -n $BUILDKITE_TAG ]]; then
"https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"
# Jump directly to the secondary build to publish release artifacts quickly
- trigger_secondary_step
+# trigger_secondary_step
exit 0
fi
@@ -320,5 +320,5 @@ fi
start_pipeline "Push pipeline for ${BUILDKITE_BRANCH:-?unknown branch?}"
pull_or_push_steps
wait_step
-trigger_secondary_step
+#trigger_secondary_step
exit 0
diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh
index d7467f98b7..78b92d26c1 100755
--- a/ci/buildkite-pipeline.sh
+++ b/ci/buildkite-pipeline.sh
@@ -309,7 +309,7 @@ if [[ -n $BUILDKITE_TAG ]]; then
"https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"
# Jump directly to the secondary build to publish release artifacts quickly
- trigger_secondary_step
+# trigger_secondary_step
exit 0
fi
@@ -337,5 +337,5 @@ fi
start_pipeline "Push pipeline for ${BUILDKITE_BRANCH:-?unknown branch?}"
pull_or_push_steps
wait_step
-trigger_secondary_step
+#trigger_secondary_step
exit 0
diff --git a/ci/channel-info.sh b/ci/channel-info.sh
index c82806454d..101583307f 100755
--- a/ci/channel-info.sh
+++ b/ci/channel-info.sh
@@ -11,7 +11,7 @@ here="$(dirname "$0")"
# shellcheck source=ci/semver_bash/semver.sh
source "$here"/semver_bash/semver.sh
-remote=https://github.com/solana-labs/solana.git
+remote=https://github.com/jito-foundation/jito-solana.git
# Fetch all vX.Y.Z tags
#
diff --git a/ci/check-crates.sh b/ci/check-crates.sh
index 655504ea11..d6a9ad9c39 100755
--- a/ci/check-crates.sh
+++ b/ci/check-crates.sh
@@ -31,6 +31,9 @@ printf "%s\n" "${files[@]}"
error_count=0
for file in "${files[@]}"; do
read -r crate_name package_publish workspace < <(toml get "$file" . | jq -r '(.package.name | tostring)+" "+(.package.publish | tostring)+" "+(.workspace | tostring)')
+ if [ "$crate_name" == "solana-bundle" ]; then
+ continue
+ fi
echo "=== $crate_name ($file) ==="
if [[ $package_publish = 'false' ]]; then
diff --git a/core/Cargo.toml b/core/Cargo.toml
index 46eae11c4a..d4eec33edf 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -14,6 +14,7 @@ edition = { workspace = true }
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
[dependencies]
+anchor-lang = { workspace = true }
base64 = { workspace = true }
bincode = { workspace = true }
bs58 = { workspace = true }
@@ -24,11 +25,16 @@ eager = { workspace = true }
etcd-client = { workspace = true, features = ["tls"] }
histogram = { workspace = true }
itertools = { workspace = true }
+jito-protos = { workspace = true }
+jito-tip-distribution = { workspace = true }
+jito-tip-payment = { workspace = true }
lazy_static = { workspace = true }
log = { workspace = true }
lru = { workspace = true }
min-max-heap = { workspace = true }
num_enum = { workspace = true }
+prost = { workspace = true }
+prost-types = { workspace = true }
rand = { workspace = true }
rand_chacha = { workspace = true }
rayon = { workspace = true }
@@ -37,6 +43,7 @@ serde = { workspace = true }
serde_derive = { workspace = true }
solana-address-lookup-table-program = { workspace = true }
solana-bloom = { workspace = true }
+solana-bundle = { workspace = true }
solana-client = { workspace = true }
solana-entry = { workspace = true }
solana-frozen-abi = { workspace = true }
@@ -67,6 +74,7 @@ sys-info = { workspace = true }
tempfile = { workspace = true }
thiserror = { workspace = true }
tokio = { version = "~1.14.1", features = ["full"] }
+tonic = { workspace = true }
trees = { workspace = true }
[dev-dependencies]
@@ -75,8 +83,10 @@ matches = { workspace = true }
raptorq = { workspace = true }
serde_json = { workspace = true }
serial_test = { workspace = true }
+solana-bundle = { workspace = true }
solana-logger = { workspace = true }
solana-program-runtime = { workspace = true }
+solana-program-test = { workspace = true }
solana-stake-program = { workspace = true }
static_assertions = { workspace = true }
systemstat = { workspace = true }
@@ -87,6 +97,7 @@ sysctl = { workspace = true }
[build-dependencies]
rustc_version = { workspace = true }
+tonic-build = { workspace = true }
[[bench]]
name = "banking_stage"
diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs
index a6254292a5..22932e01a4 100644
--- a/core/benches/banking_stage.rs
+++ b/core/benches/banking_stage.rs
@@ -14,6 +14,7 @@ use {
committer::Committer, consumer::Consumer, BankingStage, BankingStageStats,
},
banking_trace::{BankingPacketBatch, BankingTracer},
+ bundle_stage::bundle_account_locker::BundleAccountLocker,
leader_slot_banking_stage_metrics::LeaderSlotMetricsTracker,
qos_service::QosService,
unprocessed_packet_batches::*,
@@ -50,6 +51,7 @@ use {
vote_state::VoteStateUpdate, vote_transaction::new_vote_state_update_transaction,
},
std::{
+ collections::HashSet,
iter::repeat_with,
sync::{atomic::Ordering, Arc, RwLock},
time::{Duration, Instant},
@@ -61,8 +63,15 @@ fn check_txs(receiver: &Arc<Receiver<WorkingBankEntry>>, ref_tx_count: usize) {
let mut total = 0;
let now = Instant::now();
loop {
- if let Ok((_bank, (entry, _tick_height))) = receiver.recv_timeout(Duration::new(1, 0)) {
- total += entry.transactions.len();
+ if let Ok(WorkingBankEntry {
+ bank: _,
+ entries_ticks,
+ }) = receiver.recv_timeout(Duration::new(1, 0))
+ {
+ total += entries_ticks
+ .iter()
+ .map(|e| e.0.transactions.len())
.sum::<usize>();
}
if total >= ref_tx_count {
break;
@@ -105,7 +114,14 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
);
let (s, _r) = unbounded();
let committer = Committer::new(None, s, Arc::new(PrioritizationFeeCache::new(0u64)));
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
// This tests the performance of buffering packets.
// If the packet buffers are copied, performance will be poor.
bencher.iter(move || {
@@ -299,7 +315,9 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
None,
Arc::new(ConnectionCache::new("connection_cache_test")),
bank_forks,
- &Arc::new(PrioritizationFeeCache::new(0u64)),
+ &Arc::new(PrioritizationFeeCache::default()),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
let chunk_len = verified.len() / CHUNKS;
diff --git a/core/benches/cluster_info.rs b/core/benches/cluster_info.rs
index 04eb85c2dd..46da7fd03b 100644
--- a/core/benches/cluster_info.rs
+++ b/core/benches/cluster_info.rs
@@ -79,6 +79,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
&cluster_info,
&bank_forks,
&SocketAddrSpace::Unspecified,
+ &None,
)
.unwrap();
});
diff --git a/core/benches/consumer.rs b/core/benches/consumer.rs
index d6f908c577..875297a131 100644
--- a/core/benches/consumer.rs
+++ b/core/benches/consumer.rs
@@ -9,15 +9,15 @@ use {
},
solana_core::{
banking_stage::{committer::Committer, consumer::Consumer},
+ bundle_stage::bundle_account_locker::BundleAccountLocker,
qos_service::QosService,
},
- solana_entry::entry::Entry,
solana_ledger::{
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
},
solana_poh::{
- poh_recorder::{create_test_recorder, PohRecorder},
+ poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
poh_service::PohService,
},
solana_runtime::bank::Bank,
@@ -26,9 +26,12 @@ use {
signer::Signer, stake_history::Epoch, system_program, system_transaction,
transaction::SanitizedTransaction,
},
- std::sync::{
- atomic::{AtomicBool, Ordering},
- Arc, RwLock,
+ std::{
+ collections::HashSet,
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc, RwLock,
+ },
},
tempfile::TempDir,
test::Bencher,
@@ -81,7 +84,14 @@ fn create_consumer(poh_recorder: &RwLock<PohRecorder>) -> Consumer {
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let committer = Committer::new(None, replay_vote_sender, Arc::default());
let transaction_recorder = poh_recorder.read().unwrap().new_recorder();
- Consumer::new(committer, transaction_recorder, QosService::new(0), None)
+ Consumer::new(
+ committer,
+ transaction_recorder,
+ QosService::new(0),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ )
}
struct BenchFrame {
@@ -90,7 +100,7 @@ struct BenchFrame {
exit: Arc<AtomicBool>,
poh_recorder: Arc<RwLock<PohRecorder>>,
poh_service: PohService,
- signal_receiver: Receiver<(Arc<Bank>, (Entry, u64))>,
+ signal_receiver: Receiver<WorkingBankEntry>,
}
fn setup(apply_cost_tracker_during_replay: bool) -> BenchFrame {
diff --git a/core/benches/proto_to_packet.rs b/core/benches/proto_to_packet.rs
new file mode 100644
index 0000000000..87f85f9c7f
--- /dev/null
+++ b/core/benches/proto_to_packet.rs
@@ -0,0 +1,56 @@
+#![feature(test)]
+
+extern crate test;
+
+use {
+ jito_protos::proto::packet::{
+ Meta as PbMeta, Packet as PbPacket, PacketBatch, PacketFlags as PbFlags,
+ },
+ solana_core::proto_packet_to_packet,
+ solana_sdk::packet::{Packet, PACKET_DATA_SIZE},
+ std::iter::repeat,
+ test::{black_box, Bencher},
+};
+
+fn get_proto_packet(i: u8) -> PbPacket {
+ PbPacket {
+ data: repeat(i).take(PACKET_DATA_SIZE).collect(),
+ meta: Some(PbMeta {
+ size: PACKET_DATA_SIZE as u64,
+ addr: "255.255.255.255:65535".to_string(),
+ port: 65535,
+ flags: Some(PbFlags {
+ discard: false,
+ forwarded: false,
+ repair: false,
+ simple_vote_tx: false,
+ tracer_packet: false,
+ }),
+ sender_stake: 0,
+ }),
+ }
+}
+
+#[bench]
+fn bench_proto_to_packet(bencher: &mut Bencher) {
+ bencher.iter(|| {
+ black_box(proto_packet_to_packet(get_proto_packet(1)));
+ });
+}
+
+#[bench]
+fn bench_batch_list_to_packets(bencher: &mut Bencher) {
+ let packet_batch = PacketBatch {
+ packets: (0..128).map(get_proto_packet).collect(),
+ };
+
+ bencher.iter(|| {
+ black_box(
+ packet_batch
+ .packets
+ .iter()
+ .map(|p| proto_packet_to_packet(p.clone()))
.collect::<Vec<Packet>>(),
+ );
+ });
+}
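Because of the #![feature(test)] attribute, this bench file builds only on a nightly toolchain; something like cargo +nightly bench --bench proto_to_packet from the core/ crate (assuming the target is registered under [[bench]] in core/Cargo.toml) should run both benchmarks.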
diff --git a/core/benches/retransmit_stage.rs b/core/benches/retransmit_stage.rs
index 9af88ae880..440c5353a4 100644
--- a/core/benches/retransmit_stage.rs
+++ b/core/benches/retransmit_stage.rs
@@ -124,6 +124,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
shreds_receiver,
Arc::default(), // solana_rpc::max_slots::MaxSlots
None,
+ Arc::new(RwLock::new(None)),
);
let mut index = 0;
diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs
index eb2e7ba9d0..32d0c0bda7 100644
--- a/core/src/accounts_hash_verifier.rs
+++ b/core/src/accounts_hash_verifier.rs
@@ -75,7 +75,8 @@ impl AccountsHashVerifier {
)) = Self::get_next_accounts_package(
&accounts_package_sender,
&accounts_package_receiver,
- ) else {
+ )
+ else {
std::thread::sleep(LOOP_LIMITER);
continue;
};
@@ -302,7 +303,9 @@ impl AccountsHashVerifier {
(accounts_hash.into(), accounts_hash, None)
}
CalcAccountsHashFlavor::Incremental => {
- let AccountsPackageType::Snapshot(SnapshotType::IncrementalSnapshot(base_slot)) = accounts_package.package_type else {
+ let AccountsPackageType::Snapshot(SnapshotType::IncrementalSnapshot(base_slot)) =
+ accounts_package.package_type
+ else {
panic!("Calculating incremental accounts hash requires a base slot");
};
let (base_accounts_hash, base_capitalization) = accounts_package
diff --git a/core/src/admin_rpc_post_init.rs b/core/src/admin_rpc_post_init.rs
index 110e1f5aa4..7373ffd5b3 100644
--- a/core/src/admin_rpc_post_init.rs
+++ b/core/src/admin_rpc_post_init.rs
@@ -1,10 +1,12 @@
use {
+ crate::proxy::{block_engine_stage::BlockEngineConfig, relayer_stage::RelayerConfig},
solana_gossip::cluster_info::ClusterInfo,
solana_runtime::bank_forks::BankForks,
solana_sdk::pubkey::Pubkey,
std::{
collections::HashSet,
- sync::{Arc, RwLock},
+ net::SocketAddr,
+ sync::{Arc, Mutex, RwLock},
},
};
@@ -14,4 +16,7 @@ pub struct AdminRpcRequestMetadataPostInit {
pub bank_forks: Arc<RwLock<BankForks>>,
pub vote_account: Pubkey,
pub repair_whitelist: Arc<RwLock<HashSet<Pubkey>>>,
+ pub block_engine_config: Arc<Mutex<BlockEngineConfig>>,
+ pub relayer_config: Arc<Mutex<RelayerConfig>>,
+ pub shred_receiver_address: Arc<RwLock<Option<SocketAddr>>>,
}
diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs
index dc75d24a53..337b4e40ea 100644
--- a/core/src/banking_stage.rs
+++ b/core/src/banking_stage.rs
@@ -12,6 +12,7 @@ use {
crate::{
banking_stage::committer::Committer,
banking_trace::BankingPacketReceiver,
+ bundle_stage::bundle_account_locker::BundleAccountLocker,
latest_unprocessed_votes::{LatestUnprocessedVotes, VoteSource},
leader_slot_banking_stage_metrics::LeaderSlotMetricsTracker,
qos_service::QosService,
@@ -31,9 +32,14 @@ use {
bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache,
vote_sender_types::ReplayVoteSender,
},
- solana_sdk::{feature_set::allow_votes_to_directly_update_vote_state, timing::AtomicInterval},
+ solana_sdk::{
+ feature_set::allow_votes_to_directly_update_vote_state, pubkey::Pubkey,
+ timing::AtomicInterval,
+ },
std::{
- cmp, env,
+ cmp,
+ collections::HashSet,
+ env,
sync::{
atomic::{AtomicU64, AtomicUsize, Ordering},
Arc, RwLock,
@@ -45,7 +51,7 @@ use {
pub mod committer;
pub mod consumer;
-mod decision_maker;
+pub(crate) mod decision_maker;
mod forwarder;
mod packet_receiver;
@@ -311,6 +317,8 @@ impl BankingStage {
connection_cache: Arc,
bank_forks: Arc>,
prioritization_fee_cache: &Arc,
+ blacklisted_accounts: HashSet<Pubkey>,
+ bundle_account_locker: BundleAccountLocker,
) -> Self {
Self::new_num_threads(
cluster_info,
@@ -325,6 +333,8 @@ impl BankingStage {
connection_cache,
bank_forks,
prioritization_fee_cache,
+ blacklisted_accounts,
+ bundle_account_locker,
)
}
@@ -342,6 +352,8 @@ impl BankingStage {
connection_cache: Arc,
bank_forks: Arc>,
prioritization_fee_cache: &Arc,
+ blacklisted_accounts: HashSet<Pubkey>,
+ bundle_account_locker: BundleAccountLocker,
) -> Self {
assert!(num_threads >= MIN_TOTAL_THREADS);
// Single thread to generate entries from many banks.
@@ -424,6 +436,8 @@ impl BankingStage {
poh_recorder.read().unwrap().new_recorder(),
QosService::new(id),
log_messages_bytes_limit,
+ blacklisted_accounts.clone(),
+ bundle_account_locker.clone(),
);
Builder::new()
@@ -584,7 +598,7 @@ mod tests {
crate::banking_trace::{BankingPacketBatch, BankingTracer},
crossbeam_channel::{unbounded, Receiver},
itertools::Itertools,
- solana_entry::entry::{Entry, EntrySlice},
+ solana_entry::entry::EntrySlice,
solana_gossip::cluster_info::Node,
solana_ledger::{
blockstore::Blockstore,
@@ -598,6 +612,7 @@ mod tests {
solana_poh::{
poh_recorder::{
create_test_recorder, PohRecorderError, Record, RecordTransactionsSummary,
+ WorkingBankEntry,
},
poh_service::PohService,
},
@@ -673,6 +688,8 @@ mod tests {
Arc::new(ConnectionCache::new("connection_cache_test")),
bank_forks,
&Arc::new(PrioritizationFeeCache::new(0u64)),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
drop(non_vote_sender);
drop(tpu_vote_sender);
@@ -729,6 +746,8 @@ mod tests {
Arc::new(ConnectionCache::new("connection_cache_test")),
bank_forks,
&Arc::new(PrioritizationFeeCache::new(0u64)),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
trace!("sending bank");
drop(non_vote_sender);
@@ -741,7 +760,12 @@ mod tests {
trace!("getting entries");
let entries: Vec<_> = entry_receiver
.iter()
- .map(|(_bank, (entry, _tick_height))| entry)
+ .flat_map(
+ |WorkingBankEntry {
+ bank: _,
+ entries_ticks,
+ }| entries_ticks.into_iter().map(|(e, _)| e),
+ )
.collect();
trace!("done");
assert_eq!(entries.len(), genesis_config.ticks_per_slot as usize);
@@ -810,6 +834,8 @@ mod tests {
Arc::new(ConnectionCache::new("connection_cache_test")),
bank_forks,
&Arc::new(PrioritizationFeeCache::new(0u64)),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
// fund another account so we can send 2 good transactions in a single batch.
@@ -861,9 +887,14 @@ mod tests {
bank.process_transaction(&fund_tx).unwrap();
//receive entries + ticks
loop {
- let entries: Vec<Entry> = entry_receiver
+ let entries: Vec<_> = entry_receiver
.iter()
- .map(|(_bank, (entry, _tick_height))| entry)
+ .flat_map(
+ |WorkingBankEntry {
+ bank: _,
+ entries_ticks,
+ }| entries_ticks.into_iter().map(|(e, _)| e),
+ )
.collect();
assert!(entries.verify(&blockhash));
@@ -972,6 +1003,8 @@ mod tests {
Arc::new(ConnectionCache::new("connection_cache_test")),
bank_forks,
&Arc::new(PrioritizationFeeCache::new(0u64)),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
// wait for banking_stage to eat the packets
@@ -990,7 +1023,12 @@ mod tests {
// check that the balance is what we expect.
let entries: Vec<_> = entry_receiver
.iter()
- .map(|(_bank, (entry, _tick_height))| entry)
+ .flat_map(
+ |WorkingBankEntry {
+ bank: _,
+ entries_ticks,
+ }| entries_ticks.into_iter().map(|(e, _)| e),
+ )
.collect();
let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
@@ -1051,15 +1089,19 @@ mod tests {
system_transaction::transfer(&keypair2, &pubkey2, 1, genesis_config.hash()).into(),
];
- let _ = recorder.record_transactions(bank.slot(), txs.clone());
- let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
+ let _ = recorder.record_transactions(bank.slot(), vec![txs.clone()]);
+ let WorkingBankEntry {
+ bank,
+ entries_ticks,
+ } = entry_receiver.recv().unwrap();
+ let entry = &entries_ticks.get(0).unwrap().0;
assert_eq!(entry.transactions, txs);
// Once bank is set to a new bank (setting bank.slot() + 1 in record_transactions),
// record_transactions should throw MaxHeightReached
let next_slot = bank.slot() + 1;
let RecordTransactionsSummary { result, .. } =
- recorder.record_transactions(next_slot, txs);
+ recorder.record_transactions(next_slot, vec![txs]);
assert_matches!(result, Err(PohRecorderError::MaxHeightReached));
// Should receive nothing from PohRecorder b/c record failed
assert!(entry_receiver.try_recv().is_err());
@@ -1166,6 +1208,8 @@ mod tests {
Arc::new(ConnectionCache::new("connection_cache_test")),
bank_forks,
&Arc::new(PrioritizationFeeCache::new(0u64)),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
let keypairs = (0..100).map(|_| Keypair::new()).collect_vec();
diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs
index 81ae708d65..5925f64289 100644
--- a/core/src/banking_stage/committer.rs
+++ b/core/src/banking_stage/committer.rs
@@ -16,11 +16,9 @@ use {
transaction_batch::TransactionBatch,
vote_sender_types::ReplayVoteSender,
},
- solana_sdk::{pubkey::Pubkey, saturating_add_assign},
- solana_transaction_status::{
- token_balances::TransactionTokenBalancesSet, TransactionTokenBalance,
- },
- std::{collections::HashMap, sync::Arc},
+ solana_sdk::saturating_add_assign,
+ solana_transaction_status::{token_balances::TransactionTokenBalancesSet, PreBalanceInfo},
+ std::sync::Arc,
};
#[derive(Clone, Debug, PartialEq, Eq)]
@@ -29,13 +27,6 @@ pub enum CommitTransactionDetails {
NotCommitted,
}
-#[derive(Default)]
-pub(super) struct PreBalanceInfo {
- pub native: Vec<Vec<u64>>,
- pub token: Vec<Vec<TransactionTokenBalance>>,
- pub mint_decimals: HashMap<Pubkey, u8>,
-}
-
pub struct Committer {
transaction_status_sender: Option,
replay_vote_sender: ReplayVoteSender,
@@ -144,7 +135,7 @@ impl Committer {
let txs = batch.sanitized_transactions().to_vec();
let post_balances = bank.collect_balances(batch);
let post_token_balances =
- collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals);
+ collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals, None);
let mut transaction_index = starting_transaction_index.unwrap_or_default();
let batch_transaction_indexes: Vec<_> = tx_results
.execution_results
diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs
index 856b5ad6f2..396c755217 100644
--- a/core/src/banking_stage/consume_worker.rs
+++ b/core/src/banking_stage/consume_worker.rs
@@ -132,6 +132,7 @@ mod tests {
scheduler_messages::{TransactionBatchId, TransactionId},
tests::{create_slow_genesis_config, sanitize_transactions, simulate_poh},
},
+ bundle_stage::bundle_account_locker::BundleAccountLocker,
qos_service::QosService,
},
crossbeam_channel::unbounded,
@@ -149,6 +150,7 @@ mod tests {
signature::Keypair, system_transaction,
},
std::{
+ collections::HashSet,
sync::{atomic::AtomicBool, RwLock},
thread::JoinHandle,
},
@@ -206,7 +208,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let (consume_sender, consume_receiver) = unbounded();
let (consumed_sender, consumed_receiver) = unbounded();
diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs
index 9ae1009672..79d3c1750b 100644
--- a/core/src/banking_stage/consumer.rs
+++ b/core/src/banking_stage/consumer.rs
@@ -4,7 +4,7 @@ use {
BankingStageStats,
},
crate::{
- banking_stage::committer::PreBalanceInfo,
+ bundle_stage::bundle_account_locker::BundleAccountLocker,
immutable_deserialized_packet::ImmutableDeserializedPacket,
leader_slot_banking_stage_metrics::{LeaderSlotMetricsTracker, ProcessTransactionsSummary},
leader_slot_banking_stage_timing_metrics::LeaderExecuteAndCommitTimings,
@@ -18,7 +18,6 @@ use {
BankStart, PohRecorderError, RecordTransactionsSummary, RecordTransactionsTimings,
TransactionRecorder,
},
- solana_program_runtime::timings::ExecuteTimings,
solana_runtime::{
bank::{Bank, LoadAndExecuteTransactionsOutput, TransactionCheckResult},
transaction_batch::TransactionBatch,
@@ -26,11 +25,15 @@ use {
},
solana_sdk::{
clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE},
- feature_set, saturating_add_assign,
+ feature_set,
+ pubkey::Pubkey,
+ saturating_add_assign,
timing::timestamp,
transaction::{self, AddressLoader, SanitizedTransaction, TransactionError},
},
+ solana_transaction_status::PreBalanceInfo,
std::{
+ collections::HashSet,
sync::{atomic::Ordering, Arc},
time::Instant,
},
@@ -70,6 +73,8 @@ pub struct Consumer {
transaction_recorder: TransactionRecorder,
qos_service: QosService,
log_messages_bytes_limit: Option<usize>,
+ blacklisted_accounts: HashSet<Pubkey>,
+ bundle_account_locker: BundleAccountLocker,
}
impl Consumer {
@@ -78,12 +83,16 @@ impl Consumer {
transaction_recorder: TransactionRecorder,
qos_service: QosService,
log_messages_bytes_limit: Option<usize>,
+ blacklisted_accounts: HashSet<Pubkey>,
+ bundle_account_locker: BundleAccountLocker,
) -> Self {
Self {
committer,
transaction_recorder,
qos_service,
log_messages_bytes_limit,
+ blacklisted_accounts,
+ bundle_account_locker,
}
}
@@ -113,6 +122,7 @@ impl Consumer {
packets_to_process,
)
},
+ &self.blacklisted_accounts,
);
if reached_end_of_slot {
@@ -443,20 +453,26 @@ impl Consumer {
cost_model_us,
) = measure_us!(self.qos_service.select_and_accumulate_transaction_costs(
bank,
+ &mut bank.write_cost_tracker().unwrap(),
txs,
pre_results
));
// Only lock accounts for those transactions that are selected for the block;
// Once accounts are locked, other threads cannot encode transactions that will modify the
- // same account state
+ // same account state.
+ // BundleAccountLocker is used to prevent race conditions with bundled transactions from bundle stage
+ let bundle_account_locks = self.bundle_account_locker.account_locks();
let (batch, lock_us) = measure_us!(bank.prepare_sanitized_batch_with_results(
txs,
transaction_qos_cost_results.iter().map(|r| match r {
Ok(_cost) => Ok(()),
Err(err) => Err(err.clone()),
- })
+ }),
+ &bundle_account_locks.read_locks(),
+ &bundle_account_locks.write_locks()
));
+ drop(bundle_account_locks);
// retryable_txs includes AccountInUse, WouldExceedMaxBlockCostLimit
// WouldExceedMaxAccountCostLimit, WouldExceedMaxVoteCostLimit
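
select_and_accumulate_transaction_costs now receives the cost tracker's write guard from the caller instead of locking internally, so one acquisition covers the whole batch. A sketch of the pattern, with a plain Mutex standing in for the bank's RwLock-guarded cost tracker:

```rust
use std::sync::{Mutex, MutexGuard};

struct CostTracker {
    block_cost: u64,
    limit: u64,
}

// The selection routine takes an already-held guard rather than re-locking,
// accumulating a transaction's cost only if it still fits in the block.
fn select_and_accumulate(tracker: &mut MutexGuard<CostTracker>, tx_cost: u64) -> bool {
    if tracker.block_cost.saturating_add(tx_cost) <= tracker.limit {
        tracker.block_cost += tx_cost;
        true
    } else {
        false
    }
}

fn main() {
    let tracker = Mutex::new(CostTracker { block_cost: 0, limit: 48_000_000 });
    let mut guard = tracker.lock().unwrap(); // lock once for the batch
    assert!(select_and_accumulate(&mut guard, 1_000));
    assert!(!select_and_accumulate(&mut guard, u64::MAX)); // would exceed the limit
}
```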
@@ -501,8 +517,9 @@ impl Consumer {
.iter_mut()
.for_each(|x| *x += chunk_offset);
- let (cu, us) =
- Self::accumulate_execute_units_and_time(&execute_and_commit_timings.execute_timings);
+ let (cu, us) = execute_and_commit_timings
+ .execute_timings
+ .accumulate_execute_units_and_time();
self.qos_service.accumulate_actual_execute_cu(cu);
self.qos_service.accumulate_actual_execute_time(us);
@@ -539,7 +556,7 @@ impl Consumer {
if transaction_status_sender_enabled {
pre_balance_info.native = bank.collect_balances(batch);
pre_balance_info.token =
- collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals)
+ collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals, None)
}
});
execute_and_commit_timings.collect_balances_us = collect_balances_us;
@@ -588,7 +605,7 @@ impl Consumer {
let (record_transactions_summary, record_us) = measure_us!(self
.transaction_recorder
- .record_transactions(bank.slot(), executed_transactions));
+ .record_transactions(bank.slot(), vec![executed_transactions]));
execute_and_commit_timings.record_us = record_us;
let RecordTransactionsSummary {
@@ -670,18 +687,6 @@ impl Consumer {
}
}
- fn accumulate_execute_units_and_time(execute_timings: &ExecuteTimings) -> (u64, u64) {
- execute_timings.details.per_program_timings.values().fold(
- (0, 0),
- |(units, times), program_timings| {
- (
- units.saturating_add(program_timings.accumulated_units),
- times.saturating_add(program_timings.accumulated_us),
- )
- },
- )
- }
-
/// This function filters pending packets that are still valid
/// # Arguments
/// * `transactions` - a batch of transactions deserialized from packets
@@ -749,7 +754,7 @@ mod tests {
},
solana_perf::packet::Packet,
solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry},
- solana_program_runtime::timings::ProgramTiming,
+ solana_program_runtime::timings::{ExecuteTimings, ProgramTiming},
solana_rpc::transaction_status_service::TransactionStatusService,
solana_runtime::{cost_model::CostModel, prioritization_fee_cache::PrioritizationFeeCache},
solana_sdk::{
@@ -808,7 +813,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let process_transactions_summary =
consumer.process_transactions(&bank, &Instant::now(), &transactions);
@@ -964,7 +976,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let process_transactions_batch_output =
consumer.process_and_record_transactions(&bank, &transactions, 0);
@@ -989,7 +1008,13 @@ mod tests {
let mut done = false;
// read entries until I find mine, might be ticks...
- while let Ok((_bank, (entry, _tick_height))) = entry_receiver.recv() {
+ while let Ok(WorkingBankEntry {
+ bank,
+ entries_ticks,
+ }) = entry_receiver.recv()
+ {
+ assert!(entries_ticks.len() == 1);
+ let entry = &entries_ticks.get(0).unwrap().0;
if !entry.is_tick() {
trace!("got entry");
assert_eq!(entry.transactions.len(), transactions.len());
@@ -1091,7 +1116,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let process_transactions_batch_output =
consumer.process_and_record_transactions(&bank, &transactions, 0);
@@ -1177,7 +1209,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let get_block_cost = || bank.read_cost_tracker().unwrap().block_cost();
let get_tx_count = || bank.read_cost_tracker().unwrap().transaction_count();
@@ -1327,7 +1366,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let process_transactions_batch_output =
consumer.process_and_record_transactions(&bank, &transactions, 0);
@@ -1524,7 +1570,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder.clone(), QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder.clone(),
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let process_transactions_summary =
consumer.process_transactions(&bank, &Instant::now(), &transactions);
@@ -1649,7 +1702,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let _ = consumer.process_and_record_transactions(&bank, &transactions, 0);
@@ -1787,7 +1847,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let _ = consumer.process_and_record_transactions(&bank, &[sanitized_tx.clone()], 0);
@@ -1847,7 +1914,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
// When the working bank in poh_recorder is None, no packets should be processed (consume will not be called)
assert!(!poh_recorder.read().unwrap().has_bank());
@@ -1925,7 +1999,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
// When the working bank in poh_recorder is None, no packets should be processed
assert!(!poh_recorder.read().unwrap().has_bank());
@@ -1977,7 +2058,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
// When the working bank in poh_recorder is None, no packets should be processed (consume will not be called)
assert!(!poh_recorder.read().unwrap().has_bank());
@@ -2065,7 +2153,7 @@ mod tests {
expected_units += n * 1000;
}
- let (units, us) = Consumer::accumulate_execute_units_and_time(&execute_timings);
+ let (units, us) = execute_timings.accumulate_execute_units_and_time();
assert_eq!(expected_units, units);
assert_eq!(expected_us, us);
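
The removed Consumer helper (and its test) now call an equivalent method on ExecuteTimings upstream. A self-contained sketch of the same saturating fold over per-program timings, using stand-in types rather than the real solana_program_runtime API:

```rust
use std::collections::HashMap;

struct ProgramTiming {
    accumulated_units: u64,
    accumulated_us: u64,
}

// The same fold the deleted helper performed: sum units and microseconds
// across every program, saturating instead of overflowing.
fn accumulate_execute_units_and_time(per_program: &HashMap<String, ProgramTiming>) -> (u64, u64) {
    per_program.values().fold((0, 0), |(units, us), t| {
        (
            units.saturating_add(t.accumulated_units),
            us.saturating_add(t.accumulated_us),
        )
    })
}

fn main() {
    let mut timings = HashMap::new();
    timings.insert("program_a".to_string(), ProgramTiming { accumulated_units: 10, accumulated_us: 5 });
    timings.insert("program_b".to_string(), ProgramTiming { accumulated_units: 20, accumulated_us: 7 });
    assert_eq!(accumulate_execute_units_and_time(&timings), (30, 12));
}
```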
diff --git a/core/src/banking_trace.rs b/core/src/banking_trace.rs
index b245e1fb59..fd6c7c2d90 100644
--- a/core/src/banking_trace.rs
+++ b/core/src/banking_trace.rs
@@ -318,6 +318,7 @@ impl BankingTracer {
}
}
+#[derive(Clone)]
pub struct TracedSender {
label: ChannelLabel,
sender: Sender<BankingPacketBatch>,
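
Deriving Clone on TracedSender works because the wrapped channel sender is itself Clone; a cloned traced sender hands another stage its own producer handle on the same channel. A sketch using std::sync::mpsc in place of the crossbeam channel the validator actually uses:

```rust
use std::sync::mpsc::{channel, Sender};

// Minimal sketch; the real TracedSender wraps a crossbeam Sender, which is
// also Clone, so the derive is all that's needed.
#[derive(Clone)]
struct TracedSender {
    label: &'static str,
    sender: Sender<u64>,
}

fn main() {
    let (tx, rx) = channel();
    let traced = TracedSender { label: "non_vote", sender: tx };
    let traced_clone = traced.clone(); // a second producer, e.g. for another stage
    assert_eq!(traced_clone.label, "non_vote"); // the label travels with the clone
    traced.sender.send(1).unwrap();
    traced_clone.sender.send(2).unwrap();
    drop((traced, traced_clone)); // close the channel so the iterator below ends
    assert_eq!(rx.iter().sum::<u64>(), 3);
}
```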
diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs
index 3b6e4967c3..1f5b652383 100644
--- a/core/src/broadcast_stage.rs
+++ b/core/src/broadcast_stage.rs
@@ -36,7 +36,7 @@ use {
std::{
collections::{HashMap, HashSet},
iter::repeat_with,
- net::UdpSocket,
+ net::{SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, Ordering},
Arc, Mutex, RwLock,
@@ -87,6 +87,7 @@ impl BroadcastStageType {
blockstore: Arc<Blockstore>,
bank_forks: Arc<RwLock<BankForks>>,
shred_version: u16,
+ shred_receiver_address: Arc<RwLock<Option<SocketAddr>>>,
) -> BroadcastStage {
match self {
BroadcastStageType::Standard => BroadcastStage::new(
@@ -98,6 +99,7 @@ impl BroadcastStageType {
blockstore,
bank_forks,
StandardBroadcastRun::new(shred_version),
+ shred_receiver_address,
),
BroadcastStageType::FailEntryVerification => BroadcastStage::new(
@@ -109,6 +111,7 @@ impl BroadcastStageType {
blockstore,
bank_forks,
FailEntryVerificationBroadcastRun::new(shred_version),
+ Arc::new(RwLock::new(None)),
),
BroadcastStageType::BroadcastFakeShreds => BroadcastStage::new(
@@ -120,6 +123,7 @@ impl BroadcastStageType {
blockstore,
bank_forks,
BroadcastFakeShredsRun::new(0, shred_version),
+ Arc::new(RwLock::new(None)),
),
BroadcastStageType::BroadcastDuplicates(config) => BroadcastStage::new(
@@ -131,6 +135,7 @@ impl BroadcastStageType {
blockstore,
bank_forks,
BroadcastDuplicatesRun::new(shred_version, config.clone()),
+ Arc::new(RwLock::new(None)),
),
}
}
@@ -151,6 +156,7 @@ trait BroadcastRun {
cluster_info: &ClusterInfo,
sock: &UdpSocket,
bank_forks: &RwLock<BankForks>,
+ shred_receiver_address: &Arc<RwLock<Option<SocketAddr>>>,
) -> Result<()>;
fn record(&mut self, receiver: &Mutex<RecordReceiver>, blockstore: &Blockstore) -> Result<()>;
}
@@ -245,6 +251,7 @@ impl BroadcastStage {
blockstore: Arc<Blockstore>,
bank_forks: Arc<RwLock<BankForks>>,
broadcast_stage_run: impl BroadcastRun + Send + 'static + Clone,
+ shred_receiver_address: Arc<RwLock<Option<SocketAddr>>>,
) -> Self {
let (socket_sender, socket_receiver) = unbounded();
let (blockstore_sender, blockstore_receiver) = unbounded();
@@ -276,8 +283,16 @@ impl BroadcastStage {
let mut bs_transmit = broadcast_stage_run.clone();
let cluster_info = cluster_info.clone();
let bank_forks = bank_forks.clone();
+ let shred_receiver_address = shred_receiver_address.clone();
+
let run_transmit = move || loop {
- let res = bs_transmit.transmit(&socket_receiver, &cluster_info, &sock, &bank_forks);
+ let res = bs_transmit.transmit(
+ &socket_receiver,
+ &cluster_info,
+ &sock,
+ &bank_forks,
+ &shred_receiver_address,
+ );
let res = Self::handle_error(res, "solana-broadcaster-transmit");
if let Some(res) = res {
return res;
@@ -397,6 +412,7 @@ pub fn broadcast_shreds(
cluster_info: &ClusterInfo,
bank_forks: &RwLock<BankForks>,
socket_addr_space: &SocketAddrSpace,
+ shred_receiver_address: &Option<SocketAddr>,
) -> Result<()> {
let mut result = Ok(());
let mut shred_select = Measure::start("shred_select");
@@ -412,13 +428,22 @@ pub fn broadcast_shreds(
let cluster_nodes =
cluster_nodes_cache.get(slot, &root_bank, &working_bank, cluster_info);
update_peer_stats(&cluster_nodes, last_datapoint_submit);
- shreds.filter_map(move |shred| {
- cluster_nodes
- .get_broadcast_peer(&shred.id())?
- .tvu(Protocol::UDP)
- .ok()
- .filter(|addr| socket_addr_space.check(addr))
- .map(|addr| (shred.payload(), addr))
+ shreds.flat_map(move |shred| {
+ let mut addrs = Vec::with_capacity(2);
+ if let Some(shred_receiver_address) = shred_receiver_address {
+ addrs.push(*shred_receiver_address);
+ }
+
+ if let Some(peer) = cluster_nodes.get_broadcast_peer(&shred.id()) {
+ if let Ok(tvu) = peer.tvu(Protocol::UDP) {
+ addrs.push(tvu);
+ }
+ }
+
+ addrs
+ .into_iter()
+ .filter(|a| socket_addr_space.check(a))
+ .map(move |addr| (shred.payload(), addr))
})
})
.collect();
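
The broadcast path switches from filter_map (one destination per shred) to flat_map so every shred can also be duplicated to an optional shred receiver address. A standalone sketch of that fan-out, with hypothetical helper names:

```rust
use std::net::SocketAddr;

// Sketch of the new fan-out: each shred is sent to the optional shred
// receiver in addition to its turbine peer.
fn targets(
    shreds: &[Vec<u8>],
    shred_receiver: Option<SocketAddr>,
    peer_for: impl Fn(usize) -> Option<SocketAddr>, // stand-in for cluster peer lookup
) -> Vec<(Vec<u8>, SocketAddr)> {
    shreds
        .iter()
        .enumerate()
        .flat_map(|(i, payload)| {
            let mut addrs = Vec::with_capacity(2);
            if let Some(addr) = shred_receiver {
                addrs.push(addr);
            }
            if let Some(addr) = peer_for(i) {
                addrs.push(addr);
            }
            addrs.into_iter().map(move |addr| (payload.clone(), addr))
        })
        .collect()
}

fn main() {
    let receiver: SocketAddr = "127.0.0.1:8001".parse().unwrap();
    let peer: SocketAddr = "10.0.0.1:8001".parse().unwrap();
    let out = targets(&[vec![0u8; 8]], Some(receiver), |_| Some(peer));
    assert_eq!(out.len(), 2); // one shred, two destinations
    let out = targets(&[vec![0u8; 8]], None, |_| Some(peer));
    assert_eq!(out.len(), 1); // unchanged behavior when no receiver is configured
}
```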
@@ -620,6 +645,7 @@ pub mod test {
blockstore.clone(),
bank_forks,
StandardBroadcastRun::new(0),
+ Arc::new(RwLock::new(None)),
);
MockBroadcastStage {
@@ -659,7 +685,10 @@ pub mod test {
let ticks = create_ticks(max_tick_height - start_tick_height, 0, Hash::default());
for (i, tick) in ticks.into_iter().enumerate() {
entry_sender
- .send((bank.clone(), (tick, i as u64 + 1)))
+ .send(WorkingBankEntry {
+ bank: bank.clone(),
+ entries_ticks: vec![(tick, i as u64 + 1)],
+ })
.expect("Expect successful send to broadcast service");
}
}
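
Throughout this patch the WorkingBankEntry channel payload changes from a (bank, (entry, tick_height)) tuple to a struct that batches several (entry, tick_height) pairs per message. A sketch with stand-in types:

```rust
// Stand-in types; the real bank is an Arc<Bank> and entries are ledger Entry values.
struct Entry {
    is_tick: bool,
}

struct WorkingBankEntry {
    bank: u64, // slot number standing in for Arc<Bank>
    entries_ticks: Vec<(Entry, u64)>, // (entry, tick_height) pairs
}

fn main() {
    // A single message can now carry several entries for one bank.
    let msg = WorkingBankEntry {
        bank: 42,
        entries_ticks: vec![(Entry { is_tick: false }, 1), (Entry { is_tick: true }, 2)],
    };
    let last_tick_height = msg.entries_ticks.iter().last().unwrap().1;
    assert_eq!((msg.bank, last_tick_height), (42, 2));
    assert!(msg.entries_ticks[1].0.is_tick);
}
```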
diff --git a/core/src/broadcast_stage/broadcast_duplicates_run.rs b/core/src/broadcast_stage/broadcast_duplicates_run.rs
index 08eec89838..050e87aa66 100644
--- a/core/src/broadcast_stage/broadcast_duplicates_run.rs
+++ b/core/src/broadcast_stage/broadcast_duplicates_run.rs
@@ -265,6 +265,7 @@ impl BroadcastRun for BroadcastDuplicatesRun {
cluster_info: &ClusterInfo,
sock: &UdpSocket,
bank_forks: &RwLock<BankForks>,
+ _shred_receiver_addr: &Arc<RwLock<Option<SocketAddr>>>,
) -> Result<()> {
let (shreds, _) = receiver.lock().unwrap().recv()?;
if shreds.is_empty() {
diff --git a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs
index 01650aad32..6b23429029 100644
--- a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs
+++ b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs
@@ -132,6 +132,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
cluster_info: &ClusterInfo,
sock: &UdpSocket,
_bank_forks: &RwLock<BankForks>,
+ _shred_receiver_addr: &Arc<RwLock<Option<SocketAddr>>>,
) -> Result<()> {
for (data_shreds, batch_info) in receiver.lock().unwrap().iter() {
let fake = batch_info.is_some();
diff --git a/core/src/broadcast_stage/broadcast_utils.rs b/core/src/broadcast_stage/broadcast_utils.rs
index f9485d59a9..6150bf4fec 100644
--- a/core/src/broadcast_stage/broadcast_utils.rs
+++ b/core/src/broadcast_stage/broadcast_utils.rs
@@ -36,13 +36,22 @@ pub(super) fn recv_slot_entries(receiver: &Receiver<WorkingBankEntry>) -> Result<ReceiveResults>
32 * ShredData::capacity(/*merkle_proof_size*/ None).unwrap() as u64;
let timer = Duration::new(1, 0);
let recv_start = Instant::now();
- let (mut bank, (entry, mut last_tick_height)) = receiver.recv_timeout(timer)?;
- let mut entries = vec![entry];
+
+ let WorkingBankEntry {
+ mut bank,
+ entries_ticks,
+ } = receiver.recv_timeout(timer)?;
+ let mut last_tick_height = entries_ticks.iter().last().unwrap().1;
+ let mut entries: Vec<Entry> = entries_ticks.into_iter().map(|(e, _)| e).collect();
+
assert!(last_tick_height <= bank.max_tick_height());
// Drain channel
while last_tick_height != bank.max_tick_height() {
- let (try_bank, (entry, tick_height)) = match receiver.try_recv() {
+ let WorkingBankEntry {
+ bank: try_bank,
+ entries_ticks: new_entries_ticks,
+ } = match receiver.try_recv() {
Ok(working_bank_entry) => working_bank_entry,
Err(_) => break,
};
@@ -53,8 +62,8 @@ pub(super) fn recv_slot_entries(receiver: &Receiver<WorkingBankEntry>) -> Result<ReceiveResults>
entries.clear();
bank = try_bank;
}
- last_tick_height = tick_height;
- entries.push(entry);
+ last_tick_height = new_entries_ticks.iter().last().unwrap().1;
+ entries.extend(new_entries_ticks.into_iter().map(|(entry, _)| entry));
assert!(last_tick_height <= bank.max_tick_height());
}
@@ -65,11 +74,13 @@ pub(super) fn recv_slot_entries(receiver: &Receiver<WorkingBankEntry>) -> Result<ReceiveResults>
while last_tick_height != bank.max_tick_height()
&& serialized_batch_byte_count < target_serialized_batch_byte_count
{
- let (try_bank, (entry, tick_height)) =
- match receiver.recv_deadline(coalesce_start + ENTRY_COALESCE_DURATION) {
- Ok(working_bank_entry) => working_bank_entry,
- Err(_) => break,
- };
+ let WorkingBankEntry {
+ bank: try_bank,
+ entries_ticks: new_entries_ticks,
+ } = match receiver.recv_deadline(coalesce_start + ENTRY_COALESCE_DURATION) {
+ Ok(working_bank_entry) => working_bank_entry,
+ Err(_) => break,
+ };
// If the bank changed, that implies the previous slot was interrupted and we do not have to
// broadcast its entries.
if try_bank.slot() != bank.slot() {
@@ -79,10 +90,12 @@ pub(super) fn recv_slot_entries(receiver: &Receiver<WorkingBankEntry>) -> Result<ReceiveResults>
bank = try_bank;
coalesce_start = Instant::now();
}
- last_tick_height = tick_height;
- let entry_bytes = serialized_size(&entry)?;
- serialized_batch_byte_count += entry_bytes;
- entries.push(entry);
+ last_tick_height = new_entries_ticks.iter().last().unwrap().1;
+
+ for (entry, _) in &new_entries_ticks {
+ serialized_batch_byte_count += serialized_size(entry)?;
+ }
+ entries.extend(new_entries_ticks.into_iter().map(|(entry, _)| entry));
assert!(last_tick_height <= bank.max_tick_height());
}
let time_coalesced = coalesce_start.elapsed();
@@ -139,7 +152,11 @@ mod tests {
.map(|i| {
let entry = Entry::new(&last_hash, 1, vec![tx.clone()]);
last_hash = entry.hash;
- s.send((bank1.clone(), (entry.clone(), i))).unwrap();
+ s.send(WorkingBankEntry {
+ bank: bank1.clone(),
+ entries_ticks: vec![(entry.clone(), i)],
+ })
+ .unwrap();
entry
})
.collect();
@@ -173,11 +190,18 @@ mod tests {
last_hash = entry.hash;
// Interrupt slot 1 right before the last tick
if tick_height == expected_last_height {
- s.send((bank2.clone(), (entry.clone(), tick_height)))
- .unwrap();
+ s.send(WorkingBankEntry {
+ bank: bank2.clone(),
+ entries_ticks: vec![(entry.clone(), tick_height)],
+ })
+ .unwrap();
Some(entry)
} else {
- s.send((bank1.clone(), (entry, tick_height))).unwrap();
+ s.send(WorkingBankEntry {
+ bank: bank1.clone(),
+ entries_ticks: vec![(entry, tick_height)],
+ })
+ .unwrap();
None
}
})
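
recv_slot_entries now drains whole batches per message, extending the entry list and tracking the last tick height of each batch rather than one entry at a time. A runnable sketch of that drain loop using a std channel:

```rust
use std::sync::mpsc::{channel, TryRecvError};

fn main() {
    // Each message carries a batch of (entry, tick_height) pairs, mirroring
    // the reworked WorkingBankEntry; entries are strings here for brevity.
    let (sender, receiver) = channel::<Vec<(&'static str, u64)>>();
    sender.send(vec![("e1", 1)]).unwrap();
    sender.send(vec![("e2", 2), ("e3", 3)]).unwrap();

    let mut entries = Vec::new();
    let mut last_tick_height = 0;
    loop {
        match receiver.try_recv() {
            Ok(batch) => {
                // Track the height of the last tick in the batch, then keep all entries.
                last_tick_height = batch.iter().last().unwrap().1;
                entries.extend(batch.into_iter().map(|(entry, _)| entry));
            }
            Err(TryRecvError::Empty | TryRecvError::Disconnected) => break,
        }
    }
    assert_eq!(entries, vec!["e1", "e2", "e3"]);
    assert_eq!(last_tick_height, 3);
}
```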
diff --git a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs
index e7b899ab0f..61ea979402 100644
--- a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs
+++ b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs
@@ -3,7 +3,7 @@ use {
crate::cluster_nodes::ClusterNodesCache,
solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache, Shredder},
solana_sdk::{hash::Hash, signature::Keypair},
- std::{thread::sleep, time::Duration},
+ std::{net::SocketAddr, thread::sleep, time::Duration},
};
pub const NUM_BAD_SLOTS: u64 = 10;
@@ -162,6 +162,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
cluster_info: &ClusterInfo,
sock: &UdpSocket,
bank_forks: &RwLock<BankForks>,
+ shred_receiver_address: &Arc<RwLock<Option<SocketAddr>>>,
) -> Result<()> {
let (shreds, _) = receiver.lock().unwrap().recv()?;
broadcast_shreds(
@@ -173,6 +174,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
cluster_info,
bank_forks,
cluster_info.socket_addr_space(),
+ &shred_receiver_address.read().unwrap(),
)
}
fn record(&mut self, receiver: &Mutex<RecordReceiver>, blockstore: &Blockstore) -> Result<()> {
diff --git a/core/src/broadcast_stage/standard_broadcast_run.rs b/core/src/broadcast_stage/standard_broadcast_run.rs
index c22224b1fa..196883d453 100644
--- a/core/src/broadcast_stage/standard_broadcast_run.rs
+++ b/core/src/broadcast_stage/standard_broadcast_run.rs
@@ -17,7 +17,7 @@ use {
signature::Keypair,
timing::{duration_as_us, AtomicInterval},
},
- std::{sync::RwLock, time::Duration},
+ std::{net::SocketAddr, sync::RwLock, time::Duration},
};
#[derive(Clone)]
@@ -191,10 +191,22 @@ impl StandardBroadcastRun {
let brecv = Arc::new(Mutex::new(brecv));
//data
- let _ = self.transmit(&srecv, cluster_info, sock, bank_forks);
+ let _ = self.transmit(
+ &srecv,
+ cluster_info,
+ sock,
+ bank_forks,
+ &Arc::new(RwLock::new(None)),
+ );
let _ = self.record(&brecv, blockstore);
//coding
- let _ = self.transmit(&srecv, cluster_info, sock, bank_forks);
+ let _ = self.transmit(
+ &srecv,
+ cluster_info,
+ sock,
+ bank_forks,
+ &Arc::new(RwLock::new(None)),
+ );
let _ = self.record(&brecv, blockstore);
Ok(())
}
@@ -387,6 +399,7 @@ impl StandardBroadcastRun {
shreds: Arc<Vec<Shred>>,
broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
bank_forks: &RwLock<BankForks>,
+ shred_receiver_addr: &Option<SocketAddr>,
) -> Result<()> {
trace!("Broadcasting {:?} shreds", shreds.len());
let mut transmit_stats = TransmitShredsStats::default();
@@ -402,6 +415,7 @@ impl StandardBroadcastRun {
cluster_info,
bank_forks,
cluster_info.socket_addr_space(),
+ shred_receiver_addr,
)?;
transmit_time.stop();
@@ -471,9 +485,17 @@ impl BroadcastRun for StandardBroadcastRun {
cluster_info: &ClusterInfo,
sock: &UdpSocket,
bank_forks: &RwLock<BankForks>,
+ shred_receiver_address: &Arc<RwLock<Option<SocketAddr>>>,
) -> Result<()> {
let (shreds, batch_info) = receiver.lock().unwrap().recv()?;
- self.broadcast(sock, cluster_info, shreds, batch_info, bank_forks)
+ self.broadcast(
+ sock,
+ cluster_info,
+ shreds,
+ batch_info,
+ bank_forks,
+ &shred_receiver_address.read().unwrap(),
+ )
}
fn record(&mut self, receiver: &Mutex<RecordReceiver>, blockstore: &Blockstore) -> Result<()> {
let (shreds, slot_start_ts) = receiver.lock().unwrap().recv()?;
diff --git a/core/src/bundle_stage.rs b/core/src/bundle_stage.rs
new file mode 100644
index 0000000000..feff5512cc
--- /dev/null
+++ b/core/src/bundle_stage.rs
@@ -0,0 +1,435 @@
+//! The `bundle_stage` processes bundles, which are lists of transactions to be executed
+//! sequentially and atomically.
+use {
+ crate::{
+ banking_stage::decision_maker::{BufferedPacketsDecision, DecisionMaker},
+ bundle_stage::{
+ bundle_account_locker::BundleAccountLocker, bundle_consumer::BundleConsumer,
+ bundle_packet_receiver::BundleReceiver,
+ bundle_reserved_space_manager::BundleReservedSpaceManager,
+ bundle_stage_leader_metrics::BundleStageLeaderMetrics, committer::Committer,
+ },
+ packet_bundle::PacketBundle,
+ proxy::block_engine_stage::BlockBuilderFeeInfo,
+ qos_service::QosService,
+ tip_manager::TipManager,
+ unprocessed_transaction_storage::UnprocessedTransactionStorage,
+ },
+ crossbeam_channel::{Receiver, RecvTimeoutError},
+ solana_gossip::cluster_info::ClusterInfo,
+ solana_ledger::blockstore_processor::TransactionStatusSender,
+ solana_measure::measure,
+ solana_poh::poh_recorder::PohRecorder,
+ solana_runtime::{
+ bank_forks::BankForks, block_cost_limits::MAX_BLOCK_UNITS,
+ prioritization_fee_cache::PrioritizationFeeCache, vote_sender_types::ReplayVoteSender,
+ },
+ solana_sdk::timing::AtomicInterval,
+ std::{
+ collections::VecDeque,
+ sync::{
+ atomic::{AtomicBool, AtomicU64, Ordering},
+ Arc, Mutex, RwLock,
+ },
+ thread::{self, Builder, JoinHandle},
+ time::{Duration, Instant},
+ },
+};
+
+pub mod bundle_account_locker;
+mod bundle_consumer;
+mod bundle_packet_deserializer;
+mod bundle_packet_receiver;
+mod bundle_reserved_space_manager;
+pub(crate) mod bundle_stage_leader_metrics;
+mod committer;
+
+const MAX_BUNDLE_RETRY_DURATION: Duration = Duration::from_millis(10);
+const SLOT_BOUNDARY_CHECK_PERIOD: Duration = Duration::from_millis(10);
+
+// Stats emitted periodically
+#[derive(Default)]
+pub struct BundleStageLoopMetrics {
+ last_report: AtomicInterval,
+ id: u32,
+
+ // total received
+ num_bundles_received: AtomicU64,
+ num_packets_received: AtomicU64,
+
+ // newly buffered
+ newly_buffered_bundles_count: AtomicU64,
+
+ // currently buffered
+ current_buffered_bundles_count: AtomicU64,
+ current_buffered_packets_count: AtomicU64,
+
+ // buffered due to cost model
+ cost_model_buffered_bundles_count: AtomicU64,
+ cost_model_buffered_packets_count: AtomicU64,
+
+ // number of bundles dropped during insertion
+ num_bundles_dropped: AtomicU64,
+
+ // timings
+ receive_and_buffer_bundles_elapsed_us: AtomicU64,
+ process_buffered_bundles_elapsed_us: AtomicU64,
+}
+
+impl BundleStageLoopMetrics {
+ fn new(id: u32) -> Self {
+ BundleStageLoopMetrics {
+ id,
+ ..BundleStageLoopMetrics::default()
+ }
+ }
+
+ pub fn increment_num_bundles_received(&mut self, count: u64) {
+ self.num_bundles_received
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_num_packets_received(&mut self, count: u64) {
+ self.num_packets_received
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_newly_buffered_bundles_count(&mut self, count: u64) {
+ self.newly_buffered_bundles_count
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_current_buffered_bundles_count(&mut self, count: u64) {
+ self.current_buffered_bundles_count
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_current_buffered_packets_count(&mut self, count: u64) {
+ self.current_buffered_packets_count
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_cost_model_buffered_bundles_count(&mut self, count: u64) {
+ self.cost_model_buffered_bundles_count
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_cost_model_buffered_packets_count(&mut self, count: u64) {
+ self.cost_model_buffered_packets_count
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_num_bundles_dropped(&mut self, count: u64) {
+ self.num_bundles_dropped.fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_receive_and_buffer_bundles_elapsed_us(&mut self, count: u64) {
+ self.receive_and_buffer_bundles_elapsed_us
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_process_buffered_bundles_elapsed_us(&mut self, count: u64) {
+ self.process_buffered_bundles_elapsed_us
+ .fetch_add(count, Ordering::Relaxed);
+ }
+}
+
+impl BundleStageLoopMetrics {
+ fn maybe_report(&mut self, report_interval_ms: u64) {
+ if self.last_report.should_update(report_interval_ms) {
+ datapoint_info!(
+ "bundle_stage-loop_stats",
+ ("id", self.id, i64),
+ (
+ "num_bundles_received",
+ self.num_bundles_received.swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "num_packets_received",
+ self.num_packets_received.swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "newly_buffered_bundles_count",
+ self.newly_buffered_bundles_count.swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "current_buffered_bundles_count",
+ self.current_buffered_bundles_count
+ .swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "current_buffered_packets_count",
+ self.current_buffered_packets_count
+ .swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "num_bundles_dropped",
+ self.num_bundles_dropped.swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "receive_and_buffer_bundles_elapsed_us",
+ self.receive_and_buffer_bundles_elapsed_us
+ .swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "process_buffered_bundles_elapsed_us",
+ self.process_buffered_bundles_elapsed_us
+ .swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ );
+ }
+ }
+}
+
+pub struct BundleStage {
+ bundle_thread: JoinHandle<()>,
+}
+
+impl BundleStage {
+ #[allow(clippy::new_ret_no_self)]
+ #[allow(clippy::too_many_arguments)]
+ pub fn new(
+ cluster_info: &Arc<ClusterInfo>,
+ poh_recorder: &Arc<RwLock<PohRecorder>>,
+ bundle_receiver: Receiver<Vec<PacketBundle>>,
+ transaction_status_sender: Option<TransactionStatusSender>,
+ replay_vote_sender: ReplayVoteSender,
+ log_messages_bytes_limit: Option<usize>,
+ exit: Arc<AtomicBool>,
+ tip_manager: TipManager,
+ bundle_account_locker: BundleAccountLocker,
+ block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+ preallocated_bundle_cost: u64,
+ bank_forks: Arc<RwLock<BankForks>>,
+ prioritization_fee_cache: &Arc<PrioritizationFeeCache>,
+ ) -> Self {
+ Self::start_bundle_thread(
+ cluster_info,
+ poh_recorder,
+ bundle_receiver,
+ transaction_status_sender,
+ replay_vote_sender,
+ log_messages_bytes_limit,
+ exit,
+ tip_manager,
+ bundle_account_locker,
+ MAX_BUNDLE_RETRY_DURATION,
+ block_builder_fee_info,
+ preallocated_bundle_cost,
+ bank_forks,
+ prioritization_fee_cache,
+ )
+ }
+
+ pub fn join(self) -> thread::Result<()> {
+ self.bundle_thread.join()
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn start_bundle_thread(
+ cluster_info: &Arc<ClusterInfo>,
+ poh_recorder: &Arc<RwLock<PohRecorder>>,
+ bundle_receiver: Receiver<Vec<PacketBundle>>,
+ transaction_status_sender: Option<TransactionStatusSender>,
+ replay_vote_sender: ReplayVoteSender,
+ log_message_bytes_limit: Option<usize>,
+ exit: Arc<AtomicBool>,
+ tip_manager: TipManager,
+ bundle_account_locker: BundleAccountLocker,
+ max_bundle_retry_duration: Duration,
+ block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+ preallocated_bundle_cost: u64,
+ bank_forks: Arc<RwLock<BankForks>>,
+ prioritization_fee_cache: &Arc<PrioritizationFeeCache>,
+ ) -> Self {
+ const BUNDLE_STAGE_ID: u32 = 10_000;
+ let poh_recorder = poh_recorder.clone();
+ let cluster_info = cluster_info.clone();
+
+ let mut bundle_receiver =
+ BundleReceiver::new(BUNDLE_STAGE_ID, bundle_receiver, bank_forks, Some(5));
+
+ let committer = Committer::new(
+ transaction_status_sender,
+ replay_vote_sender,
+ prioritization_fee_cache.clone(),
+ );
+ let decision_maker = DecisionMaker::new(cluster_info.id(), poh_recorder.clone());
+
+ let unprocessed_bundle_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(1_000),
+ VecDeque::with_capacity(1_000),
+ );
+
+ let reserved_ticks = poh_recorder
+ .read()
+ .unwrap()
+ .ticks_per_slot()
+ .saturating_mul(8)
+ .saturating_div(10);
+
+ // The first 80% of the block, based on poh ticks, has `preallocated_bundle_cost` fewer compute units.
+ // The last 20% has full compute so blockspace is maximized if BundleStage is idle.
+ let reserved_space = BundleReservedSpaceManager::new(
+ MAX_BLOCK_UNITS,
+ preallocated_bundle_cost,
+ reserved_ticks,
+ );
+
+ let consumer = BundleConsumer::new(
+ committer,
+ poh_recorder.read().unwrap().new_recorder(),
+ QosService::new(BUNDLE_STAGE_ID),
+ log_message_bytes_limit,
+ tip_manager,
+ bundle_account_locker,
+ block_builder_fee_info.clone(),
+ max_bundle_retry_duration,
+ cluster_info,
+ reserved_space,
+ );
+
+ let bundle_thread = Builder::new()
+ .name("solBundleStgTx".to_string())
+ .spawn(move || {
+ Self::process_loop(
+ &mut bundle_receiver,
+ decision_maker,
+ consumer,
+ BUNDLE_STAGE_ID,
+ unprocessed_bundle_storage,
+ exit,
+ );
+ })
+ .unwrap();
+
+ Self { bundle_thread }
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn process_loop(
+ bundle_receiver: &mut BundleReceiver,
+ decision_maker: DecisionMaker,
+ mut consumer: BundleConsumer,
+ id: u32,
+ mut unprocessed_bundle_storage: UnprocessedTransactionStorage,
+ exit: Arc,
+ ) {
+ let mut last_metrics_update = Instant::now();
+
+ let mut bundle_stage_metrics = BundleStageLoopMetrics::new(id);
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(id);
+
+ while !exit.load(Ordering::Relaxed) {
+ if !unprocessed_bundle_storage.is_empty()
+ || last_metrics_update.elapsed() >= SLOT_BOUNDARY_CHECK_PERIOD
+ {
+ let (_, process_buffered_packets_time) = measure!(
+ Self::process_buffered_bundles(
+ &decision_maker,
+ &mut consumer,
+ &mut unprocessed_bundle_storage,
+ &mut bundle_stage_leader_metrics,
+ ),
+ "process_buffered_packets",
+ );
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_process_buffered_packets_us(process_buffered_packets_time.as_us());
+ last_metrics_update = Instant::now();
+ }
+
+ match bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_bundle_storage,
+ &mut bundle_stage_metrics,
+ &mut bundle_stage_leader_metrics,
+ ) {
+ Ok(_) | Err(RecvTimeoutError::Timeout) => (),
+ Err(RecvTimeoutError::Disconnected) => break,
+ }
+
+ let bundle_storage = unprocessed_bundle_storage.bundle_storage().unwrap();
+ bundle_stage_metrics.increment_current_buffered_bundles_count(
+ bundle_storage.unprocessed_bundles_len() as u64,
+ );
+ bundle_stage_metrics.increment_current_buffered_packets_count(
+ bundle_storage.unprocessed_packets_len() as u64,
+ );
+ bundle_stage_metrics.increment_cost_model_buffered_bundles_count(
+ bundle_storage.cost_model_buffered_bundles_len() as u64,
+ );
+ bundle_stage_metrics.increment_cost_model_buffered_packets_count(
+ bundle_storage.cost_model_buffered_packets_len() as u64,
+ );
+ bundle_stage_metrics.maybe_report(1_000);
+ }
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn process_buffered_bundles(
+ decision_maker: &DecisionMaker,
+ consumer: &mut BundleConsumer,
+ unprocessed_bundle_storage: &mut UnprocessedTransactionStorage,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) {
+ let (decision, make_decision_time) =
+ measure!(decision_maker.make_consume_or_forward_decision());
+
+ let (metrics_action, banking_stage_metrics_action) =
+ bundle_stage_leader_metrics.check_leader_slot_boundary(decision.bank_start());
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_make_decision_us(make_decision_time.as_us());
+
+ match decision {
+ // BufferedPacketsDecision::Consume means this leader is scheduled to be running at the moment.
+ // Execute, record, and commit as many bundles as possible given time, compute, and other constraints.
+ BufferedPacketsDecision::Consume(bank_start) => {
+ // Take metrics action before consume packets (potentially resetting the
+ // slot metrics tracker to the next slot) so that we don't count the
+ // packet processing metrics from the next slot towards the metrics
+ // of the previous slot
+ bundle_stage_leader_metrics
+ .apply_action(metrics_action, banking_stage_metrics_action);
+
+ let (_, consume_buffered_packets_time) = measure!(
+ consumer.consume_buffered_bundles(
+ &bank_start,
+ unprocessed_bundle_storage,
+ bundle_stage_leader_metrics,
+ ),
+ "consume_buffered_bundles",
+ );
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_consume_buffered_packets_us(consume_buffered_packets_time.as_us());
+ }
+ // BufferedPacketsDecision::Forward means the leader slot is far away.
+ // Bundles aren't forwarded because that would break atomicity guarantees, so just drop them.
+ BufferedPacketsDecision::Forward => {
+ let (_num_bundles_cleared, _num_cost_model_buffered_bundles) =
+ unprocessed_bundle_storage.bundle_storage().unwrap().reset();
+
+ // TODO (LB): add metrics here for how many bundles were cleared
+
+ bundle_stage_leader_metrics
+ .apply_action(metrics_action, banking_stage_metrics_action);
+ }
+ // BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold means the validator
+ // is approaching the leader slot, so hold bundles. As above, bundles aren't forwarded because
+ // that would break atomicity guarantees.
+ BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold => {
+ bundle_stage_leader_metrics
+ .apply_action(metrics_action, banking_stage_metrics_action);
+ }
+ }
+ }
+}
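
The reserved-space arithmetic above carves preallocated_bundle_cost out of the block limit for the first 80% of a slot's ticks and returns it afterwards. A sketch of that computation; the constants are illustrative, not the real limits:

```rust
// 80% of the slot's ticks, computed with saturating integer math as above.
fn reserved_ticks(ticks_per_slot: u64) -> u64 {
    ticks_per_slot.saturating_mul(8).saturating_div(10)
}

// Early in the slot, BankingStage sees a reduced limit so bundles have
// guaranteed room; late in the slot the full limit comes back.
fn block_limit(current_tick: u64, ticks_per_slot: u64, max_units: u64, reserved: u64) -> u64 {
    if current_tick < reserved_ticks(ticks_per_slot) {
        max_units.saturating_sub(reserved) // carve out space for bundles
    } else {
        max_units // BundleStage idle or late: give BankingStage everything
    }
}

fn main() {
    assert_eq!(reserved_ticks(64), 51); // 80% of a 64-tick slot
    assert_eq!(block_limit(0, 64, 48_000_000, 3_000_000), 45_000_000);
    assert_eq!(block_limit(60, 64, 48_000_000, 3_000_000), 48_000_000);
}
```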
diff --git a/core/src/bundle_stage/bundle_account_locker.rs b/core/src/bundle_stage/bundle_account_locker.rs
new file mode 100644
index 0000000000..7382fcb5b0
--- /dev/null
+++ b/core/src/bundle_stage/bundle_account_locker.rs
@@ -0,0 +1,328 @@
+//! Handles pre-locking bundle accounts so that the accounts a bundle touches can be reserved
+// ahead of time for execution. Also ensures that ALL accounts mentioned across a bundle are locked
+// to avoid race conditions between BundleStage and BankingStage.
+//
+// For instance, imagine a bundle with three transactions and the set of accounts for each transaction
+// is: {{A, B}, {B, C}, {C, D}}. We need to lock A, B, and C even though only one is executed at a time.
+// Imagine BundleStage is in the middle of processing {C, D} and we didn't have a lock on accounts {A, B, C}.
+// In this situation, there's a chance that BankingStage can process a transaction containing A or B
+// and commit the results before the bundle completes. By the time the bundle commits the new account
+// state for {A, B, C}, A and B would be incorrect, the entries containing the bundle would be
+// replayed improperly, and that leader would have produced an invalid block.
+use {
+ solana_runtime::bank::Bank,
+ solana_sdk::{bundle::SanitizedBundle, pubkey::Pubkey, transaction::TransactionAccountLocks},
+ std::{
+ collections::{hash_map::Entry, HashMap, HashSet},
+ sync::{Arc, Mutex, MutexGuard},
+ },
+ thiserror::Error,
+};
+
+#[derive(Clone, Error, Debug)]
+pub enum BundleAccountLockerError {
+ #[error("locking error")]
+ LockingError,
+}
+
+pub type BundleAccountLockerResult<T> = Result<T, BundleAccountLockerError>;
+
+pub struct LockedBundle<'a, 'b> {
+ bundle_account_locker: &'a BundleAccountLocker,
+ sanitized_bundle: &'b SanitizedBundle,
+ bank: Arc<Bank>,
+}
+
+impl<'a, 'b> LockedBundle<'a, 'b> {
+ pub fn new(
+ bundle_account_locker: &'a BundleAccountLocker,
+ sanitized_bundle: &'b SanitizedBundle,
+ bank: &Arc<Bank>,
+ ) -> Self {
+ Self {
+ bundle_account_locker,
+ sanitized_bundle,
+ bank: bank.clone(),
+ }
+ }
+
+ pub fn sanitized_bundle(&self) -> &SanitizedBundle {
+ self.sanitized_bundle
+ }
+}
+
+// Automatically unlock bundle accounts when destructed
+impl<'a, 'b> Drop for LockedBundle<'a, 'b> {
+ fn drop(&mut self) {
+ let _ = self
+ .bundle_account_locker
+ .unlock_bundle_accounts(self.sanitized_bundle, &self.bank);
+ }
+}
+
+#[derive(Default, Clone)]
+pub struct BundleAccountLocks {
+ read_locks: HashMap<Pubkey, u64>,
+ write_locks: HashMap<Pubkey, u64>,
+}
+
+impl BundleAccountLocks {
+ pub fn read_locks(&self) -> HashSet<Pubkey> {
+ self.read_locks.keys().cloned().collect()
+ }
+
+ pub fn write_locks(&self) -> HashSet<Pubkey> {
+ self.write_locks.keys().cloned().collect()
+ }
+
+ pub fn lock_accounts(
+ &mut self,
+ read_locks: HashMap<Pubkey, u64>,
+ write_locks: HashMap<Pubkey, u64>,
+ ) {
+ for (acc, count) in read_locks {
+ *self.read_locks.entry(acc).or_insert(0) += count;
+ }
+ for (acc, count) in write_locks {
+ *self.write_locks.entry(acc).or_insert(0) += count;
+ }
+ }
+
+ pub fn unlock_accounts(
+ &mut self,
+ read_locks: HashMap<Pubkey, u64>,
+ write_locks: HashMap<Pubkey, u64>,
+ ) {
+ for (acc, count) in read_locks {
+ if let Entry::Occupied(mut entry) = self.read_locks.entry(acc) {
+ let val = entry.get_mut();
+ *val = val.saturating_sub(count);
+ if entry.get() == &0 {
+ let _ = entry.remove();
+ }
+ } else {
+ warn!("error unlocking read-locked account, account: {:?}", acc);
+ }
+ }
+ for (acc, count) in write_locks {
+ if let Entry::Occupied(mut entry) = self.write_locks.entry(acc) {
+ let val = entry.get_mut();
+ *val = val.saturating_sub(count);
+ if entry.get() == &0 {
+ let _ = entry.remove();
+ }
+ } else {
+ warn!("error unlocking write-locked account, account: {:?}", acc);
+ }
+ }
+ }
+}
+
+#[derive(Clone, Default)]
+pub struct BundleAccountLocker {
+ account_locks: Arc<Mutex<BundleAccountLocks>>,
+}
+
+impl BundleAccountLocker {
+ /// used in BankingStage during TransactionBatch construction to ensure that BankingStage
+ /// doesn't lock anything currently locked in the BundleAccountLocker
+ pub fn read_locks(&self) -> HashSet<Pubkey> {
+ self.account_locks.lock().unwrap().read_locks()
+ }
+
+ /// used in BankingStage during TransactionBatch construction to ensure that BankingStage
+ /// doesn't lock anything currently locked in the BundleAccountLocker
+ pub fn write_locks(&self) -> HashSet<Pubkey> {
+ self.account_locks.lock().unwrap().write_locks()
+ }
+
+ /// used in BankingStage during TransactionBatch construction to ensure that BankingStage
+ /// doesn't lock anything currently locked in the BundleAccountLocker
+ pub fn account_locks(&self) -> MutexGuard<BundleAccountLocks> {
+ self.account_locks.lock().unwrap()
+ }
+
+ /// Prepares a locked bundle and returns a LockedBundle containing locked accounts.
+ /// When a LockedBundle is dropped, the accounts are automatically unlocked
+ pub fn prepare_locked_bundle<'a, 'b>(
+ &'a self,
+ sanitized_bundle: &'b SanitizedBundle,
+ bank: &Arc<Bank>,
+ ) -> BundleAccountLockerResult<LockedBundle<'a, 'b>> {
+ let (read_locks, write_locks) = Self::get_read_write_locks(sanitized_bundle, bank)?;
+
+ self.account_locks
+ .lock()
+ .unwrap()
+ .lock_accounts(read_locks, write_locks);
+ Ok(LockedBundle::new(self, sanitized_bundle, bank))
+ }
+
+ /// Unlocks bundle accounts. Note that LockedBundle::drop will auto-drop the bundle account locks
+ fn unlock_bundle_accounts(
+ &self,
+ sanitized_bundle: &SanitizedBundle,
+ bank: &Bank,
+ ) -> BundleAccountLockerResult<()> {
+ let (read_locks, write_locks) = Self::get_read_write_locks(sanitized_bundle, bank)?;
+
+ self.account_locks
+ .lock()
+ .unwrap()
+ .unlock_accounts(read_locks, write_locks);
+ Ok(())
+ }
+
+ /// Returns the read and write locks for this bundle
+ /// Each lock type contains a HashMap which maps Pubkey to number of locks held
+ fn get_read_write_locks(
+ bundle: &SanitizedBundle,
+ bank: &Bank,
+ ) -> BundleAccountLockerResult<(HashMap<Pubkey, u64>, HashMap<Pubkey, u64>)> {
+ let transaction_locks: Vec<TransactionAccountLocks> = bundle
+ .transactions
+ .iter()
+ .filter_map(|tx| {
+ tx.get_account_locks(bank.get_transaction_account_lock_limit())
+ .ok()
+ })
+ .collect();
+
+ if transaction_locks.len() != bundle.transactions.len() {
+ return Err(BundleAccountLockerError::LockingError);
+ }
+
+ let bundle_read_locks = transaction_locks
+ .iter()
+ .flat_map(|tx| tx.readonly.iter().map(|a| **a));
+ let bundle_read_locks =
+ bundle_read_locks
+ .into_iter()
+ .fold(HashMap::new(), |mut map, acc| {
+ *map.entry(acc).or_insert(0) += 1;
+ map
+ });
+
+ let bundle_write_locks = transaction_locks
+ .iter()
+ .flat_map(|tx| tx.writable.iter().map(|a| **a));
+ let bundle_write_locks =
+ bundle_write_locks
+ .into_iter()
+ .fold(HashMap::new(), |mut map, acc| {
+ *map.entry(acc).or_insert(0) += 1;
+ map
+ });
+
+ Ok((bundle_read_locks, bundle_write_locks))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use {
+ crate::{
+ bundle_stage::bundle_account_locker::BundleAccountLocker,
+ immutable_deserialized_bundle::ImmutableDeserializedBundle,
+ packet_bundle::PacketBundle,
+ },
+ solana_ledger::genesis_utils::create_genesis_config,
+ solana_perf::packet::PacketBatch,
+ solana_runtime::{
+ bank::Bank, genesis_utils::GenesisConfigInfo,
+ transaction_error_metrics::TransactionErrorMetrics,
+ },
+ solana_sdk::{
+ packet::Packet, signature::Signer, signer::keypair::Keypair, system_program,
+ system_transaction::transfer, transaction::VersionedTransaction,
+ },
+ std::{collections::HashSet, sync::Arc},
+ };
+
+ #[test]
+ fn test_simple_lock_bundles() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(2);
+ let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let bundle_account_locker = BundleAccountLocker::default();
+
+ let kp0 = Keypair::new();
+ let kp1 = Keypair::new();
+
+ let tx0 = VersionedTransaction::from(transfer(
+ &mint_keypair,
+ &kp0.pubkey(),
+ 1,
+ genesis_config.hash(),
+ ));
+ let tx1 = VersionedTransaction::from(transfer(
+ &mint_keypair,
+ &kp1.pubkey(),
+ 1,
+ genesis_config.hash(),
+ ));
+
+ let mut packet_bundle0 = PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, &tx0).unwrap()]),
+ bundle_id: tx0.signatures[0].to_string(),
+ };
+ let mut packet_bundle1 = PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, &tx1).unwrap()]),
+ bundle_id: tx1.signatures[0].to_string(),
+ };
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+
+ let sanitized_bundle0 = ImmutableDeserializedBundle::new(&mut packet_bundle0, None)
+ .unwrap()
+ .build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors)
+ .expect("sanitize bundle 0");
+ let sanitized_bundle1 = ImmutableDeserializedBundle::new(&mut packet_bundle1, None)
+ .unwrap()
+ .build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors)
+ .expect("sanitize bundle 1");
+
+ let locked_bundle0 = bundle_account_locker
+ .prepare_locked_bundle(&sanitized_bundle0, &bank)
+ .unwrap();
+
+ assert_eq!(
+ bundle_account_locker.write_locks(),
+ HashSet::from_iter([mint_keypair.pubkey(), kp0.pubkey()])
+ );
+ assert_eq!(
+ bundle_account_locker.read_locks(),
+ HashSet::from_iter([system_program::id()])
+ );
+
+ let locked_bundle1 = bundle_account_locker
+ .prepare_locked_bundle(&sanitized_bundle1, &bank)
+ .unwrap();
+ assert_eq!(
+ bundle_account_locker.write_locks(),
+ HashSet::from_iter([mint_keypair.pubkey(), kp0.pubkey(), kp1.pubkey()])
+ );
+ assert_eq!(
+ bundle_account_locker.read_locks(),
+ HashSet::from_iter([system_program::id()])
+ );
+
+ drop(locked_bundle0);
+ assert_eq!(
+ bundle_account_locker.write_locks(),
+ HashSet::from_iter([mint_keypair.pubkey(), kp1.pubkey()])
+ );
+ assert_eq!(
+ bundle_account_locker.read_locks(),
+ HashSet::from_iter([system_program::id()])
+ );
+
+ drop(locked_bundle1);
+ assert!(bundle_account_locker.write_locks().is_empty());
+ assert!(bundle_account_locker.read_locks().is_empty());
+ }
+}
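
The lock maps above are reference counts per account, so overlapping bundles can hold the same account and release independently without clearing each other's locks. A minimal sketch of that counting scheme, with string keys standing in for Pubkeys:

```rust
use std::collections::{hash_map::Entry, HashMap};

// Each lock is a count: locking increments, unlocking decrements, and the
// entry is removed only when the count reaches zero.
#[derive(Default)]
struct Locks(HashMap<&'static str, u64>);

impl Locks {
    fn lock(&mut self, acc: &'static str) {
        *self.0.entry(acc).or_insert(0) += 1;
    }
    fn unlock(&mut self, acc: &'static str) {
        if let Entry::Occupied(mut e) = self.0.entry(acc) {
            *e.get_mut() = e.get().saturating_sub(1);
            if *e.get() == 0 {
                e.remove();
            }
        }
    }
}

fn main() {
    let mut write_locks = Locks::default();
    write_locks.lock("mint"); // bundle 0
    write_locks.lock("mint"); // bundle 1 overlaps on the same account
    write_locks.unlock("mint"); // bundle 0 done; account stays locked
    assert!(write_locks.0.contains_key("mint"));
    write_locks.unlock("mint"); // bundle 1 done; fully released
    assert!(write_locks.0.is_empty());
}
```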
diff --git a/core/src/bundle_stage/bundle_consumer.rs b/core/src/bundle_stage/bundle_consumer.rs
new file mode 100644
index 0000000000..83f3619fd6
--- /dev/null
+++ b/core/src/bundle_stage/bundle_consumer.rs
@@ -0,0 +1,1589 @@
+use {
+ crate::{
+ banking_stage::committer::CommitTransactionDetails,
+ bundle_stage::{
+ bundle_account_locker::{BundleAccountLocker, LockedBundle},
+ bundle_reserved_space_manager::BundleReservedSpaceManager,
+ bundle_stage_leader_metrics::BundleStageLeaderMetrics,
+ committer::Committer,
+ },
+ consensus_cache_updater::ConsensusCacheUpdater,
+ immutable_deserialized_bundle::ImmutableDeserializedBundle,
+ leader_slot_banking_stage_metrics::ProcessTransactionsSummary,
+ leader_slot_banking_stage_timing_metrics::LeaderExecuteAndCommitTimings,
+ proxy::block_engine_stage::BlockBuilderFeeInfo,
+ qos_service::QosService,
+ tip_manager::TipManager,
+ unprocessed_transaction_storage::UnprocessedTransactionStorage,
+ },
+ solana_bundle::{
+ bundle_execution::{load_and_execute_bundle, BundleExecutionMetrics},
+ BundleExecutionError, BundleExecutionResult, TipError,
+ },
+ solana_gossip::cluster_info::ClusterInfo,
+ solana_measure::{measure, measure_us},
+ solana_poh::poh_recorder::{BankStart, RecordTransactionsSummary, TransactionRecorder},
+ solana_runtime::{
+ bank::Bank, cost_model::TransactionCost, transaction_error_metrics::TransactionErrorMetrics,
+ },
+ solana_sdk::{
+ bundle::SanitizedBundle,
+ clock::{Slot, MAX_PROCESSING_AGE},
+ feature_set,
+ pubkey::Pubkey,
+ transaction::{self},
+ },
+ std::{
+ collections::HashSet,
+ sync::{Arc, Mutex},
+ time::{Duration, Instant},
+ },
+};
+
+pub struct ExecuteRecordCommitResult {
+ commit_transaction_details: Vec<CommitTransactionDetails>,
+ result: BundleExecutionResult<()>,
+ execution_metrics: BundleExecutionMetrics,
+ execute_and_commit_timings: LeaderExecuteAndCommitTimings,
+ transaction_error_counter: TransactionErrorMetrics,
+}
+
+pub struct BundleConsumer {
+ committer: Committer,
+ transaction_recorder: TransactionRecorder,
+ qos_service: QosService,
+ log_messages_bytes_limit: Option<usize>,
+
+ consensus_cache_updater: ConsensusCacheUpdater,
+
+ tip_manager: TipManager,
+ last_tip_update_slot: Slot,
+
+ blacklisted_accounts: HashSet<Pubkey>,
+
+ // Manages account locks across multiple transactions within a bundle to prevent race conditions
+ // with BankingStage
+ bundle_account_locker: BundleAccountLocker,
+
+ block_builder_fee_info: Arc<Mutex<BlockBuilderFeeInfo>>,
+
+ max_bundle_retry_duration: Duration,
+
+ cluster_info: Arc<ClusterInfo>,
+
+ reserved_space: BundleReservedSpaceManager,
+}
+
+impl BundleConsumer {
+ #[allow(clippy::too_many_arguments)]
+ pub fn new(
+ committer: Committer,
+ transaction_recorder: TransactionRecorder,
+ qos_service: QosService,
+ log_messages_bytes_limit: Option<usize>,
+ tip_manager: TipManager,
+ bundle_account_locker: BundleAccountLocker,
+ block_builder_fee_info: Arc<Mutex<BlockBuilderFeeInfo>>,
+ max_bundle_retry_duration: Duration,
+ cluster_info: Arc<ClusterInfo>,
+ reserved_space: BundleReservedSpaceManager,
+ ) -> Self {
+ Self {
+ committer,
+ transaction_recorder,
+ qos_service,
+ log_messages_bytes_limit,
+ consensus_cache_updater: ConsensusCacheUpdater::default(),
+ tip_manager,
+ // MAX because sending tips during slot 0 in tests doesn't work
+ last_tip_update_slot: u64::MAX,
+ blacklisted_accounts: HashSet::default(),
+ bundle_account_locker,
+ block_builder_fee_info,
+ max_bundle_retry_duration,
+ cluster_info,
+ reserved_space,
+ }
+ }
+
+ // A bundle is a series of transactions to be executed sequentially, atomically, and all-or-nothing.
+ // Sequentially:
+ // - Transactions are executed in order
+ // Atomically:
+ // - All transactions in a bundle get recorded to PoH and committed to the bank in the same slot. Account locks
+ // for all accounts in all transactions in a bundle are held during the entire execution to remove PoH record race conditions
+ // with transactions in BankingStage.
+ // All-or-nothing:
+ // - All transactions are committed or none. Modified state for the entire bundle isn't recorded to PoH and committed to the
+ // bank until all transactions in the bundle have executed.
+ //
+ // Some corner cases to be aware of when working with BundleStage:
+ // A bundle is not allowed to call the Tip Payment program (nor is a transaction in BankingStage).
+ // - This is to avoid stealing of tips by malicious parties with bundles that crank the tip
+ // payment program and set the tip receiver to themselves.
+ // A bundle is not allowed to touch consensus-related accounts
+ // - This is to avoid stalling the voting BankingStage threads.
+ pub fn consume_buffered_bundles(
+ &mut self,
+ bank_start: &BankStart,
+ unprocessed_transaction_storage: &mut UnprocessedTransactionStorage,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) {
+ self.maybe_update_blacklist(bank_start);
+ self.reserved_space.tick(&bank_start.working_bank);
+
+ let reached_end_of_slot = unprocessed_transaction_storage.process_bundles(
+ bank_start.working_bank.clone(),
+ bundle_stage_leader_metrics,
+ &self.blacklisted_accounts,
+ |bundles, bundle_stage_leader_metrics| {
+ Self::do_process_bundles(
+ &self.bundle_account_locker,
+ &self.tip_manager,
+ &mut self.last_tip_update_slot,
+ &self.cluster_info,
+ &self.block_builder_fee_info,
+ &self.committer,
+ &self.transaction_recorder,
+ &self.qos_service,
+ &self.log_messages_bytes_limit,
+ self.max_bundle_retry_duration,
+ &self.reserved_space,
+ bundles,
+ bank_start,
+ bundle_stage_leader_metrics,
+ )
+ },
+ );
+
+ if reached_end_of_slot {
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .set_end_of_slot_unprocessed_buffer_len(
+ unprocessed_transaction_storage.len() as u64
+ );
+ }
+ }
+
+ /// Blacklist is updated with the tip payment program + any consensus accounts.
+ fn maybe_update_blacklist(&mut self, bank_start: &BankStart) {
+ if self
+ .consensus_cache_updater
+ .maybe_update(&bank_start.working_bank)
+ {
+ self.blacklisted_accounts = self
+ .consensus_cache_updater
+ .consensus_accounts_cache()
+ .union(&HashSet::from_iter([self
+ .tip_manager
+ .tip_payment_program_id()]))
+ .cloned()
+ .collect();
+
+ debug!(
+ "updated blacklist with {} accounts",
+ self.blacklisted_accounts.len()
+ );
+ }
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn do_process_bundles(
+ bundle_account_locker: &BundleAccountLocker,
+ tip_manager: &TipManager,
+ last_tip_updated_slot: &mut Slot,
+ cluster_info: &Arc<ClusterInfo>,
+ block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+ committer: &Committer,
+ recorder: &TransactionRecorder,
+ qos_service: &QosService,
+ log_messages_bytes_limit: &Option<usize>,
+ max_bundle_retry_duration: Duration,
+ reserved_space: &BundleReservedSpaceManager,
+ bundles: &[(ImmutableDeserializedBundle, SanitizedBundle)],
+ bank_start: &BankStart,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) -> Vec<BundleExecutionResult<()>> {
+ // BundleAccountLocker holds RW locks for ALL accounts in ALL transactions within a single bundle.
+ // By pre-locking bundles before they're ready to be processed, it will prevent BankingStage from
+ // grabbing those locks so BundleStage can process as fast as possible.
+ // A LockedBundle is similar to TransactionBatch; once its dropped the locks are released.
+ #[allow(clippy::needless_collect)]
+ let (locked_bundle_results, locked_bundles_elapsed) = measure!(
+ bundles
+ .iter()
+ .map(|(_, sanitized_bundle)| {
+ bundle_account_locker
+ .prepare_locked_bundle(sanitized_bundle, &bank_start.working_bank)
+ })
+ .collect::<Vec<_>>(),
+ "locked_bundles_elapsed"
+ );
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_locked_bundle_elapsed_us(locked_bundles_elapsed.as_us());
+
+ let (execution_results, execute_locked_bundles_elapsed) = measure!(locked_bundle_results
+ .into_iter()
+ .map(|r| match r {
+ Ok(locked_bundle) => {
+ let (r, measure) = measure_us!(Self::process_bundle(
+ bundle_account_locker,
+ tip_manager,
+ last_tip_updated_slot,
+ cluster_info,
+ block_builder_fee_info,
+ committer,
+ recorder,
+ qos_service,
+ log_messages_bytes_limit,
+ max_bundle_retry_duration,
+ reserved_space,
+ &locked_bundle,
+ bank_start,
+ bundle_stage_leader_metrics,
+ ));
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_process_packets_transactions_us(measure);
+ r
+ }
+ Err(_) => {
+ Err(BundleExecutionError::LockError)
+ }
+ })
+ .collect::<Vec<_>>());
+
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_execute_locked_bundles_elapsed_us(execute_locked_bundles_elapsed.as_us());
+ execution_results.iter().for_each(|result| {
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_bundle_execution_result(result);
+ });
+
+ execution_results
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn process_bundle(
+ bundle_account_locker: &BundleAccountLocker,
+ tip_manager: &TipManager,
+ last_tip_updated_slot: &mut Slot,
+ cluster_info: &Arc<ClusterInfo>,
+ block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+ committer: &Committer,
+ recorder: &TransactionRecorder,
+ qos_service: &QosService,
+ log_messages_bytes_limit: &Option<usize>,
+ max_bundle_retry_duration: Duration,
+ reserved_space: &BundleReservedSpaceManager,
+ locked_bundle: &LockedBundle,
+ bank_start: &BankStart,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) -> Result<(), BundleExecutionError> {
+ if !Bank::should_bank_still_be_processing_txs(
+ &bank_start.bank_creation_time,
+ bank_start.working_bank.ns_per_slot,
+ ) {
+ return Err(BundleExecutionError::BankProcessingTimeLimitReached);
+ }
+
+ if Self::bundle_touches_tip_pdas(
+ locked_bundle.sanitized_bundle(),
+ &tip_manager.get_tip_accounts(),
+ ) && bank_start.working_bank.slot() != *last_tip_updated_slot
+ {
+ let start = Instant::now();
+ let result = Self::handle_tip_programs(
+ bundle_account_locker,
+ tip_manager,
+ cluster_info,
+ block_builder_fee_info,
+ committer,
+ recorder,
+ qos_service,
+ log_messages_bytes_limit,
+ max_bundle_retry_duration,
+ reserved_space,
+ bank_start,
+ bundle_stage_leader_metrics,
+ );
+
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_change_tip_receiver_elapsed_us(start.elapsed().as_micros() as u64);
+
+ result?;
+
+ *last_tip_updated_slot = bank_start.working_bank.slot();
+ }
+
+ Self::update_qos_and_execute_record_commit_bundle(
+ committer,
+ recorder,
+ qos_service,
+ log_messages_bytes_limit,
+ max_bundle_retry_duration,
+ reserved_space,
+ locked_bundle.sanitized_bundle(),
+ bank_start,
+ bundle_stage_leader_metrics,
+ )?;
+
+ Ok(())
+ }
+
+ /// The validator needs to manage state on two programs related to tips
+ #[allow(clippy::too_many_arguments)]
+ fn handle_tip_programs(
+ bundle_account_locker: &BundleAccountLocker,
+ tip_manager: &TipManager,
+ cluster_info: &Arc<ClusterInfo>,
+ block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+ committer: &Committer,
+ recorder: &TransactionRecorder,
+ qos_service: &QosService,
+ log_messages_bytes_limit: &Option<usize>,
+ max_bundle_retry_duration: Duration,
+ reserved_space: &BundleReservedSpaceManager,
+ bank_start: &BankStart,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) -> Result<(), BundleExecutionError> {
+ debug!("handle_tip_programs");
+
+ // This will set up the tip payment and tip distribution programs if they haven't been
+ // initialized yet, which is typically helpful for local validators. On mainnet and testnet,
+ // this code should never run.
+ let keypair = cluster_info.keypair().clone();
+ let initialize_tip_programs_bundle =
+ tip_manager.get_initialize_tip_programs_bundle(&bank_start.working_bank, &keypair);
+ if let Some(bundle) = initialize_tip_programs_bundle {
+ debug!(
+ "initializing tip programs with {} transactions, bundle id: {}",
+ bundle.transactions.len(),
+ bundle.bundle_id
+ );
+
+ let locked_init_tip_programs_bundle = bundle_account_locker
+ .prepare_locked_bundle(&bundle, &bank_start.working_bank)
+ .map_err(|_| BundleExecutionError::TipError(TipError::LockError))?;
+
+ Self::update_qos_and_execute_record_commit_bundle(
+ committer,
+ recorder,
+ qos_service,
+ log_messages_bytes_limit,
+ max_bundle_retry_duration,
+ reserved_space,
+ locked_init_tip_programs_bundle.sanitized_bundle(),
+ bank_start,
+ bundle_stage_leader_metrics,
+ )
+ .map_err(|e| {
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_num_init_tip_account_errors(1);
+ error!(
+ "bundle: {} error initializing tip programs: {:?}",
+ locked_init_tip_programs_bundle.sanitized_bundle().bundle_id,
+ e
+ );
+ BundleExecutionError::TipError(TipError::InitializeProgramsError)
+ })?;
+
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_num_init_tip_account_ok(1);
+ }
+
+ // There are two frequently run internal cranks inside the jito-solana validator that have to do with managing MEV tips.
+ // One is initializing the TipDistributionAccount, which is a validator's "tip piggy bank" for an epoch.
+ // The other is ensuring the tip_receiver is configured correctly so that tips are routed to the correct
+ // address. The validator must drain the tip accounts to the previous tip receiver before setting the tip
+ // receiver to itself.
+
+ let kp = cluster_info.keypair().clone();
+ let tip_crank_bundle = tip_manager.get_tip_programs_crank_bundle(
+ &bank_start.working_bank,
+ &kp,
+ &block_builder_fee_info.lock().unwrap(),
+ )?;
+ debug!("tip_crank_bundle is_some: {}", tip_crank_bundle.is_some());
+
+ if let Some(bundle) = tip_crank_bundle {
+ info!(
+ "bundle id: {} cranking tip programs with {} transactions",
+ bundle.bundle_id,
+ bundle.transactions.len()
+ );
+
+ let locked_tip_crank_bundle = bundle_account_locker
+ .prepare_locked_bundle(&bundle, &bank_start.working_bank)
+ .map_err(|_| BundleExecutionError::TipError(TipError::LockError))?;
+
+ Self::update_qos_and_execute_record_commit_bundle(
+ committer,
+ recorder,
+ qos_service,
+ log_messages_bytes_limit,
+ max_bundle_retry_duration,
+ reserved_space,
+ locked_tip_crank_bundle.sanitized_bundle(),
+ bank_start,
+ bundle_stage_leader_metrics,
+ )
+ .map_err(|e| {
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_num_change_tip_receiver_errors(1);
+ error!(
+ "bundle: {} error cranking tip programs: {:?}",
+ locked_tip_crank_bundle.sanitized_bundle().bundle_id,
+ e
+ );
+ BundleExecutionError::TipError(TipError::CrankTipError)
+ })?;
+
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_num_change_tip_receiver_ok(1);
+ }
+
+ Ok(())
+ }
+
+ /// Reserves space for the entire bundle up-front to ensure the entire bundle can execute.
+ /// Rolls back the reserved space if there's not enough blockspace for all transactions in the bundle.
+ fn reserve_bundle_blockspace(
+ qos_service: &QosService,
+ reserved_space: &BundleReservedSpaceManager,
+ sanitized_bundle: &SanitizedBundle,
+ bank: &Arc<Bank>,
+ ) -> BundleExecutionResult<(Vec<transaction::Result<TransactionCost>>, usize)> {
+ let mut write_cost_tracker = bank.write_cost_tracker().unwrap();
+
+ // set the block cost limit to the original block cost limit, run the select + accumulate
+ // then reset back to the expected block cost limit. this allows bundle stage to potentially
+ // increase block_compute_limits, allocate the space, and reset the block_cost_limits to
+ // the reserved space without BankingStage racing to allocate this extra reserved space
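+ // Hypothetical example (numbers are illustrative, not taken from the config): with a 48M CU
+ // block limit and 3M CUs reserved for bundles, BankingStage normally runs against a 45M CU
+ // limit. The tracker is briefly raised to the full 48M here so this bundle can use the
+ // reserved 3M, then lowered back to the expected limit below.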
+ write_cost_tracker.set_block_cost_limit(reserved_space.block_cost_limit());
+ let (transaction_qos_cost_results, cost_model_throttled_transactions_count) = qos_service
+ .select_and_accumulate_transaction_costs(
+ bank,
+ &mut write_cost_tracker,
+ &sanitized_bundle.transactions,
+ std::iter::repeat(Ok(())),
+ );
+ write_cost_tracker.set_block_cost_limit(reserved_space.expected_block_cost_limits(bank));
+ drop(write_cost_tracker);
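+ // drop the write guard eagerly: QosService::remove_costs below takes the bank rather than
+ // the guard and re-acquires the cost tracker lock internally, which would deadlock if the
+ // guard were still held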
+
+ // roll back all accumulated transaction costs if the full bundle can't fit in the block
+ if transaction_qos_cost_results.iter().any(|c| c.is_err()) {
+ QosService::remove_costs(transaction_qos_cost_results.iter(), None, bank);
+ return Err(BundleExecutionError::ExceedsCostModel);
+ }
+
+ Ok((
+ transaction_qos_cost_results,
+ cost_model_throttled_transactions_count,
+ ))
+ }
+
+ fn update_qos_and_execute_record_commit_bundle(
+ committer: &Committer,
+ recorder: &TransactionRecorder,
+ qos_service: &QosService,
+ log_messages_bytes_limit: &Option<usize>,
+ max_bundle_retry_duration: Duration,
+ reserved_space: &BundleReservedSpaceManager,
+ sanitized_bundle: &SanitizedBundle,
+ bank_start: &BankStart,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) -> BundleExecutionResult<()> {
+ debug!(
+ "bundle: {} reserving blockspace for {} transactions",
+ sanitized_bundle.bundle_id,
+ sanitized_bundle.transactions.len()
+ );
+
+ let (
+ (transaction_qos_cost_results, _cost_model_throttled_transactions_count),
+ cost_model_elapsed_us,
+ ) = measure_us!(Self::reserve_bundle_blockspace(
+ qos_service,
+ reserved_space,
+ sanitized_bundle,
+ &bank_start.working_bank
+ )?);
+
+ debug!(
+ "bundle: {} executing, recording, and committing",
+ sanitized_bundle.bundle_id
+ );
+
+ let (result, process_transactions_us) = measure_us!(Self::execute_record_commit_bundle(
+ committer,
+ recorder,
+ log_messages_bytes_limit,
+ max_bundle_retry_duration,
+ sanitized_bundle,
+ bank_start,
+ ));
+
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_num_execution_retries(result.execution_metrics.num_retries);
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .accumulate_transaction_errors(&result.transaction_error_counter);
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_process_transactions_us(process_transactions_us);
+
+ let (cu, us) = result
+ .execute_and_commit_timings
+ .execute_timings
+ .accumulate_execute_units_and_time();
+ qos_service.accumulate_actual_execute_cu(cu);
+ qos_service.accumulate_actual_execute_time(us);
+
+ let num_committed = result
+ .commit_transaction_details
+ .iter()
+ .filter(|c| matches!(c, CommitTransactionDetails::Committed { .. }))
+ .count();
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .accumulate_process_transactions_summary(&ProcessTransactionsSummary {
+ reached_max_poh_height: matches!(
+ result.result,
+ Err(BundleExecutionError::BankProcessingTimeLimitReached)
+ | Err(BundleExecutionError::PohRecordError(_))
+ ),
+ transactions_attempted_execution_count: sanitized_bundle.transactions.len(),
+ committed_transactions_count: num_committed,
+ // NOTE: this assumes that bundles are committed all-or-nothing
+ committed_transactions_with_successful_result_count: num_committed,
+ failed_commit_count: 0,
+ retryable_transaction_indexes: vec![],
+ cost_model_throttled_transactions_count: 0,
+ cost_model_us: cost_model_elapsed_us,
+ execute_and_commit_timings: result.execute_and_commit_timings,
+ error_counters: result.transaction_error_counter,
+ });
+
+ match result.result {
+ Ok(_) => {
+ // it's assumed that all transactions in the bundle executed, so QoS can be updated
+ if !bank_start
+ .working_bank
+ .feature_set
+ .is_active(&feature_set::apply_cost_tracker_during_replay::id())
+ {
+ QosService::update_costs(
+ transaction_qos_cost_results.iter(),
+ Some(&result.commit_transaction_details),
+ &bank_start.working_bank,
+ );
+ }
+
+ qos_service.report_metrics(bank_start.working_bank.slot());
+ Ok(())
+ }
+ Err(e) => {
+ // on bundle failure, none of the transactions are committed, so all of the
+ // reserved compute must be reverted
+ QosService::remove_costs(
+ transaction_qos_cost_results.iter(),
+ None,
+ &bank_start.working_bank,
+ );
+ qos_service.report_metrics(bank_start.working_bank.slot());
+
+ Err(e)
+ }
+ }
+ }
+
+ fn execute_record_commit_bundle(
+ committer: &Committer,
+ recorder: &TransactionRecorder,
+ log_messages_bytes_limit: &Option<usize>,
+ max_bundle_retry_duration: Duration,
+ sanitized_bundle: &SanitizedBundle,
+ bank_start: &BankStart,
+ ) -> ExecuteRecordCommitResult {
+ let transaction_status_sender_enabled = committer.transaction_status_sender_enabled();
+
+ let mut execute_and_commit_timings = LeaderExecuteAndCommitTimings::default();
+
+ debug!("bundle: {} executing", sanitized_bundle.bundle_id);
+ let default_accounts = vec![None; sanitized_bundle.transactions.len()];
+ let mut bundle_execution_results = load_and_execute_bundle(
+ &bank_start.working_bank,
+ sanitized_bundle,
+ MAX_PROCESSING_AGE,
+ &max_bundle_retry_duration,
+ transaction_status_sender_enabled,
+ transaction_status_sender_enabled,
+ transaction_status_sender_enabled,
+ transaction_status_sender_enabled,
+ log_messages_bytes_limit,
+ false,
+ None,
+ &default_accounts,
+ &default_accounts,
+ );
+
+ let execution_metrics = bundle_execution_results.metrics();
+
+ execute_and_commit_timings.collect_balances_us = execution_metrics.collect_balances_us;
+ execute_and_commit_timings.load_execute_us = execution_metrics.load_execute_us;
+ execute_and_commit_timings
+ .execute_timings
+ .accumulate(&execution_metrics.execute_timings);
+
+ let mut transaction_error_counter = TransactionErrorMetrics::default();
+ bundle_execution_results
+ .bundle_transaction_results()
+ .iter()
+ .for_each(|r| {
+ transaction_error_counter
+ .accumulate(&r.load_and_execute_transactions_output().error_counters);
+ });
+
+ debug!(
+ "bundle: {} executed, is_ok: {}",
+ sanitized_bundle.bundle_id,
+ bundle_execution_results.result().is_ok()
+ );
+
+ // don't commit the bundle if any part of it failed to execute
+ if let Err(e) = bundle_execution_results.result() {
+ return ExecuteRecordCommitResult {
+ commit_transaction_details: vec![],
+ result: Err(e.clone().into()),
+ execution_metrics,
+ execute_and_commit_timings,
+ transaction_error_counter,
+ };
+ }
+
+ let (executed_batches, execution_results_to_transactions_us) =
+ measure_us!(bundle_execution_results.executed_transaction_batches());
+
+ debug!(
+ "bundle: {} recording {} batches of {:?} transactions",
+ sanitized_bundle.bundle_id,
+ executed_batches.len(),
+ executed_batches
+ .iter()
+ .map(|b| b.len())
+ .collect::<Vec<_>>()
+ );
+
+ let (freeze_lock, freeze_lock_us) = measure_us!(bank_start.working_bank.freeze_lock());
+ execute_and_commit_timings.freeze_lock_us = freeze_lock_us;
+
+ let (
+ RecordTransactionsSummary {
+ result: record_transactions_result,
+ record_transactions_timings,
+ starting_transaction_index,
+ },
+ record_us,
+ ) = measure_us!(
+ recorder.record_transactions(bank_start.working_bank.slot(), executed_batches)
+ );
+
+ execute_and_commit_timings.record_us = record_us;
+ execute_and_commit_timings.record_transactions_timings = record_transactions_timings;
+ execute_and_commit_timings
+ .record_transactions_timings
+ .execution_results_to_transactions_us = execution_results_to_transactions_us;
+
+ debug!(
+ "bundle: {} record result: {}",
+ sanitized_bundle.bundle_id,
+ record_transactions_result.is_ok()
+ );
+
+ // don't commit the bundle if it failed to record
+ if let Err(e) = record_transactions_result {
+ return ExecuteRecordCommitResult {
+ commit_transaction_details: vec![],
+ result: Err(e.into()),
+ execution_metrics,
+ execute_and_commit_timings,
+ transaction_error_counter,
+ };
+ }
+
+ // note: execute_and_commit_timings.commit_us handled inside this function
+ let (commit_us, commit_bundle_details) = committer.commit_bundle(
+ &mut bundle_execution_results,
+ starting_transaction_index,
+ &bank_start.working_bank,
+ &mut execute_and_commit_timings,
+ );
+ execute_and_commit_timings.commit_us = commit_us;
+
+ drop(freeze_lock);
+
+ // commit_bundle_details contains both committed and non-committed transactions. Given that
+ // the current implementation only executes, records, and commits bundles where all
+ // transactions executed, the non-committed ones can be filtered out.
+ // TODO (LB): does this make more sense in commit_bundle for future when failing bundles are accepted?
+ let commit_transaction_details = commit_bundle_details
+ .commit_transaction_details
+ .into_iter()
+ .flat_map(|commit_details| {
+ commit_details
+ .into_iter()
+ .filter(|d| matches!(d, CommitTransactionDetails::Committed { .. }))
+ })
+ .collect();
+ debug!(
+ "bundle: {} commit details: {:?}",
+ sanitized_bundle.bundle_id, commit_transaction_details
+ );
+
+ ExecuteRecordCommitResult {
+ commit_transaction_details,
+ result: Ok(()),
+ execution_metrics,
+ execute_and_commit_timings,
+ transaction_error_counter,
+ }
+ }
+
+ /// Returns true if any of the transactions in a bundle mention one of the tip PDAs
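+ /// (e.g. a transfer transaction whose destination is one of the tip payment program's tip accounts)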
+ fn bundle_touches_tip_pdas(bundle: &SanitizedBundle, tip_pdas: &HashSet<Pubkey>) -> bool {
+ bundle.transactions.iter().any(|tx| {
+ tx.message()
+ .account_keys()
+ .iter()
+ .any(|a| tip_pdas.contains(a))
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use {
+ crate::{
+ bundle_stage::{
+ bundle_account_locker::BundleAccountLocker, bundle_consumer::BundleConsumer,
+ bundle_packet_deserializer::BundlePacketDeserializer,
+ bundle_reserved_space_manager::BundleReservedSpaceManager,
+ bundle_stage_leader_metrics::BundleStageLeaderMetrics, committer::Committer,
+ },
+ packet_bundle::PacketBundle,
+ proxy::block_engine_stage::BlockBuilderFeeInfo,
+ qos_service::QosService,
+ tip_manager::{TipDistributionAccountConfig, TipManager, TipManagerConfig},
+ unprocessed_transaction_storage::UnprocessedTransactionStorage,
+ },
+ crossbeam_channel::{unbounded, Receiver},
+ jito_tip_distribution::sdk::derive_tip_distribution_account_address,
+ rand::{thread_rng, RngCore},
+ solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo},
+ solana_ledger::{
+ blockstore::Blockstore, genesis_utils::create_genesis_config,
+ get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache,
+ },
+ solana_perf::packet::PacketBatch,
+ solana_poh::{
+ poh_recorder::{PohRecorder, Record, WorkingBankEntry},
+ poh_service::PohService,
+ },
+ solana_program_test::programs::spl_programs,
+ solana_runtime::{
+ bank::Bank,
+ block_cost_limits::MAX_BLOCK_UNITS,
+ cost_model::CostModel,
+ genesis_utils::{create_genesis_config_with_leader_ex, GenesisConfigInfo},
+ prioritization_fee_cache::PrioritizationFeeCache,
+ transaction_error_metrics::TransactionErrorMetrics,
+ },
+ solana_sdk::{
+ bundle::{derive_bundle_id, SanitizedBundle},
+ clock::MAX_PROCESSING_AGE,
+ feature_set::delay_visibility_of_program_deployment,
+ fee_calculator::{FeeRateGovernor, DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE},
+ genesis_config::ClusterType,
+ hash::Hash,
+ native_token::sol_to_lamports,
+ packet::Packet,
+ poh_config::PohConfig,
+ pubkey::Pubkey,
+ rent::Rent,
+ signature::{Keypair, Signer},
+ system_transaction::transfer,
+ transaction::{SanitizedTransaction, TransactionError, VersionedTransaction},
+ vote::state::VoteState,
+ },
+ solana_streamer::socket::SocketAddrSpace,
+ std::{
+ collections::{HashSet, VecDeque},
+ str::FromStr,
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc, Mutex, RwLock,
+ },
+ thread::{Builder, JoinHandle},
+ time::Duration,
+ },
+ };
+
+ struct TestFixture {
+ genesis_config_info: GenesisConfigInfo,
+ leader_keypair: Keypair,
+ bank: Arc<Bank>,
+ exit: Arc<AtomicBool>,
+ poh_recorder: Arc<RwLock<PohRecorder>>,
+ poh_simulator: JoinHandle<()>,
+ entry_receiver: Receiver<WorkingBankEntry>,
+ }
+
+ pub(crate) fn simulate_poh(
+ record_receiver: Receiver<Record>,
+ poh_recorder: &Arc<RwLock<PohRecorder>>,
+ ) -> JoinHandle<()> {
+ let poh_recorder = poh_recorder.clone();
+ let is_exited = poh_recorder.read().unwrap().is_exited.clone();
+ let tick_producer = Builder::new()
+ .name("solana-simulate_poh".to_string())
+ .spawn(move || loop {
+ PohService::read_record_receiver_and_process(
+ &poh_recorder,
+ &record_receiver,
+ Duration::from_millis(10),
+ );
+ if is_exited.load(Ordering::Relaxed) {
+ break;
+ }
+ });
+ tick_producer.unwrap()
+ }
+
+ pub fn create_test_recorder(
+ bank: &Arc<Bank>,
+ blockstore: Arc<Blockstore>,
+ poh_config: Option<PohConfig>,
+ leader_schedule_cache: Option<Arc<LeaderScheduleCache>>,
+ ) -> (
+ Arc<AtomicBool>,
+ Arc<RwLock<PohRecorder>>,
+ JoinHandle<()>,
+ Receiver<WorkingBankEntry>,
+ ) {
+ let leader_schedule_cache = match leader_schedule_cache {
+ Some(provided_cache) => provided_cache,
+ None => Arc::new(LeaderScheduleCache::new_from_bank(bank)),
+ };
+ let exit = Arc::new(AtomicBool::new(false));
+ let poh_config = poh_config.unwrap_or_default();
+ let (mut poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
+ bank.tick_height(),
+ bank.last_blockhash(),
+ bank.clone(),
+ Some((4, 4)),
+ bank.ticks_per_slot(),
+ &Pubkey::default(),
+ blockstore,
+ &leader_schedule_cache,
+ &poh_config,
+ exit.clone(),
+ );
+ poh_recorder.set_bank(bank.clone(), false);
+
+ let poh_recorder = Arc::new(RwLock::new(poh_recorder));
+ let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
+
+ (exit, poh_recorder, poh_simulator, entry_receiver)
+ }
+
+ fn create_test_fixture(mint_sol: u64) -> TestFixture {
+ let mint_keypair = Keypair::new();
+ let leader_keypair = Keypair::new();
+ let voting_keypair = Keypair::new();
+
+ let rent = Rent::default();
+
+ let mut genesis_config = create_genesis_config_with_leader_ex(
+ sol_to_lamports(mint_sol as f64),
+ &mint_keypair.pubkey(),
+ &leader_keypair.pubkey(),
+ &voting_keypair.pubkey(),
+ &solana_sdk::pubkey::new_rand(),
+ rent.minimum_balance(VoteState::size_of()) + sol_to_lamports(1_000_000.0),
+ sol_to_lamports(1_000_000.0),
+ FeeRateGovernor {
+ // Initialize with a non-zero fee
+ lamports_per_signature: DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE / 2,
+ ..FeeRateGovernor::default()
+ },
+ rent, // most tests don't expect rent
+ ClusterType::Development,
+ spl_programs(&rent),
+ );
+ genesis_config.ticks_per_slot *= 8;
+
+ // workaround for https://github.com/solana-labs/solana/issues/30085 so that
+ // the test can deploy and use spl_programs in the genesis slot without waiting for the next one
+ let mut bank = Bank::new_for_tests(&genesis_config);
+ bank.deactivate_feature(&delay_visibility_of_program_deployment::id());
+ let bank = Arc::new(bank);
+
+ let ledger_path = get_tmp_ledger_path_auto_delete!();
+ let blockstore = Arc::new(
+ Blockstore::open(ledger_path.path())
+ .expect("Expected to be able to open database ledger"),
+ );
+
+ let (exit, poh_recorder, poh_simulator, entry_receiver) =
+ create_test_recorder(&bank, blockstore, Some(PohConfig::default()), None);
+
+ let validator_pubkey = voting_keypair.pubkey();
+ TestFixture {
+ genesis_config_info: GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ voting_keypair,
+ validator_pubkey,
+ },
+ leader_keypair,
+ bank,
+ exit,
+ poh_recorder,
+ poh_simulator,
+ entry_receiver,
+ }
+ }
+
+ fn make_random_overlapping_bundles(
+ mint_keypair: &Keypair,
+ num_bundles: usize,
+ num_packets_per_bundle: usize,
+ hash: Hash,
+ max_transfer_amount: u64,
+ ) -> Vec<PacketBundle> {
+ let mut rng = thread_rng();
+
+ (0..num_bundles)
+ .map(|_| {
+ let transfers: Vec<_> = (0..num_packets_per_bundle)
+ .map(|_| {
+ VersionedTransaction::from(transfer(
+ mint_keypair,
+ &mint_keypair.pubkey(),
+ rng.next_u64() % max_transfer_amount,
+ hash,
+ ))
+ })
+ .collect();
+ let bundle_id = derive_bundle_id(&transfers);
+
+ PacketBundle {
+ batch: PacketBatch::new(
+ transfers
+ .iter()
+ .map(|tx| Packet::from_data(None, tx).unwrap())
+ .collect(),
+ ),
+ bundle_id,
+ }
+ })
+ .collect()
+ }
+
+ fn get_tip_manager(vote_account: &Pubkey) -> TipManager {
+ TipManager::new(TipManagerConfig {
+ tip_payment_program_id: Pubkey::from_str("T1pyyaTNZsKv2WcRAB8oVnk93mLJw2XzjtVYqCsaHqt")
+ .unwrap(),
+ tip_distribution_program_id: Pubkey::from_str(
+ "4R3gSG8BpU4t19KYj8CfnbtRpnT8gtk4dvTHxVRwc2r7",
+ )
+ .unwrap(),
+ tip_distribution_account_config: TipDistributionAccountConfig {
+ merkle_root_upload_authority: Pubkey::new_unique(),
+ vote_account: *vote_account,
+ commission_bps: 10,
+ },
+ })
+ }
+
+ /// Happy-path bundle execution w/ no tip management
+ #[test]
+ fn test_bundle_no_tip_success() {
+ solana_logger::setup();
+ let TestFixture {
+ genesis_config_info,
+ leader_keypair,
+ bank,
+ exit,
+ poh_recorder,
+ poh_simulator,
+ entry_receiver,
+ } = create_test_fixture(1_000_000);
+ let recorder = poh_recorder.read().unwrap().new_recorder();
+
+ let status = poh_recorder.read().unwrap().reached_leader_slot();
+ info!("status: {:?}", status);
+
+ let (replay_vote_sender, _replay_vote_receiver) = unbounded();
+ let committer = Committer::new(
+ None,
+ replay_vote_sender,
+ Arc::new(PrioritizationFeeCache::new(0u64)),
+ );
+
+ let block_builder_pubkey = Pubkey::new_unique();
+ let tip_manager = get_tip_manager(&genesis_config_info.voting_keypair.pubkey());
+ let block_builder_info = Arc::new(Mutex::new(BlockBuilderFeeInfo {
+ block_builder: block_builder_pubkey,
+ block_builder_commission: 10,
+ }));
+
+ let cluster_info = Arc::new(ClusterInfo::new(
+ ContactInfo::new(leader_keypair.pubkey(), 0, 0),
+ Arc::new(leader_keypair),
+ SocketAddrSpace::new(true),
+ ));
+
+ let mut consumer = BundleConsumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ tip_manager,
+ BundleAccountLocker::default(),
+ block_builder_info,
+ Duration::from_secs(10),
+ cluster_info,
+ BundleReservedSpaceManager::new(
+ MAX_BLOCK_UNITS,
+ 3_000_000,
+ poh_recorder
+ .read()
+ .unwrap()
+ .ticks_per_slot()
+ .saturating_mul(8)
+ .saturating_div(10),
+ ),
+ );
+
+ let bank_start = poh_recorder.read().unwrap().bank_start().unwrap();
+
+ let mut bundle_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(1);
+
+ let mut packet_bundles = make_random_overlapping_bundles(
+ &genesis_config_info.mint_keypair,
+ 1,
+ 3,
+ genesis_config_info.genesis_config.hash(),
+ 10_000,
+ );
+ let deserialized_bundle = BundlePacketDeserializer::deserialize_bundle(
+ packet_bundles.get_mut(0).unwrap(),
+ false,
+ None,
+ )
+ .unwrap();
+ let mut error_metrics = TransactionErrorMetrics::default();
+ let sanitized_bundle = deserialized_bundle
+ .build_sanitized_bundle(
+ &bank_start.working_bank,
+ &HashSet::default(),
+ &mut error_metrics,
+ )
+ .unwrap();
+
+ let summary = bundle_storage.insert_bundles(vec![deserialized_bundle]);
+ assert_eq!(
+ summary.num_packets_inserted,
+ sanitized_bundle.transactions.len()
+ );
+ assert_eq!(summary.num_bundles_dropped, 0);
+ assert_eq!(summary.num_bundles_inserted, 1);
+
+ consumer.consume_buffered_bundles(
+ &bank_start,
+ &mut bundle_storage,
+ &mut bundle_stage_leader_metrics,
+ );
+
+ let mut transactions = Vec::new();
+ while let Ok(WorkingBankEntry {
+ bank: wbe_bank,
+ entries_ticks,
+ }) = entry_receiver.recv()
+ {
+ assert_eq!(bank.slot(), wbe_bank.slot());
+ for (entry, _) in entries_ticks {
+ if !entry.transactions.is_empty() {
+ // transactions in this test are all overlapping, so each entry will contain 1 transaction
+ assert_eq!(entry.transactions.len(), 1);
+ transactions.extend(entry.transactions);
+ }
+ }
+ if transactions.len() == sanitized_bundle.transactions.len() {
+ break;
+ }
+ }
+
+ let bundle_versioned_transactions: Vec<_> = sanitized_bundle
+ .transactions
+ .iter()
+ .map(|tx| tx.to_versioned_transaction())
+ .collect();
+ assert_eq!(transactions, bundle_versioned_transactions);
+
+ let check_results = bank.check_transactions(
+ &sanitized_bundle.transactions,
+ &vec![Ok(()); sanitized_bundle.transactions.len()],
+ MAX_PROCESSING_AGE,
+ &mut error_metrics,
+ );
+ assert_eq!(
+ check_results,
+ vec![
+ (Err(TransactionError::AlreadyProcessed), None);
+ sanitized_bundle.transactions.len()
+ ]
+ );
+
+ poh_recorder
+ .write()
+ .unwrap()
+ .is_exited
+ .store(true, Ordering::Relaxed);
+ exit.store(true, Ordering::Relaxed);
+ poh_simulator.join().unwrap();
+ // TODO (LB): cleanup blockstore
+ }
+
+ /// Happy-path bundle execution to ensure tip management works.
+ /// Tip management involves cranking setup bundles before executing the test bundle
+ #[test]
+ fn test_bundle_tip_program_setup_success() {
+ solana_logger::setup();
+ let TestFixture {
+ genesis_config_info,
+ leader_keypair,
+ bank,
+ exit,
+ poh_recorder,
+ poh_simulator,
+ entry_receiver,
+ } = create_test_fixture(1_000_000);
+ let recorder = poh_recorder.read().unwrap().new_recorder();
+
+ let (replay_vote_sender, _replay_vote_receiver) = unbounded();
+ let committer = Committer::new(
+ None,
+ replay_vote_sender,
+ Arc::new(PrioritizationFeeCache::new(0u64)),
+ );
+
+ let block_builder_pubkey = Pubkey::new_unique();
+ let tip_manager = get_tip_manager(&genesis_config_info.voting_keypair.pubkey());
+ let block_builder_info = Arc::new(Mutex::new(BlockBuilderFeeInfo {
+ block_builder: block_builder_pubkey,
+ block_builder_commission: 10,
+ }));
+
+ let cluster_info = Arc::new(ClusterInfo::new(
+ ContactInfo::new(leader_keypair.pubkey(), 0, 0),
+ Arc::new(leader_keypair),
+ SocketAddrSpace::new(true),
+ ));
+
+ let mut consumer = BundleConsumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ tip_manager.clone(),
+ BundleAccountLocker::default(),
+ block_builder_info,
+ Duration::from_secs(10),
+ cluster_info.clone(),
+ BundleReservedSpaceManager::new(
+ MAX_BLOCK_UNITS,
+ 3_000_000,
+ poh_recorder
+ .read()
+ .unwrap()
+ .ticks_per_slot()
+ .saturating_mul(8)
+ .saturating_div(10),
+ ),
+ );
+
+ let bank_start = poh_recorder.read().unwrap().bank_start().unwrap();
+
+ let mut bundle_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(1);
+ // MAIN LOGIC
+
+ // a bundle that tips the tip program
+ let tip_accounts = tip_manager.get_tip_accounts();
+ let tip_account = tip_accounts.iter().collect::<Vec<_>>()[0];
+ let mut packet_bundle = PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(
+ None,
+ transfer(
+ &genesis_config_info.mint_keypair,
+ tip_account,
+ 1,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ )
+ .unwrap()]),
+ bundle_id: "test_transfer".to_string(),
+ };
+
+ let deserialized_bundle =
+ BundlePacketDeserializer::deserialize_bundle(&mut packet_bundle, false, None).unwrap();
+ let mut error_metrics = TransactionErrorMetrics::default();
+ let sanitized_bundle = deserialized_bundle
+ .build_sanitized_bundle(
+ &bank_start.working_bank,
+ &HashSet::default(),
+ &mut error_metrics,
+ )
+ .unwrap();
+
+ let summary = bundle_storage.insert_bundles(vec![deserialized_bundle]);
+ assert_eq!(summary.num_bundles_inserted, 1);
+ assert_eq!(summary.num_packets_inserted, 1);
+ assert_eq!(summary.num_bundles_dropped, 0);
+
+ consumer.consume_buffered_bundles(
+ &bank_start,
+ &mut bundle_storage,
+ &mut bundle_stage_leader_metrics,
+ );
+
+ // it's expected there are 5 transactions: three to initialize the tip programs and the tip
+ // distribution account, one to change the tip receiver + block builder, and another with the tip transfer
+
+ let mut transactions = Vec::new();
+ while let Ok(WorkingBankEntry {
+ bank: wbe_bank,
+ entries_ticks,
+ }) = entry_receiver.recv()
+ {
+ assert_eq!(bank.slot(), wbe_bank.slot());
+ transactions.extend(entries_ticks.into_iter().flat_map(|(e, _)| e.transactions));
+ if transactions.len() == 5 {
+ break;
+ }
+ }
+
+ // tip management on the first bundle involves:
+ // calling initialize on the tip payment and tip distribution programs
+ // creating the tip distribution account for this validator's epoch (the MEV piggy bank)
+ // changing the tip receiver and block builder tx
+ // the original transfer that was sent
+ let keypair = cluster_info.keypair().clone();
+
+ assert_eq!(
+ transactions[0],
+ tip_manager
+ .initialize_tip_payment_program_tx(bank.last_blockhash(), &keypair)
+ .to_versioned_transaction()
+ );
+ assert_eq!(
+ transactions[1],
+ tip_manager
+ .initialize_tip_distribution_config_tx(bank.last_blockhash(), &keypair)
+ .to_versioned_transaction()
+ );
+ assert_eq!(
+ transactions[2],
+ tip_manager
+ .initialize_tip_distribution_account_tx(
+ bank.last_blockhash(),
+ bank.epoch(),
+ &keypair
+ )
+ .to_versioned_transaction()
+ );
+ // the first tip receiver + block builder are the initializer (keypair.pubkey()) as set by the
+ // TipPayment program during initialization
+ assert_eq!(
+ transactions[3],
+ tip_manager
+ .build_change_tip_receiver_and_block_builder_tx(
+ &keypair.pubkey(),
+ &derive_tip_distribution_account_address(
+ &tip_manager.tip_distribution_program_id(),
+ &genesis_config_info.validator_pubkey,
+ bank_start.working_bank.epoch()
+ )
+ .0,
+ &bank_start.working_bank,
+ &keypair,
+ &keypair.pubkey(),
+ &block_builder_pubkey,
+ 10
+ )
+ .to_versioned_transaction()
+ );
+ assert_eq!(
+ transactions[4],
+ sanitized_bundle.transactions[0].to_versioned_transaction()
+ );
+
+ poh_recorder
+ .write()
+ .unwrap()
+ .is_exited
+ .store(true, Ordering::Relaxed);
+ exit.store(true, Ordering::Relaxed);
+ poh_simulator.join().unwrap();
+ }
+
+ #[test]
+ fn test_handle_tip_programs() {
+ solana_logger::setup();
+ let TestFixture {
+ genesis_config_info,
+ leader_keypair,
+ bank,
+ exit,
+ poh_recorder,
+ poh_simulator,
+ entry_receiver,
+ } = create_test_fixture(1_000_000);
+ let recorder = poh_recorder.read().unwrap().new_recorder();
+
+ let (replay_vote_sender, _replay_vote_receiver) = unbounded();
+ let committer = Committer::new(
+ None,
+ replay_vote_sender,
+ Arc::new(PrioritizationFeeCache::new(0u64)),
+ );
+
+ let block_builder_pubkey = Pubkey::new_unique();
+ let tip_manager = get_tip_manager(&genesis_config_info.voting_keypair.pubkey());
+ let block_builder_info = Arc::new(Mutex::new(BlockBuilderFeeInfo {
+ block_builder: block_builder_pubkey,
+ block_builder_commission: 10,
+ }));
+
+ let cluster_info = Arc::new(ClusterInfo::new(
+ ContactInfo::new(leader_keypair.pubkey(), 0, 0),
+ Arc::new(leader_keypair),
+ SocketAddrSpace::new(true),
+ ));
+
+ let bank_start = poh_recorder.read().unwrap().bank_start().unwrap();
+
+ let reserved_ticks = bank.max_tick_height().saturating_mul(8).saturating_div(10);
+
+ // The first 80% of the block, based on poh ticks, has `preallocated_bundle_cost` less compute units.
+ // The last 20% has full compute so blockspace is maximized if BundleStage is idle.
+ let reserved_space =
+ BundleReservedSpaceManager::new(MAX_BLOCK_UNITS, 3_000_000, reserved_ticks);
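+ // i.e. with these numbers, BankingStage would see a limit of MAX_BLOCK_UNITS - 3_000_000 CUs
+ // during the first 80% of ticks, with the 3M CUs reserved for BundleStage (illustrative
+ // reading of the config, not asserted by this test)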
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(1);
+ assert_matches!(
+ BundleConsumer::handle_tip_programs(
+ &BundleAccountLocker::default(),
+ &tip_manager,
+ &cluster_info,
+ &block_builder_info,
+ &committer,
+ &recorder,
+ &QosService::new(1),
+ &None,
+ Duration::from_secs(10),
+ &reserved_space,
+ &bank_start,
+ &mut bundle_stage_leader_metrics
+ ),
+ Ok(())
+ );
+
+ let mut transactions = Vec::new();
+ while let Ok(WorkingBankEntry {
+ bank: wbe_bank,
+ entries_ticks,
+ }) = entry_receiver.recv()
+ {
+ assert_eq!(bank.slot(), wbe_bank.slot());
+ transactions.extend(entries_ticks.into_iter().flat_map(|(e, _)| e.transactions));
+ if transactions.len() == 4 {
+ break;
+ }
+ }
+
+ let keypair = cluster_info.keypair().clone();
+ // expect to see initialize tip payment program, tip distribution program, initialize tip distribution account, change tip receiver + change block builder
+ assert_eq!(
+ transactions[0],
+ tip_manager
+ .initialize_tip_payment_program_tx(bank.last_blockhash(), &keypair)
+ .to_versioned_transaction()
+ );
+ assert_eq!(
+ transactions[1],
+ tip_manager
+ .initialize_tip_distribution_config_tx(bank.last_blockhash(), &keypair)
+ .to_versioned_transaction()
+ );
+ assert_eq!(
+ transactions[2],
+ tip_manager
+ .initialize_tip_distribution_account_tx(
+ bank.last_blockhash(),
+ bank.epoch(),
+ &keypair
+ )
+ .to_versioned_transaction()
+ );
+ // the first tip receiver + block builder are the initializer (keypair.pubkey()) as set by the
+ // TipPayment program during initialization
+ assert_eq!(
+ transactions[3],
+ tip_manager
+ .build_change_tip_receiver_and_block_builder_tx(
+ &keypair.pubkey(),
+ &derive_tip_distribution_account_address(
+ &tip_manager.tip_distribution_program_id(),
+ &genesis_config_info.validator_pubkey,
+ bank_start.working_bank.epoch()
+ )
+ .0,
+ &bank_start.working_bank,
+ &keypair,
+ &keypair.pubkey(),
+ &block_builder_pubkey,
+ 10
+ )
+ .to_versioned_transaction()
+ );
+
+ poh_recorder
+ .write()
+ .unwrap()
+ .is_exited
+ .store(true, Ordering::Relaxed);
+ exit.store(true, Ordering::Relaxed);
+ poh_simulator.join().unwrap();
+ }
+
+ #[test]
+ fn test_reserve_bundle_blockspace_success() {
+ let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+
+ let keypair1 = Keypair::new();
+ let keypair2 = Keypair::new();
+ let transfer_tx = SanitizedTransaction::from_transaction_for_tests(transfer(
+ &keypair1,
+ &keypair2.pubkey(),
+ 1,
+ bank.parent_hash(),
+ ));
+ let sanitized_bundle = SanitizedBundle {
+ transactions: vec![transfer_tx],
+ bundle_id: String::default(),
+ };
+
+ let transfer_cost =
+ CostModel::calculate_cost(&sanitized_bundle.transactions[0], &bank.feature_set);
+
+ let qos_service = QosService::new(1);
+ let reserved_ticks = bank.max_tick_height().saturating_mul(8).saturating_div(10);
+
+ // The first 80% of the block, based on poh ticks, has `preallocated_bundle_cost` less compute units.
+ // The last 20% has full compute so blockspace is maximized if BundleStage is idle.
+ let reserved_space =
+ BundleReservedSpaceManager::new(MAX_BLOCK_UNITS, 3_000_000, reserved_ticks);
+
+ assert!(BundleConsumer::reserve_bundle_blockspace(
+ &qos_service,
+ &reserved_space,
+ &sanitized_bundle,
+ &bank
+ )
+ .is_ok());
+ assert_eq!(
+ bank.read_cost_tracker().unwrap().block_cost(),
+ transfer_cost.sum()
+ );
+ }
+
+ #[test]
+ fn test_reserve_bundle_blockspace_failure() {
+ let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+
+ let keypair1 = Keypair::new();
+ let keypair2 = Keypair::new();
+ let transfer_tx1 = SanitizedTransaction::from_transaction_for_tests(transfer(
+ &keypair1,
+ &keypair2.pubkey(),
+ 1,
+ bank.parent_hash(),
+ ));
+ let transfer_tx2 = SanitizedTransaction::from_transaction_for_tests(transfer(
+ &keypair1,
+ &keypair2.pubkey(),
+ 2,
+ bank.parent_hash(),
+ ));
+ let sanitized_bundle = SanitizedBundle {
+ transactions: vec![transfer_tx1, transfer_tx2],
+ bundle_id: String::default(),
+ };
+
+ // set block cost limit to 1 transfer transaction, try to process 2, should return an error
+ // and rollback block cost added
+ let transfer_cost =
+ CostModel::calculate_cost(&sanitized_bundle.transactions[0], &bank.feature_set);
+ bank.write_cost_tracker()
+ .unwrap()
+ .set_block_cost_limit(transfer_cost.sum());
+
+ let qos_service = QosService::new(1);
+ let reserved_ticks = bank.max_tick_height().saturating_mul(8).saturating_div(10);
+
+ // The first 80% of the block, based on poh ticks, has `preallocated_bundle_cost` less compute units.
+ // The last 20% has full compute so blockspace is maximized if BundleStage is idle.
+ let reserved_space = BundleReservedSpaceManager::new(
+ bank.read_cost_tracker().unwrap().block_cost(),
+ 50,
+ reserved_ticks,
+ );
+
+ assert!(BundleConsumer::reserve_bundle_blockspace(
+ &qos_service,
+ &reserved_space,
+ &sanitized_bundle,
+ &bank
+ )
+ .is_err());
+ assert_eq!(bank.read_cost_tracker().unwrap().block_cost(), 0);
+ assert_eq!(
+ bank.read_cost_tracker().unwrap().block_cost_limit(),
+ bank.read_cost_tracker()
+ .unwrap()
+ .block_cost_limit()
+ .saturating_sub(50)
+ );
+ }
+}
diff --git a/core/src/bundle_stage/bundle_packet_deserializer.rs b/core/src/bundle_stage/bundle_packet_deserializer.rs
new file mode 100644
index 0000000000..b3af110fc3
--- /dev/null
+++ b/core/src/bundle_stage/bundle_packet_deserializer.rs
@@ -0,0 +1,286 @@
+//! Deserializes PacketBundles
+use {
+ crate::{
+ immutable_deserialized_bundle::{DeserializedBundleError, ImmutableDeserializedBundle},
+ packet_bundle::PacketBundle,
+ },
+ crossbeam_channel::{Receiver, RecvTimeoutError},
+ solana_runtime::bank_forks::BankForks,
+ solana_sdk::saturating_add_assign,
+ std::{
+ sync::{Arc, RwLock},
+ time::{Duration, Instant},
+ },
+};
+
+/// Results from deserializing packet bundles.
+#[derive(Debug)]
+pub struct ReceiveBundleResults {
+ /// Deserialized bundles from all received bundle packets
+ pub deserialized_bundles: Vec<ImmutableDeserializedBundle>,
+ /// Number of dropped bundles
+ pub num_dropped_bundles: usize,
+ /// Number of dropped packets
+ pub num_dropped_packets: usize,
+}
+
+pub struct BundlePacketDeserializer {
+ /// Receiver for bundle packets
+ bundle_packet_receiver: Receiver<Vec<PacketBundle>>,
+ /// Provides working bank for deserializer to check feature activation
+ bank_forks: Arc<RwLock<BankForks>>,
+ /// Max packets per bundle
+ max_packets_per_bundle: Option<usize>,
+}
+
+impl BundlePacketDeserializer {
+ pub fn new(
+ bundle_packet_receiver: Receiver<Vec<PacketBundle>>,
+ bank_forks: Arc<RwLock<BankForks>>,
+ max_packets_per_bundle: Option<usize>,
+ ) -> Self {
+ Self {
+ bundle_packet_receiver,
+ bank_forks,
+ max_packets_per_bundle,
+ }
+ }
+
+ /// Handles receiving bundles and deserializing them
+ pub fn receive_bundles(
+ &self,
+ recv_timeout: Duration,
+ capacity: usize,
+ ) -> Result<ReceiveBundleResults, RecvTimeoutError> {
+ let (bundle_count, _packet_count, mut bundles) =
+ self.receive_until(recv_timeout, capacity)?;
+
+ // Note: this can be removed after feature `round_compute_unit_price` is activated in
+ // mainnet-beta
+ let _working_bank = self.bank_forks.read().unwrap().working_bank();
+ let round_compute_unit_price_enabled = false; // TODO get from working_bank.feature_set
+
+ Ok(Self::deserialize_and_collect_bundles(
+ bundle_count,
+ &mut bundles,
+ round_compute_unit_price_enabled,
+ self.max_packets_per_bundle,
+ ))
+ }
+
+ /// Deserializes the packet bundles and collects them into ReceiveBundleResults
+ fn deserialize_and_collect_bundles(
+ bundle_count: usize,
+ bundles: &mut [PacketBundle],
+ round_compute_unit_price_enabled: bool,
+ max_packets_per_bundle: Option<usize>,
+ ) -> ReceiveBundleResults {
+ let mut deserialized_bundles = Vec::with_capacity(bundle_count);
+ let mut num_dropped_bundles: usize = 0;
+ let mut num_dropped_packets: usize = 0;
+
+ for bundle in bundles.iter_mut() {
+ match Self::deserialize_bundle(
+ bundle,
+ round_compute_unit_price_enabled,
+ max_packets_per_bundle,
+ ) {
+ Ok(deserialized_bundle) => {
+ deserialized_bundles.push(deserialized_bundle);
+ }
+ Err(_) => {
+ // TODO (LB): prob wanna collect stats here
+ saturating_add_assign!(num_dropped_bundles, 1);
+ saturating_add_assign!(num_dropped_packets, bundle.batch.len());
+ }
+ }
+ }
+
+ ReceiveBundleResults {
+ deserialized_bundles,
+ num_dropped_bundles,
+ num_dropped_packets,
+ }
+ }
+
+ /// Receives bundle packets
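+ /// Blocks up to `recv_timeout` for the first batch of bundles, then drains the channel
+ /// without blocking until `bundle_count_upperbound` bundles are received or the timeout elapses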
+ fn receive_until(
+ &self,
+ recv_timeout: Duration,
+ bundle_count_upperbound: usize,
+ ) -> Result<(usize, usize, Vec<PacketBundle>), RecvTimeoutError> {
+ let start = Instant::now();
+
+ let mut bundles = self.bundle_packet_receiver.recv_timeout(recv_timeout)?;
+ let mut num_packets_received: usize = bundles.iter().map(|pb| pb.batch.len()).sum();
+ let mut num_bundles_received: usize = bundles.len();
+
+ if num_bundles_received <= bundle_count_upperbound {
+ while let Ok(bundle_packets) = self.bundle_packet_receiver.try_recv() {
+ trace!("got more packet batches in bundle packet deserializer");
+
+ saturating_add_assign!(
+ num_packets_received,
+ bundle_packets
+ .iter()
+ .map(|pb| pb.batch.len())
+ .sum::<usize>()
+ );
+ saturating_add_assign!(num_bundles_received, bundle_packets.len());
+
+ bundles.extend(bundle_packets);
+
+ if start.elapsed() >= recv_timeout
+ || num_bundles_received >= bundle_count_upperbound
+ {
+ break;
+ }
+ }
+ }
+
+ Ok((num_bundles_received, num_packets_received, bundles))
+ }
+
+ /// Deserializes a PacketBundle into an ImmutableDeserializedBundle, returning an error if any
+ /// packet in the bundle fails to deserialize
+ pub fn deserialize_bundle(
+ bundle: &mut PacketBundle,
+ round_compute_unit_price_enabled: bool,
+ max_packets_per_bundle: Option<usize>,
+ ) -> Result<ImmutableDeserializedBundle, DeserializedBundleError> {
+ bundle.batch.iter_mut().for_each(|p| {
+ p.meta_mut()
+ .set_round_compute_unit_price(round_compute_unit_price_enabled);
+ });
+
+ ImmutableDeserializedBundle::new(bundle, max_packets_per_bundle)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use {
+ super::*,
+ crossbeam_channel::unbounded,
+ solana_ledger::genesis_utils::create_genesis_config,
+ solana_perf::packet::PacketBatch,
+ solana_runtime::{bank::Bank, genesis_utils::GenesisConfigInfo},
+ solana_sdk::{packet::Packet, signature::Signer, system_transaction::transfer},
+ };
+
+ #[test]
+ fn test_deserialize_and_collect_bundles_empty() {
+ let results =
+ BundlePacketDeserializer::deserialize_and_collect_bundles(0, &mut [], false, Some(5));
+ assert_eq!(results.deserialized_bundles.len(), 0);
+ assert_eq!(results.num_dropped_packets, 0);
+ assert_eq!(results.num_dropped_bundles, 0);
+ }
+
+ #[test]
+ fn test_receive_bundles_capacity() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks = Arc::new(RwLock::new(BankForks::new(
+ Bank::new_no_wallclock_throttle_for_tests(&genesis_config),
+ )));
+
+ let (sender, receiver) = unbounded();
+
+ let deserializer = BundlePacketDeserializer::new(receiver, bank_forks, Some(10));
+
+ let packet_bundles: Vec<_> = (0..10)
+ .map(|_| PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(
+ None,
+ transfer(
+ &mint_keypair,
+ &mint_keypair.pubkey(),
+ 100,
+ genesis_config.hash(),
+ ),
+ )
+ .unwrap()]),
+ bundle_id: String::default(),
+ })
+ .collect();
+
+ sender.send(packet_bundles.clone()).unwrap();
+
+ let bundles = deserializer
+ .receive_bundles(Duration::from_millis(100), 5)
+ .unwrap();
+ // this is confusing, but it's sent as one batch
+ assert_eq!(bundles.deserialized_bundles.len(), 10);
+ assert_eq!(bundles.num_dropped_bundles, 0);
+ assert_eq!(bundles.num_dropped_packets, 0);
+
+ // make sure empty
+ assert_matches!(
+ deserializer.receive_bundles(Duration::from_millis(100), 5),
+ Err(RecvTimeoutError::Timeout)
+ );
+
+ // send 2x 10 size batches. capacity is 5, but will return 10 since that's the batch size
+ sender.send(packet_bundles.clone()).unwrap();
+ sender.send(packet_bundles).unwrap();
+ let bundles = deserializer
+ .receive_bundles(Duration::from_millis(100), 5)
+ .unwrap();
+ assert_eq!(bundles.deserialized_bundles.len(), 10);
+ assert_eq!(bundles.num_dropped_bundles, 0);
+ assert_eq!(bundles.num_dropped_packets, 0);
+
+ let bundles = deserializer
+ .receive_bundles(Duration::from_millis(100), 5)
+ .unwrap();
+ assert_eq!(bundles.deserialized_bundles.len(), 10);
+ assert_eq!(bundles.num_dropped_bundles, 0);
+ assert_eq!(bundles.num_dropped_packets, 0);
+
+ assert_matches!(
+ deserializer.receive_bundles(Duration::from_millis(100), 5),
+ Err(RecvTimeoutError::Timeout)
+ );
+ }
+
+ #[test]
+ fn test_receive_bundles_bad_bundles() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair: _,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks = Arc::new(RwLock::new(BankForks::new(
+ Bank::new_no_wallclock_throttle_for_tests(&genesis_config),
+ )));
+
+ let (sender, receiver) = unbounded();
+
+ let deserializer = BundlePacketDeserializer::new(receiver, bank_forks, Some(10));
+
+ let packet_bundles: Vec<_> = (0..10)
+ .map(|_| PacketBundle {
+ batch: PacketBatch::new(vec![]),
+ bundle_id: String::default(),
+ })
+ .collect();
+ sender.send(packet_bundles).unwrap();
+
+ let bundles = deserializer
+ .receive_bundles(Duration::from_millis(100), 5)
+ .unwrap();
+ // this is confusing, but it's sent as one batch
+ assert_eq!(bundles.deserialized_bundles.len(), 0);
+ assert_eq!(bundles.num_dropped_bundles, 10);
+ assert_eq!(bundles.num_dropped_packets, 0);
+ }
+}
diff --git a/core/src/bundle_stage/bundle_packet_receiver.rs b/core/src/bundle_stage/bundle_packet_receiver.rs
new file mode 100644
index 0000000000..ffdf47c7f7
--- /dev/null
+++ b/core/src/bundle_stage/bundle_packet_receiver.rs
@@ -0,0 +1,848 @@
+use {
+ super::BundleStageLoopMetrics,
+ crate::{
+ bundle_stage::{
+ bundle_packet_deserializer::{BundlePacketDeserializer, ReceiveBundleResults},
+ bundle_stage_leader_metrics::BundleStageLeaderMetrics,
+ },
+ immutable_deserialized_bundle::ImmutableDeserializedBundle,
+ packet_bundle::PacketBundle,
+ unprocessed_transaction_storage::UnprocessedTransactionStorage,
+ },
+ crossbeam_channel::{Receiver, RecvTimeoutError},
+ solana_measure::{measure::Measure, measure_us},
+ solana_runtime::bank_forks::BankForks,
+ solana_sdk::timing::timestamp,
+ std::{
+ sync::{Arc, RwLock},
+ time::Duration,
+ },
+};
+
+pub struct BundleReceiver {
+ id: u32,
+ bundle_packet_deserializer: BundlePacketDeserializer,
+}
+
+impl BundleReceiver {
+ pub fn new(
+ id: u32,
+ bundle_packet_receiver: Receiver<Vec<PacketBundle>>,
+ bank_forks: Arc<RwLock<BankForks>>,
+ max_packets_per_bundle: Option<usize>,
+ ) -> Self {
+ Self {
+ id,
+ bundle_packet_deserializer: BundlePacketDeserializer::new(
+ bundle_packet_receiver,
+ bank_forks,
+ max_packets_per_bundle,
+ ),
+ }
+ }
+
+ /// Receives incoming bundles and pushes them into the unprocessed transaction storage
+ pub fn receive_and_buffer_bundles(
+ &mut self,
+ unprocessed_bundle_storage: &mut UnprocessedTransactionStorage,
+ bundle_stage_metrics: &mut BundleStageLoopMetrics,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) -> Result<(), RecvTimeoutError> {
+ let (result, recv_time_us) = measure_us!({
+ let recv_timeout = Self::get_receive_timeout(unprocessed_bundle_storage);
+ let mut recv_and_buffer_measure = Measure::start("recv_and_buffer");
+ self.bundle_packet_deserializer
+ .receive_bundles(recv_timeout, unprocessed_bundle_storage.max_receive_size())
+ // Consumes results if Ok, otherwise we keep the Err
+ .map(|receive_bundle_results| {
+ self.buffer_bundles(
+ receive_bundle_results,
+ unprocessed_bundle_storage,
+ bundle_stage_metrics,
+ // tracer_packet_stats,
+ bundle_stage_leader_metrics,
+ );
+ recv_and_buffer_measure.stop();
+ bundle_stage_metrics.increment_receive_and_buffer_bundles_elapsed_us(
+ recv_and_buffer_measure.as_us(),
+ );
+ })
+ });
+
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_receive_and_buffer_packets_us(recv_time_us);
+
+ result
+ }
+
+ fn get_receive_timeout(
+ unprocessed_transaction_storage: &UnprocessedTransactionStorage,
+ ) -> Duration {
+ // This will almost never wait because the transaction storage will most likely not be empty
+ if !unprocessed_transaction_storage.is_empty() {
+ // If there are buffered packets, run the equivalent of try_recv to try reading more
+ // packets. This prevents starving BankingStage::consume_buffered_packets due to
+ // buffered_packet_batches containing transactions that exceed the cost model for
+ // the current bank.
+ Duration::from_millis(0)
+ } else {
+ // BundleStage should pick up a working_bank as fast as possible
+ Duration::from_millis(100)
+ }
+ }
+
+ fn buffer_bundles(
+ &self,
+ ReceiveBundleResults {
+ deserialized_bundles,
+ num_dropped_bundles: _,
+ num_dropped_packets: _,
+ }: ReceiveBundleResults,
+ unprocessed_transaction_storage: &mut UnprocessedTransactionStorage,
+ bundle_stage_stats: &mut BundleStageLoopMetrics,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) {
+ let bundle_count = deserialized_bundles.len();
+ let packet_count: usize = deserialized_bundles.iter().map(|b| b.len()).sum();
+
+ bundle_stage_stats.increment_num_bundles_received(bundle_count as u64);
+ bundle_stage_stats.increment_num_packets_received(packet_count as u64);
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_total_new_valid_packets(packet_count as u64);
+
+ debug!(
+ "@{:?} bundles: {} txs: {} id: {}",
+ timestamp(),
+ bundle_count,
+ packet_count,
+ self.id
+ );
+
+ Self::push_unprocessed(
+ unprocessed_transaction_storage,
+ deserialized_bundles,
+ bundle_stage_leader_metrics,
+ bundle_stage_stats,
+ );
+ }
+
+ fn push_unprocessed(
+ unprocessed_transaction_storage: &mut UnprocessedTransactionStorage,
+ deserialized_bundles: Vec<ImmutableDeserializedBundle>,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ bundle_stage_stats: &mut BundleStageLoopMetrics,
+ ) {
+ if !deserialized_bundles.is_empty() {
+ let insert_bundles_summary =
+ unprocessed_transaction_storage.insert_bundles(deserialized_bundles);
+
+ bundle_stage_stats.increment_newly_buffered_bundles_count(
+ insert_bundles_summary.num_bundles_inserted as u64,
+ );
+ bundle_stage_stats
+ .increment_num_bundles_dropped(insert_bundles_summary.num_bundles_dropped as u64);
+
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_newly_buffered_packets_count(
+ insert_bundles_summary.num_packets_inserted as u64,
+ );
+
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .accumulate_insert_packet_batches_summary(
+ &insert_bundles_summary.insert_packets_summary,
+ );
+ }
+ }
+}
+
+/// This tests functionality of BundlePacketReceiver and the internals of BundleStorage because
+/// they're tightly intertwined
+#[cfg(test)]
+mod tests {
+ use {
+ super::*,
+ crossbeam_channel::unbounded,
+ rand::{thread_rng, RngCore},
+ solana_bundle::{
+ bundle_execution::LoadAndExecuteBundleError, BundleExecutionError, TipError,
+ },
+ solana_ledger::genesis_utils::create_genesis_config,
+ solana_perf::packet::PacketBatch,
+ solana_poh::poh_recorder::PohRecorderError,
+ solana_runtime::{bank::Bank, genesis_utils::GenesisConfigInfo},
+ solana_sdk::{
+ bundle::{derive_bundle_id, SanitizedBundle},
+ hash::Hash,
+ packet::Packet,
+ signature::{Keypair, Signer},
+ system_transaction::transfer,
+ transaction::VersionedTransaction,
+ },
+ std::collections::{HashSet, VecDeque},
+ };
+
+ /// Makes `num_bundles` random bundles with `num_packets_per_bundle` packets per bundle.
+ fn make_random_bundles(
+ mint_keypair: &Keypair,
+ num_bundles: usize,
+ num_packets_per_bundle: usize,
+ hash: Hash,
+ ) -> Vec {
+ let mut rng = thread_rng();
+
+ (0..num_bundles)
+ .map(|_| {
+ let transfers: Vec<_> = (0..num_packets_per_bundle)
+ .map(|_| {
+ VersionedTransaction::from(transfer(
+ mint_keypair,
+ &mint_keypair.pubkey(),
+ rng.next_u64(),
+ hash,
+ ))
+ })
+ .collect();
+ let bundle_id = derive_bundle_id(&transfers);
+
+ PacketBundle {
+ batch: PacketBatch::new(
+ transfers
+ .iter()
+ .map(|tx| Packet::from_data(None, tx).unwrap())
+ .collect(),
+ ),
+ bundle_id,
+ }
+ })
+ .collect()
+ }
+
+ fn assert_bundles_same(
+ packet_bundles: &[PacketBundle],
+ bundles_to_process: &[(ImmutableDeserializedBundle, SanitizedBundle)],
+ ) {
+ assert_eq!(packet_bundles.len(), bundles_to_process.len());
+ packet_bundles
+ .iter()
+ .zip(bundles_to_process.iter())
+ .for_each(|(packet_bundle, (_, sanitized_bundle))| {
+ assert_eq!(packet_bundle.bundle_id, sanitized_bundle.bundle_id);
+ assert_eq!(
+ packet_bundle.batch.len(),
+ sanitized_bundle.transactions.len()
+ );
+ });
+ }
+
+ #[test]
+ fn test_receive_bundles() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks = Arc::new(RwLock::new(BankForks::new(
+ Bank::new_no_wallclock_throttle_for_tests(&genesis_config),
+ )));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(1_000),
+ VecDeque::with_capacity(1_000),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ let bundles = make_random_bundles(&mint_keypair, 10, 2, genesis_config.hash());
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 10);
+ assert_eq!(bundle_storage.unprocessed_packets_len(), 20);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_packets_len(), 0);
+ assert_eq!(bundle_storage.max_receive_size(), 990);
+
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles, bundles_to_process);
+ (0..bundles_to_process.len()).map(|_| Ok(())).collect()
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.unprocessed_packets_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_packets_len(), 0);
+ assert_eq!(bundle_storage.max_receive_size(), 1000);
+ }
+
+ #[test]
+ fn test_receive_more_bundles_than_capacity() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks = Arc::new(RwLock::new(BankForks::new(
+ Bank::new_no_wallclock_throttle_for_tests(&genesis_config),
+ )));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ let bundles = make_random_bundles(&mint_keypair, 15, 2, genesis_config.hash());
+
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+ // 15 bundles were sent, but the capacity is 10
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 10);
+ assert_eq!(bundle_storage.unprocessed_packets_len(), 20);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_packets_len(), 0);
+
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ // make sure the first 10 bundles are the ones to process
+ assert_bundles_same(&bundles[0..10], bundles_to_process);
+ (0..bundles_to_process.len()).map(|_| Ok(())).collect()
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_poh_record_error_rebuffered() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks = Arc::new(RwLock::new(BankForks::new(
+ Bank::new_no_wallclock_throttle_for_tests(&genesis_config),
+ )));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ // send 5 bundles across the queue
+ let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let poh_max_height_reached_index = 3;
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+
+ // make sure poh end of slot reached + the correct bundles are buffered for the next time.
+ // bundles at index 3 + 4 are rebuffered
+ assert!(bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles, bundles_to_process);
+
+ let mut results = vec![Ok(()); bundles_to_process.len()];
+
+ (poh_max_height_reached_index..bundles_to_process.len()).for_each(|index| {
+ results[index] = Err(BundleExecutionError::PohRecordError(
+ PohRecorderError::MaxHeightReached,
+ ));
+ });
+ results
+ }
+ ));
+
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 2);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles[poh_max_height_reached_index..], bundles_to_process);
+ vec![Ok(()); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_bank_processing_done_rebuffered() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks = Arc::new(RwLock::new(BankForks::new(
+ Bank::new_no_wallclock_throttle_for_tests(&genesis_config),
+ )));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ // send 5 bundles across the queue
+ let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bank_processing_done_index = 3;
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+
+ // bundles at index 3 + 4 are rebuffered
+ assert!(bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles, bundles_to_process);
+
+ let mut results = vec![Ok(()); bundles_to_process.len()];
+
+ (bank_processing_done_index..bundles_to_process.len()).for_each(|index| {
+ results[index] = Err(BundleExecutionError::BankProcessingTimeLimitReached);
+ });
+ results
+ }
+ ));
+
+ // 0, 1, 2 processed; 3, 4 buffered
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 2);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles[bank_processing_done_index..], bundles_to_process);
+ vec![Ok(()); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_bank_execution_error_dropped() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks = Arc::new(RwLock::new(BankForks::new(
+ Bank::new_no_wallclock_throttle_for_tests(&genesis_config),
+ )));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ // send 5 bundles across the queue
+ let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles, bundles_to_process);
+ vec![
+ Err(BundleExecutionError::TransactionFailure(
+ LoadAndExecuteBundleError::ProcessingTimeExceeded(Duration::from_secs(1)),
+ ));
+ bundles_to_process.len()
+ ]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_tip_error_dropped() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks = Arc::new(RwLock::new(BankForks::new(
+ Bank::new_no_wallclock_throttle_for_tests(&genesis_config),
+ )));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ // send 5 bundles across the queue
+ let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles, bundles_to_process);
+ vec![
+ Err(BundleExecutionError::TipError(TipError::LockError));
+ bundles_to_process.len()
+ ]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_lock_error_dropped() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks = Arc::new(RwLock::new(BankForks::new(
+ Bank::new_no_wallclock_throttle_for_tests(&genesis_config),
+ )));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ // send 5 bundles across the queue
+ let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ vec![Err(BundleExecutionError::LockError); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_cost_model_exceeded_set_aside_and_requeued() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks = Arc::new(RwLock::new(BankForks::new(
+ Bank::new_no_wallclock_throttle_for_tests(&genesis_config),
+ )));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ // send 5 bundles across the queue
+ let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+
+ // buffered bundles are moved to cost model side deque
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles, bundles_to_process);
+ vec![Err(BundleExecutionError::ExceedsCostModel); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 5);
+
+ // double-check there are no bundles to process
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert!(bundles_to_process.is_empty());
+ vec![Ok(()); bundles_to_process.len()]
+ }
+ ));
+
+ // create a new bank with a new slot number; cost model buffered bundles should move back onto the queue
+ // in their original order
+ let bank = &bank_forks.read().unwrap().working_bank();
+ let new_bank = Arc::new(Bank::new_from_parent(
+ bank,
+ bank.collector_id(),
+ bank.slot() + 1,
+ ));
+ assert!(!bundle_storage.process_bundles(
+ new_bank,
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ // make sure same order as original
+ assert_bundles_same(&bundles, bundles_to_process);
+ vec![Ok(()); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_cost_model_exceeded_buffer_capacity() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks = Arc::new(RwLock::new(BankForks::new(
+ Bank::new_no_wallclock_throttle_for_tests(&genesis_config),
+ )));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ // send 15 bundles across the queue in three batches of 5; the first batch goes now
+ let bundles0 = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles0.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+
+ // receive and buffer bundles into the cost model reserve to exercise its capacity and bundle-dropping behavior
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+ // buffered bundles are moved to cost model side deque
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles0, bundles_to_process);
+ vec![Err(BundleExecutionError::ExceedsCostModel); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 5);
+
+ let bundles1 = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles1.clone()).unwrap();
+ // should receive 5 more bundles; the cost model buffered length should now be 10
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+ // buffered bundles are moved to cost model side deque
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles1, bundles_to_process);
+ vec![Err(BundleExecutionError::ExceedsCostModel); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 10);
+
+ let bundles2 = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles2.clone()).unwrap();
+
+ // this set will be dropped from the cost model buffered bundles since the buffer is at capacity
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+ // buffered bundles are moved to the cost model side deque, but it's at capacity so the size stays the same
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles2, bundles_to_process);
+ vec![Err(BundleExecutionError::ExceedsCostModel); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 10);
+
+ // create a new bank then call process_bundles again; expect to see [bundles0, bundles1] since bundles2 was dropped
+ let bank = &bank_forks.read().unwrap().working_bank();
+ let new_bank = Arc::new(Bank::new_from_parent(
+ bank,
+ bank.collector_id(),
+ bank.slot() + 1,
+ ));
+ assert!(!bundle_storage.process_bundles(
+ new_bank,
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ // make sure same order as original
+ let expected_bundles: Vec<_> =
+ bundles0.iter().chain(bundles1.iter()).cloned().collect();
+ assert_bundles_same(&expected_bundles, bundles_to_process);
+ vec![Ok(()); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ }
+}
diff --git a/core/src/bundle_stage/bundle_reserved_space_manager.rs b/core/src/bundle_stage/bundle_reserved_space_manager.rs
new file mode 100644
index 0000000000..42cb7adeb6
--- /dev/null
+++ b/core/src/bundle_stage/bundle_reserved_space_manager.rs
@@ -0,0 +1,189 @@
+use {solana_runtime::bank::Bank, solana_sdk::clock::Slot, std::sync::Arc};
+
+/// Manager responsible for reserving `bundle_reserved_cost` during the first `reserved_ticks` of a bank
+/// and resetting the block cost limit to `block_cost_limit` after the reserved tick period is over
+pub struct BundleReservedSpaceManager {
+ // the bank's cost limit
+ block_cost_limit: u64,
+ // bundles get this much reserved space for the first reserved_ticks
+ bundle_reserved_cost: u64,
+ // a reduced block_cost_limit is in effect for this many ticks; afterwards it returns to the full limit
+ reserved_ticks: u64,
+ last_slot_updated: Slot,
+}
+
+impl BundleReservedSpaceManager {
+ pub fn new(block_cost_limit: u64, bundle_reserved_cost: u64, reserved_ticks: u64) -> Self {
+ Self {
+ block_cost_limit,
+ bundle_reserved_cost,
+ reserved_ticks,
+ last_slot_updated: u64::MAX,
+ }
+ }
+
+ /// Call this on creation of a new bank and periodically during bundle processing
+ /// to manage the block cost limits.
+ pub fn tick(&mut self, bank: &Arc<Bank>) {
+ if self.last_slot_updated == bank.slot() && !self.is_in_reserved_tick_period(bank) {
+ // new slot logic already ran, need to revert the block cost limit to original if
+ // ticks are past the reserved tick mark
+ debug!(
+ "slot: {} ticks: {}, resetting block_cost_limit to {}",
+ bank.slot(),
+ bank.tick_height(),
+ self.block_cost_limit
+ );
+ bank.write_cost_tracker()
+ .unwrap()
+ .set_block_cost_limit(self.block_cost_limit);
+ } else if self.last_slot_updated != bank.slot() && self.is_in_reserved_tick_period(bank) {
+ // new slot: if still within the reserved tick period, reserve space;
+ // otherwise the current block limit can be left as-is
+ let new_block_cost_limit = self.reduced_block_cost_limit();
+ debug!(
+ "slot: {} ticks: {}, reserving block_cost_limit with block_cost_limit of {}",
+ bank.slot(),
+ bank.tick_height(),
+ new_block_cost_limit
+ );
+ bank.write_cost_tracker()
+ .unwrap()
+ .set_block_cost_limit(new_block_cost_limit);
+ self.last_slot_updated = bank.slot();
+ }
+ }
+
+ /// Returns true if the bank is still in the period where the block cost limit is reduced
+ pub fn is_in_reserved_tick_period(&self, bank: &Bank) -> bool {
+ bank.tick_height() < self.reserved_ticks
+ }
+
+ /// Returns the block cost limit as determined by the tick height of the bank
+ pub fn expected_block_cost_limits(&self, bank: &Bank) -> u64 {
+ if self.is_in_reserved_tick_period(bank) {
+ self.reduced_block_cost_limit()
+ } else {
+ self.block_cost_limit()
+ }
+ }
+
+ pub fn reduced_block_cost_limit(&self) -> u64 {
+ self.block_cost_limit
+ .saturating_sub(self.bundle_reserved_cost)
+ }
+
+ pub fn block_cost_limit(&self) -> u64 {
+ self.block_cost_limit
+ }
+}
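+
+// A minimal usage sketch (the constants and the `leader_banks` iterator are illustrative
+// assumptions, not part of this patch): construct the manager once, then call `tick` on
+// each bank and periodically while processing so the reduced limit is installed early in
+// the slot and the full limit is restored after `reserved_ticks`.
+//
+// let mut reserved_space = BundleReservedSpaceManager::new(
+// 48_000_000, // block_cost_limit
+// 3_000_000, // bundle_reserved_cost
+// 40, // reserved_ticks
+// );
+// for bank in leader_banks {
+// reserved_space.tick(&bank); // reserves space at the start of the slot
+// // ... execute bundles, then regular transactions ...
+// reserved_space.tick(&bank); // restores the full limit once past reserved_ticks
+// }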
+
+#[cfg(test)]
+mod tests {
+ use {
+ crate::bundle_stage::bundle_reserved_space_manager::BundleReservedSpaceManager,
+ solana_ledger::genesis_utils::create_genesis_config, solana_runtime::bank::Bank,
+ solana_sdk::hash::Hash, std::sync::Arc,
+ };
+
+ #[test]
+ fn test_reserve_block_cost_limits_during_reserved_ticks() {
+ const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100;
+
+ let genesis_config_info = create_genesis_config(100);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config));
+
+ let block_cost_limits = bank.read_cost_tracker().unwrap().block_cost_limit();
+
+ let mut reserved_space = BundleReservedSpaceManager::new(
+ block_cost_limits,
+ BUNDLE_BLOCK_COST_LIMITS_RESERVATION,
+ 5,
+ );
+ reserved_space.tick(&bank);
+
+ assert_eq!(
+ bank.read_cost_tracker().unwrap().block_cost_limit(),
+ block_cost_limits - BUNDLE_BLOCK_COST_LIMITS_RESERVATION
+ );
+ }
+
+ #[test]
+ fn test_dont_reserve_block_cost_limits_after_reserved_ticks() {
+ const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100;
+
+ let genesis_config_info = create_genesis_config(100);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config));
+
+ let block_cost_limits = bank.read_cost_tracker().unwrap().block_cost_limit();
+
+ for _ in 0..5 {
+ bank.register_tick(&Hash::default());
+ }
+
+ let mut reserved_space = BundleReservedSpaceManager::new(
+ block_cost_limits,
+ BUNDLE_BLOCK_COST_LIMITS_RESERVATION,
+ 5,
+ );
+ reserved_space.tick(&bank);
+
+ assert_eq!(
+ bank.read_cost_tracker().unwrap().block_cost_limit(),
+ block_cost_limits
+ );
+ }
+
+ #[test]
+ fn test_dont_reset_block_cost_limits_during_reserved_ticks() {
+ const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100;
+
+ let genesis_config_info = create_genesis_config(100);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config));
+
+ let block_cost_limits = bank.read_cost_tracker().unwrap().block_cost_limit();
+
+ let mut reserved_space = BundleReservedSpaceManager::new(
+ block_cost_limits,
+ BUNDLE_BLOCK_COST_LIMITS_RESERVATION,
+ 5,
+ );
+
+ reserved_space.tick(&bank);
+ bank.register_tick(&Hash::default());
+ reserved_space.tick(&bank);
+
+ assert_eq!(
+ bank.read_cost_tracker().unwrap().block_cost_limit(),
+ block_cost_limits - BUNDLE_BLOCK_COST_LIMITS_RESERVATION
+ );
+ }
+
+ #[test]
+ fn test_reset_block_cost_limits_after_reserved_ticks() {
+ const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100;
+
+ let genesis_config_info = create_genesis_config(100);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config));
+
+ let block_cost_limits = bank.read_cost_tracker().unwrap().block_cost_limit();
+
+ let mut reserved_space = BundleReservedSpaceManager::new(
+ block_cost_limits,
+ BUNDLE_BLOCK_COST_LIMITS_RESERVATION,
+ 5,
+ );
+
+ reserved_space.tick(&bank);
+
+ for _ in 0..5 {
+ bank.register_tick(&Hash::default());
+ }
+ reserved_space.tick(&bank);
+
+ assert_eq!(
+ bank.read_cost_tracker().unwrap().block_cost_limit(),
+ block_cost_limits
+ );
+ }
+}
diff --git a/core/src/bundle_stage/bundle_stage_leader_metrics.rs b/core/src/bundle_stage/bundle_stage_leader_metrics.rs
new file mode 100644
index 0000000000..a32e874bb3
--- /dev/null
+++ b/core/src/bundle_stage/bundle_stage_leader_metrics.rs
@@ -0,0 +1,502 @@
+use {
+ crate::{
+ immutable_deserialized_bundle::DeserializedBundleError,
+ leader_slot_banking_stage_metrics::{self, LeaderSlotMetricsTracker},
+ },
+ solana_bundle::{bundle_execution::LoadAndExecuteBundleError, BundleExecutionError},
+ solana_poh::poh_recorder::BankStart,
+ solana_sdk::{bundle::SanitizedBundle, clock::Slot, saturating_add_assign},
+};
+
+pub struct BundleStageLeaderMetrics {
+ bundle_stage_metrics_tracker: BundleStageStatsMetricsTracker,
+ leader_slot_metrics_tracker: LeaderSlotMetricsTracker,
+}
+
+pub(crate) enum MetricsTrackerAction {
+ Noop,
+ ReportAndResetTracker,
+ NewTracker(Option<BundleStageStats>),
+ ReportAndNewTracker(Option<BundleStageStats>),
+}
+
+impl BundleStageLeaderMetrics {
+ pub fn new(id: u32) -> Self {
+ Self {
+ bundle_stage_metrics_tracker: BundleStageStatsMetricsTracker::new(id),
+ leader_slot_metrics_tracker: LeaderSlotMetricsTracker::new(id),
+ }
+ }
+
+ pub(crate) fn check_leader_slot_boundary(
+ &mut self,
+ bank_start: Option<&BankStart>,
+ ) -> (
+ leader_slot_banking_stage_metrics::MetricsTrackerAction,
+ MetricsTrackerAction,
+ ) {
+ let banking_stage_metrics_action = self
+ .leader_slot_metrics_tracker
+ .check_leader_slot_boundary(bank_start);
+ let bundle_stage_metrics_action = self
+ .bundle_stage_metrics_tracker
+ .check_leader_slot_boundary(bank_start);
+ (banking_stage_metrics_action, bundle_stage_metrics_action)
+ }
+
+ pub(crate) fn apply_action(
+ &mut self,
+ banking_stage_metrics_action: leader_slot_banking_stage_metrics::MetricsTrackerAction,
+ bundle_stage_metrics_action: MetricsTrackerAction,
+ ) -> Option<Slot> {
+ self.leader_slot_metrics_tracker
+ .apply_action(banking_stage_metrics_action);
+ self.bundle_stage_metrics_tracker
+ .apply_action(bundle_stage_metrics_action)
+ }
+
+ pub fn leader_slot_metrics_tracker(&mut self) -> &mut LeaderSlotMetricsTracker {
+ &mut self.leader_slot_metrics_tracker
+ }
+
+ pub fn bundle_stage_metrics_tracker(&mut self) -> &mut BundleStageStatsMetricsTracker {
+ &mut self.bundle_stage_metrics_tracker
+ }
+}
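+
+// Expected call pattern, as a hedged sketch (the `bank_start` value comes from the poh
+// recorder; the surrounding loop is an assumption): check the slot boundary first, then
+// apply both actions so the banking-stage and bundle-stage trackers report and roll over
+// together at slot boundaries.
+//
+// let (banking_action, bundle_action) =
+// bundle_stage_leader_metrics.check_leader_slot_boundary(bank_start.as_ref());
+// bundle_stage_leader_metrics.apply_action(banking_action, bundle_action);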
+
+pub struct BundleStageStatsMetricsTracker {
+ bundle_stage_metrics: Option<BundleStageStats>,
+ id: u32,
+}
+
+impl BundleStageStatsMetricsTracker {
+ pub fn new(id: u32) -> Self {
+ Self {
+ bundle_stage_metrics: None,
+ id,
+ }
+ }
+
+ /// Similar to LeaderSlotMetricsTracker::check_leader_slot_boundary
+ pub(crate) fn check_leader_slot_boundary(
+ &mut self,
+ bank_start: Option<&BankStart>,
+ ) -> MetricsTrackerAction {
+ match (self.bundle_stage_metrics.as_mut(), bank_start) {
+ (None, None) => MetricsTrackerAction::Noop,
+ (Some(_), None) => MetricsTrackerAction::ReportAndResetTracker,
+ // Our leader slot has begun, time to create a new slot tracker
+ (None, Some(bank_start)) => MetricsTrackerAction::NewTracker(Some(
+ BundleStageStats::new(self.id, bank_start.working_bank.slot()),
+ )),
+ (Some(bundle_stage_metrics), Some(bank_start)) => {
+ if bundle_stage_metrics.slot != bank_start.working_bank.slot() {
+ // Last slot has ended, new slot has began
+ MetricsTrackerAction::ReportAndNewTracker(Some(BundleStageStats::new(
+ self.id,
+ bank_start.working_bank.slot(),
+ )))
+ } else {
+ MetricsTrackerAction::Noop
+ }
+ }
+ }
+ }
+
+ /// Similar to LeaderSlotMetricsTracker::apply_action
+ pub(crate) fn apply_action(&mut self, action: MetricsTrackerAction) -> Option<Slot> {
+ match action {
+ MetricsTrackerAction::Noop => None,
+ MetricsTrackerAction::ReportAndResetTracker => {
+ let mut reported_slot = None;
+ if let Some(bundle_stage_metrics) = self.bundle_stage_metrics.as_mut() {
+ bundle_stage_metrics.report();
+ reported_slot = bundle_stage_metrics.reported_slot();
+ }
+ self.bundle_stage_metrics = None;
+ reported_slot
+ }
+ MetricsTrackerAction::NewTracker(new_bundle_stage_metrics) => {
+ self.bundle_stage_metrics = new_bundle_stage_metrics;
+ self.bundle_stage_metrics.as_ref().unwrap().reported_slot()
+ }
+ MetricsTrackerAction::ReportAndNewTracker(new_bundle_stage_metrics) => {
+ let mut reported_slot = None;
+ if let Some(bundle_stage_metrics) = self.bundle_stage_metrics.as_mut() {
+ bundle_stage_metrics.report();
+ reported_slot = bundle_stage_metrics.reported_slot();
+ }
+ self.bundle_stage_metrics = new_bundle_stage_metrics;
+ reported_slot
+ }
+ }
+ }
+
+ pub(crate) fn increment_sanitize_transaction_result(
+ &mut self,
+ result: &Result<SanitizedBundle, DeserializedBundleError>,
+ ) {
+ if let Some(bundle_stage_metrics) = self.bundle_stage_metrics.as_mut() {
+ match result {
+ Ok(_) => {
+ saturating_add_assign!(bundle_stage_metrics.sanitize_transaction_ok, 1);
+ }
+ Err(e) => match e {
+ DeserializedBundleError::VoteOnlyMode => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_vote_only_mode,
+ 1
+ );
+ }
+ DeserializedBundleError::BlacklistedAccount => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_blacklisted_account,
+ 1
+ );
+ }
+ DeserializedBundleError::FailedToSerializeTransaction => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_to_serialize,
+ 1
+ );
+ }
+ DeserializedBundleError::DuplicateTransaction => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_duplicate_transaction,
+ 1
+ );
+ }
+ DeserializedBundleError::FailedCheckTransactions => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_check,
+ 1
+ );
+ }
+ DeserializedBundleError::FailedToSerializePacket => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_to_serialize,
+ 1
+ );
+ }
+ DeserializedBundleError::EmptyBatch => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_empty_batch,
+ 1
+ );
+ }
+ DeserializedBundleError::TooManyPackets => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_too_many_packets,
+ 1
+ );
+ }
+ DeserializedBundleError::MarkedDiscard => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_marked_discard,
+ 1
+ );
+ }
+ DeserializedBundleError::SignatureVerificationFailure => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_sig_verify_failed,
+ 1
+ );
+ }
+ },
+ }
+ }
+ }
+
+ pub fn increment_bundle_execution_result(&mut self, result: &Result<(), BundleExecutionError>) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ match result {
+ Ok(_) => {
+ saturating_add_assign!(bundle_stage_metrics.execution_results_ok, 1);
+ }
+ Err(BundleExecutionError::PohRecordError(_))
+ | Err(BundleExecutionError::BankProcessingTimeLimitReached) => {
+ saturating_add_assign!(
+ bundle_stage_metrics.execution_results_poh_max_height,
+ 1
+ );
+ }
+ Err(BundleExecutionError::TransactionFailure(
+ LoadAndExecuteBundleError::ProcessingTimeExceeded(_),
+ )) => {
+ saturating_add_assign!(bundle_stage_metrics.num_execution_timeouts, 1);
+ }
+ Err(BundleExecutionError::TransactionFailure(
+ LoadAndExecuteBundleError::TransactionError { .. },
+ )) => {
+ saturating_add_assign!(
+ bundle_stage_metrics.execution_results_transaction_failures,
+ 1
+ );
+ }
+ Err(BundleExecutionError::TransactionFailure(
+ LoadAndExecuteBundleError::LockError { .. },
+ ))
+ | Err(BundleExecutionError::LockError) => {
+ saturating_add_assign!(bundle_stage_metrics.num_lock_errors, 1);
+ }
+ Err(BundleExecutionError::ExceedsCostModel) => {
+ saturating_add_assign!(
+ bundle_stage_metrics.execution_results_exceeds_cost_model,
+ 1
+ );
+ }
+ Err(BundleExecutionError::TipError(_)) => {
+ saturating_add_assign!(bundle_stage_metrics.execution_results_tip_errors, 1);
+ }
+ Err(BundleExecutionError::TransactionFailure(
+ LoadAndExecuteBundleError::InvalidPreOrPostAccounts,
+ )) => {
+ saturating_add_assign!(bundle_stage_metrics.bad_argument, 1);
+ }
+ }
+ }
+ }
+
+ pub(crate) fn increment_sanitize_bundle_elapsed_us(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.sanitize_bundle_elapsed_us, count);
+ }
+ }
+
+ pub(crate) fn increment_locked_bundle_elapsed_us(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.locked_bundle_elapsed_us, count);
+ }
+ }
+
+ pub(crate) fn increment_num_init_tip_account_errors(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.num_init_tip_account_errors, count);
+ }
+ }
+
+ pub(crate) fn increment_num_init_tip_account_ok(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.num_init_tip_account_ok, count);
+ }
+ }
+
+ pub(crate) fn increment_num_change_tip_receiver_errors(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.num_change_tip_receiver_errors, count);
+ }
+ }
+
+ pub(crate) fn increment_num_change_tip_receiver_ok(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.num_change_tip_receiver_ok, count);
+ }
+ }
+
+ pub(crate) fn increment_change_tip_receiver_elapsed_us(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.change_tip_receiver_elapsed_us, count);
+ }
+ }
+
+ pub(crate) fn increment_num_execution_retries(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.num_execution_retries, count);
+ }
+ }
+
+ pub(crate) fn increment_execute_locked_bundles_elapsed_us(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(
+ bundle_stage_metrics.execute_locked_bundles_elapsed_us,
+ count
+ );
+ }
+ }
+}
+
+#[derive(Default)]
+pub struct BundleStageStats {
+ id: u32,
+ slot: u64,
+ is_reported: bool,
+
+ sanitize_transaction_ok: u64,
+ sanitize_transaction_vote_only_mode: u64,
+ sanitize_transaction_blacklisted_account: u64,
+ sanitize_transaction_failed_to_serialize: u64,
+ sanitize_transaction_duplicate_transaction: u64,
+ sanitize_transaction_failed_check: u64,
+ sanitize_bundle_elapsed_us: u64,
+ sanitize_transaction_failed_empty_batch: u64,
+ sanitize_transaction_failed_too_many_packets: u64,
+ sanitize_transaction_failed_marked_discard: u64,
+ sanitize_transaction_failed_sig_verify_failed: u64,
+
+ locked_bundle_elapsed_us: u64,
+
+ num_lock_errors: u64,
+
+ num_init_tip_account_errors: u64,
+ num_init_tip_account_ok: u64,
+
+ num_change_tip_receiver_errors: u64,
+ num_change_tip_receiver_ok: u64,
+ change_tip_receiver_elapsed_us: u64,
+
+ num_execution_timeouts: u64,
+ num_execution_retries: u64,
+
+ execute_locked_bundles_elapsed_us: u64,
+
+ execution_results_ok: u64,
+ execution_results_poh_max_height: u64,
+ execution_results_transaction_failures: u64,
+ execution_results_exceeds_cost_model: u64,
+ execution_results_tip_errors: u64,
+ execution_results_max_retries: u64,
+
+ bad_argument: u64,
+}
+
+impl BundleStageStats {
+ pub fn new(id: u32, slot: Slot) -> BundleStageStats {
+ BundleStageStats {
+ id,
+ slot,
+ is_reported: false,
+ ..BundleStageStats::default()
+ }
+ }
+
+ /// Returns `Some(self.slot)` if the metrics have been reported, otherwise returns None
+ fn reported_slot(&self) -> Option<Slot> {
+ if self.is_reported {
+ Some(self.slot)
+ } else {
+ None
+ }
+ }
+
+ pub fn report(&mut self) {
+ self.is_reported = true;
+
+ datapoint_info!(
+ "bundle_stage-stats",
+ ("id", self.id, i64),
+ ("slot", self.slot, i64),
+ ("num_sanitized_ok", self.sanitize_transaction_ok, i64),
+ (
+ "sanitize_transaction_vote_only_mode",
+ self.sanitize_transaction_vote_only_mode,
+ i64
+ ),
+ (
+ "sanitize_transaction_blacklisted_account",
+ self.sanitize_transaction_blacklisted_account,
+ i64
+ ),
+ (
+ "sanitize_transaction_failed_to_serialize",
+ self.sanitize_transaction_failed_to_serialize,
+ i64
+ ),
+ (
+ "sanitize_transaction_duplicate_transaction",
+ self.sanitize_transaction_duplicate_transaction,
+ i64
+ ),
+ (
+ "sanitize_transaction_failed_check",
+ self.sanitize_transaction_failed_check,
+ i64
+ ),
+ (
+ "sanitize_bundle_elapsed_us",
+ self.sanitize_bundle_elapsed_us,
+ i64
+ ),
+ (
+ "sanitize_transaction_failed_empty_batch",
+ self.sanitize_transaction_failed_empty_batch,
+ i64
+ ),
+ (
+ "sanitize_transaction_failed_too_many_packets",
+ self.sanitize_transaction_failed_too_many_packets,
+ i64
+ ),
+ (
+ "sanitize_transaction_failed_marked_discard",
+ self.sanitize_transaction_failed_marked_discard,
+ i64
+ ),
+ (
+ "sanitize_transaction_failed_sig_verify_failed",
+ self.sanitize_transaction_failed_sig_verify_failed,
+ i64
+ ),
+ (
+ "locked_bundle_elapsed_us",
+ self.locked_bundle_elapsed_us,
+ i64
+ ),
+ ("num_lock_errors", self.num_lock_errors, i64),
+ (
+ "num_init_tip_account_errors",
+ self.num_init_tip_account_errors,
+ i64
+ ),
+ ("num_init_tip_account_ok", self.num_init_tip_account_ok, i64),
+ (
+ "num_change_tip_receiver_errors",
+ self.num_change_tip_receiver_errors,
+ i64
+ ),
+ (
+ "num_change_tip_receiver_ok",
+ self.num_change_tip_receiver_ok,
+ i64
+ ),
+ (
+ "change_tip_receiver_elapsed_us",
+ self.change_tip_receiver_elapsed_us,
+ i64
+ ),
+ ("num_execution_timeouts", self.num_execution_timeouts, i64),
+ ("num_execution_retries", self.num_execution_retries, i64),
+ (
+ "execute_locked_bundles_elapsed_us",
+ self.execute_locked_bundles_elapsed_us,
+ i64
+ ),
+ ("execution_results_ok", self.execution_results_ok, i64),
+ (
+ "execution_results_poh_max_height",
+ self.execution_results_poh_max_height,
+ i64
+ ),
+ (
+ "execution_results_transaction_failures",
+ self.execution_results_transaction_failures,
+ i64
+ ),
+ (
+ "execution_results_exceeds_cost_model",
+ self.execution_results_exceeds_cost_model,
+ i64
+ ),
+ (
+ "execution_results_tip_errors",
+ self.execution_results_tip_errors,
+ i64
+ ),
+ (
+ "execution_results_max_retries",
+ self.execution_results_max_retries,
+ i64
+ ),
+ ("bad_argument", self.bad_argument, i64)
+ );
+ }
+}
diff --git a/core/src/bundle_stage/committer.rs b/core/src/bundle_stage/committer.rs
new file mode 100644
index 0000000000..ae87c25aaf
--- /dev/null
+++ b/core/src/bundle_stage/committer.rs
@@ -0,0 +1,221 @@
+use {
+ crate::{
+ banking_stage::committer::CommitTransactionDetails,
+ leader_slot_banking_stage_timing_metrics::LeaderExecuteAndCommitTimings,
+ },
+ solana_bundle::bundle_execution::LoadAndExecuteBundleOutput,
+ solana_ledger::blockstore_processor::TransactionStatusSender,
+ solana_measure::measure_us,
+ solana_runtime::{
+ bank::{
+ Bank, CommitTransactionCounts, TransactionBalances, TransactionBalancesSet,
+ TransactionResults,
+ },
+ bank_utils,
+ prioritization_fee_cache::PrioritizationFeeCache,
+ vote_sender_types::ReplayVoteSender,
+ },
+ solana_sdk::{saturating_add_assign, transaction::SanitizedTransaction},
+ solana_transaction_status::{
+ token_balances::{TransactionTokenBalances, TransactionTokenBalancesSet},
+ PreBalanceInfo,
+ },
+ std::sync::Arc,
+};
+
+#[derive(Clone, Debug, Default, PartialEq, Eq)]
+pub struct CommitBundleDetails {
+ pub commit_transaction_details: Vec<Vec<CommitTransactionDetails>>,
+}
+
+pub struct Committer {
+ transaction_status_sender: Option<TransactionStatusSender>,
+ replay_vote_sender: ReplayVoteSender,
+ prioritization_fee_cache: Arc<PrioritizationFeeCache>,
+}
+
+impl Committer {
+ pub fn new(
+ transaction_status_sender: Option<TransactionStatusSender>,
+ replay_vote_sender: ReplayVoteSender,
+ prioritization_fee_cache: Arc<PrioritizationFeeCache>,
+ ) -> Self {
+ Self {
+ transaction_status_sender,
+ replay_vote_sender,
+ prioritization_fee_cache,
+ }
+ }
+
+ pub(crate) fn transaction_status_sender_enabled(&self) -> bool {
+ self.transaction_status_sender.is_some()
+ }
+
+ /// Very similar to Committer::commit_transactions, but works with bundles.
+ /// The main difference is that there are multiple non-parallelizable transaction vectors to commit,
+ /// and post-balances are collected after execution instead of from the bank in Self::collect_balances_and_send_status_batch.
+ #[allow(clippy::too_many_arguments)]
+ pub(crate) fn commit_bundle<'a>(
+ &self,
+ bundle_execution_output: &'a mut LoadAndExecuteBundleOutput<'a>,
+ mut starting_transaction_index: Option<usize>,
+ bank: &Arc<Bank>,
+ execute_and_commit_timings: &mut LeaderExecuteAndCommitTimings,
+ ) -> (u64, CommitBundleDetails) {
+ let (last_blockhash, lamports_per_signature) =
+ bank.last_blockhash_and_lamports_per_signature();
+
+ let transaction_output = bundle_execution_output.bundle_transaction_results_mut();
+
+ let (commit_transaction_details, commit_times): (Vec<_>, Vec<_>) = transaction_output
+ .iter_mut()
+ .map(|bundle_results| {
+ let committed_transactions_count = bundle_results
+ .load_and_execute_transactions_output()
+ .executed_transactions_count
+ as u64;
+
+ let committed_non_vote_transactions_count = bundle_results
+ .load_and_execute_transactions_output()
+ .executed_non_vote_transactions_count
+ as u64;
+
+ let committed_with_failure_result_count = bundle_results
+ .load_and_execute_transactions_output()
+ .executed_transactions_count
+ .saturating_sub(
+ bundle_results
+ .load_and_execute_transactions_output()
+ .executed_with_successful_result_count,
+ ) as u64;
+
+ let signature_count = bundle_results
+ .load_and_execute_transactions_output()
+ .signature_count;
+
+ let sanitized_transactions = bundle_results.transactions().to_vec();
+ let execution_results = bundle_results.execution_results().to_vec();
+
+ let loaded_transactions = bundle_results.loaded_transactions_mut();
+ debug!("loaded_transactions: {:?}", loaded_transactions);
+
+ let (tx_results, commit_time_us) = measure_us!(bank.commit_transactions(
+ &sanitized_transactions,
+ loaded_transactions,
+ execution_results,
+ last_blockhash,
+ lamports_per_signature,
+ CommitTransactionCounts {
+ committed_transactions_count,
+ committed_non_vote_transactions_count,
+ committed_with_failure_result_count,
+ signature_count,
+ },
+ &mut execute_and_commit_timings.execute_timings,
+ ));
+
+ let commit_transaction_statuses: Vec<_> = tx_results
+ .execution_results
+ .iter()
+ .map(|execution_result| match execution_result.details() {
+ Some(details) => CommitTransactionDetails::Committed {
+ compute_units: details.executed_units,
+ },
+ None => CommitTransactionDetails::NotCommitted,
+ })
+ .collect();
+
+ let ((), find_and_send_votes_us) = measure_us!({
+ bank_utils::find_and_send_votes(
+ &sanitized_transactions,
+ &tx_results,
+ Some(&self.replay_vote_sender),
+ );
+
+ let post_balance_info = bundle_results.post_balance_info().clone();
+ let pre_balance_info = bundle_results.pre_balance_info();
+
+ let num_committed = tx_results
+ .execution_results
+ .iter()
+ .filter(|r| r.was_executed())
+ .count();
+
+ self.collect_balances_and_send_status_batch(
+ tx_results,
+ bank,
+ sanitized_transactions,
+ pre_balance_info,
+ post_balance_info,
+ starting_transaction_index,
+ );
+
+ // NOTE: we're doing batched records, so we need to increment the poh starting_transaction_index
+ // by number committed so the next batch will have the correct starting_transaction_index
+ starting_transaction_index =
+ starting_transaction_index.map(|starting_transaction_index| {
+ starting_transaction_index.saturating_add(num_committed)
+ });
+
+ self.prioritization_fee_cache
+ .update(bank, bundle_results.executed_transactions().into_iter());
+ });
+ saturating_add_assign!(
+ execute_and_commit_timings.find_and_send_votes_us,
+ find_and_send_votes_us
+ );
+
+ (commit_transaction_statuses, commit_time_us)
+ })
+ .unzip();
+
+ (
+ commit_times.iter().sum(),
+ CommitBundleDetails {
+ commit_transaction_details,
+ },
+ )
+ }
+
+ fn collect_balances_and_send_status_batch(
+ &self,
+ tx_results: TransactionResults,
+ bank: &Arc<Bank>,
+ sanitized_transactions: Vec<SanitizedTransaction>,
+ pre_balance_info: &mut PreBalanceInfo,
+ (post_balances, post_token_balances): (TransactionBalances, TransactionTokenBalances),
+ starting_transaction_index: Option<usize>,
+ ) {
+ if let Some(transaction_status_sender) = &self.transaction_status_sender {
+ let mut transaction_index = starting_transaction_index.unwrap_or_default();
+ let batch_transaction_indexes: Vec<_> = tx_results
+ .execution_results
+ .iter()
+ .map(|result| {
+ if result.was_executed() {
+ let this_transaction_index = transaction_index;
+ saturating_add_assign!(transaction_index, 1);
+ this_transaction_index
+ } else {
+ 0
+ }
+ })
+ .collect();
+ transaction_status_sender.send_transaction_status_batch(
+ bank.clone(),
+ sanitized_transactions,
+ tx_results.execution_results,
+ TransactionBalancesSet::new(
+ std::mem::take(&mut pre_balance_info.native),
+ post_balances,
+ ),
+ TransactionTokenBalancesSet::new(
+ std::mem::take(&mut pre_balance_info.token),
+ post_token_balances,
+ ),
+ tx_results.rent_debits,
+ batch_transaction_indexes,
+ );
+ }
+ }
+}
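+
+// Call shape, sketched for orientation (the execution output and timings values are
+// produced by the bundle consumer elsewhere in this patch; the variable names here are
+// assumptions):
+//
+// let (commit_time_us, commit_bundle_details) = committer.commit_bundle(
+// &mut bundle_execution_output,
+// starting_transaction_index,
+// &bank,
+// &mut execute_and_commit_timings,
+// );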
diff --git a/core/src/bundle_stage/result.rs b/core/src/bundle_stage/result.rs
new file mode 100644
index 0000000000..3370251791
--- /dev/null
+++ b/core/src/bundle_stage/result.rs
@@ -0,0 +1,41 @@
+use {
+ crate::{
+ bundle_stage::bundle_account_locker::BundleAccountLockerError, tip_manager::TipPaymentError,
+ },
+ anchor_lang::error::Error,
+ solana_bundle::bundle_execution::LoadAndExecuteBundleError,
+ solana_poh::poh_recorder::PohRecorderError,
+ thiserror::Error,
+};
+
+pub type BundleExecutionResult<T> = Result<T, BundleExecutionError>;
+
+#[derive(Error, Debug, Clone)]
+pub enum BundleExecutionError {
+ #[error("PoH record error: {0}")]
+ PohRecordError(#[from] PohRecorderError),
+
+ #[error("Bank is done processing")]
+ BankProcessingDone,
+
+ #[error("Execution error: {0}")]
+ ExecutionError(#[from] LoadAndExecuteBundleError),
+
+ #[error("The bundle exceeds the cost model")]
+ ExceedsCostModel,
+
+ #[error("Tip error {0}")]
+ TipError(#[from] TipPaymentError),
+
+ #[error("Error locking bundle")]
+ LockError(#[from] BundleAccountLockerError),
+}
+
+impl From<Error> for TipPaymentError {
+ fn from(anchor_err: Error) -> Self {
+ match anchor_err {
+ Error::AnchorError(e) => Self::AnchorError(e.error_msg),
+ Error::ProgramError(e) => Self::AnchorError(e.to_string()),
+ }
+ }
+}
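+
+// Because the variants use `#[from]`, fallible helpers can bubble errors up with `?`.
+// A minimal sketch under assumptions (`try_record` and `poh_record` are hypothetical):
+//
+// fn try_record() -> BundleExecutionResult<()> {
+// poh_record()?; // a PohRecorderError converts into BundleExecutionError::PohRecordError
+// Ok(())
+// }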
diff --git a/core/src/consensus_cache_updater.rs b/core/src/consensus_cache_updater.rs
new file mode 100644
index 0000000000..1bd75554c0
--- /dev/null
+++ b/core/src/consensus_cache_updater.rs
@@ -0,0 +1,52 @@
+use {
+ solana_runtime::bank::Bank,
+ solana_sdk::{clock::Epoch, pubkey::Pubkey},
+ std::collections::HashSet,
+};
+
+#[derive(Default)]
+pub(crate) struct ConsensusCacheUpdater {
+ last_epoch_updated: Epoch,
+ consensus_accounts_cache: HashSet<Pubkey>,
+}
+
+impl ConsensusCacheUpdater {
+ pub(crate) fn consensus_accounts_cache(&self) -> &HashSet<Pubkey> {
+ &self.consensus_accounts_cache
+ }
+
+ /// Builds a HashSet of all consensus-related accounts for the Bank's epoch
+ fn get_consensus_accounts(bank: &Bank) -> HashSet<Pubkey> {
+ let mut consensus_accounts: HashSet<Pubkey> = HashSet::new();
+ if let Some(epoch_stakes) = bank.epoch_stakes(bank.epoch()) {
+ // votes use the following accounts:
+ // - vote_account pubkey: writeable
+ // - authorized_voter_pubkey: read-only
+ // - node_keypair pubkey: payer (writeable)
+ let node_id_vote_accounts = epoch_stakes.node_id_to_vote_accounts();
+
+ let vote_accounts = node_id_vote_accounts
+ .values()
+ .flat_map(|v| v.vote_accounts.clone());
+
+ // vote_account
+ consensus_accounts.extend(vote_accounts.into_iter());
+ // authorized_voter_pubkey
+ consensus_accounts.extend(epoch_stakes.epoch_authorized_voters().keys());
+ // node_keypair
+ consensus_accounts.extend(epoch_stakes.node_id_to_vote_accounts().keys());
+ }
+ consensus_accounts
+ }
+
+ /// Updates consensus-related accounts on epoch boundaries
+ pub(crate) fn maybe_update(&mut self, bank: &Bank) -> bool {
+ if bank.epoch() > self.last_epoch_updated {
+ self.consensus_accounts_cache = Self::get_consensus_accounts(bank);
+ self.last_epoch_updated = bank.epoch();
+ true
+ } else {
+ false
+ }
+ }
+}
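+
+// Typical per-bank usage, sketched (the surrounding loop and log line are assumptions):
+// the cache refreshes at most once per epoch, and readers treat the returned set as the
+// accounts that bundles must not mention.
+//
+// if consensus_cache_updater.maybe_update(&bank) {
+// debug!("consensus accounts cache refreshed for epoch {}", bank.epoch());
+// }
+// let blacklisted_accounts = consensus_cache_updater.consensus_accounts_cache();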
diff --git a/core/src/immutable_deserialized_bundle.rs b/core/src/immutable_deserialized_bundle.rs
new file mode 100644
index 0000000000..b3d82741fd
--- /dev/null
+++ b/core/src/immutable_deserialized_bundle.rs
@@ -0,0 +1,483 @@
+use {
+ crate::{
+ immutable_deserialized_packet::ImmutableDeserializedPacket, packet_bundle::PacketBundle,
+ },
+ solana_perf::sigverify::verify_packet,
+ solana_runtime::{bank::Bank, transaction_error_metrics::TransactionErrorMetrics},
+ solana_sdk::{
+ bundle::SanitizedBundle, clock::MAX_PROCESSING_AGE, pubkey::Pubkey, signature::Signature,
+ transaction::SanitizedTransaction,
+ },
+ std::{
+ collections::{hash_map::RandomState, HashSet},
+ iter::repeat,
+ },
+ thiserror::Error,
+};
+
+#[derive(Debug, Error, Eq, PartialEq)]
+pub enum DeserializedBundleError {
+ #[error("FailedToSerializePacket")]
+ FailedToSerializePacket,
+
+ #[error("EmptyBatch")]
+ EmptyBatch,
+
+ #[error("TooManyPackets")]
+ TooManyPackets,
+
+ #[error("MarkedDiscard")]
+ MarkedDiscard,
+
+ #[error("SignatureVerificationFailure")]
+ SignatureVerificationFailure,
+
+ #[error("Bank is in vote-only mode")]
+ VoteOnlyMode,
+
+ #[error("Bundle mentions blacklisted account")]
+ BlacklistedAccount,
+
+ #[error("Bundle contains a transaction that failed to serialize")]
+ FailedToSerializeTransaction,
+
+ #[error("Bundle contains a duplicate transaction")]
+ DuplicateTransaction,
+
+ #[error("Bundle failed check_transactions")]
+ FailedCheckTransactions,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub struct ImmutableDeserializedBundle {
+ bundle_id: String,
+ packets: Vec<ImmutableDeserializedPacket>,
+}
+
+impl ImmutableDeserializedBundle {
+ pub fn new(
+ bundle: &mut PacketBundle,
+ max_len: Option<usize>,
+ ) -> Result<Self, DeserializedBundleError> {
+ // Checks: the batch is non-empty, within the optional max length, has no packets marked
+ // for discard, passes signature verification, and every packet sanitizes into an
+ // ImmutableDeserializedPacket
+ if bundle.batch.is_empty() {
+ return Err(DeserializedBundleError::EmptyBatch);
+ }
+ if max_len
+ .map(|max_len| bundle.batch.len() > max_len)
+ .unwrap_or(false)
+ {
+ return Err(DeserializedBundleError::TooManyPackets);
+ }
+ if bundle.batch.iter().any(|p| p.meta().discard()) {
+ return Err(DeserializedBundleError::MarkedDiscard);
+ }
+ if bundle.batch.iter_mut().any(|p| !verify_packet(p, false)) {
+ return Err(DeserializedBundleError::SignatureVerificationFailure);
+ }
+
+ let immutable_packets: Vec<_> = bundle
+ .batch
+ .iter()
+ .filter_map(|p| ImmutableDeserializedPacket::new(p.clone()).ok())
+ .collect();
+
+ if bundle.batch.len() != immutable_packets.len() {
+ return Err(DeserializedBundleError::FailedToSerializePacket);
+ }
+
+ Ok(Self {
+ bundle_id: bundle.bundle_id.clone(),
+ packets: immutable_packets,
+ })
+ }
+
+ #[allow(clippy::len_without_is_empty)]
+ pub fn len(&self) -> usize {
+ self.packets.len()
+ }
+
+ pub fn bundle_id(&self) -> &str {
+ &self.bundle_id
+ }
+
+ /// A bundle has the following requirements:
+ /// - all transactions must be sanitizable
+ /// - no duplicate signatures
+ /// - must not contain a blacklisted account
+ /// - can't already be processed or contain a bad blockhash
+ pub fn build_sanitized_bundle(
+ &self,
+ bank: &Bank,
+ blacklisted_accounts: &HashSet<Pubkey>,
+ transaction_error_metrics: &mut TransactionErrorMetrics,
+ ) -> Result<SanitizedBundle, DeserializedBundleError> {
+ if bank.vote_only_bank() {
+ return Err(DeserializedBundleError::VoteOnlyMode);
+ }
+
+ let transactions: Vec<SanitizedTransaction> = self
+ .packets
+ .iter()
+ .filter_map(|p| {
+ p.build_sanitized_transaction(&bank.feature_set, bank.vote_only_bank(), bank)
+ })
+ .collect();
+
+ if self.packets.len() != transactions.len() {
+ return Err(DeserializedBundleError::FailedToSerializeTransaction);
+ }
+
+ let unique_signatures: HashSet<&Signature, RandomState> =
+ HashSet::from_iter(transactions.iter().map(|tx| tx.signature()));
+ if unique_signatures.len() != transactions.len() {
+ return Err(DeserializedBundleError::DuplicateTransaction);
+ }
+
+ let contains_blacklisted_account = transactions.iter().any(|tx| {
+ tx.message()
+ .account_keys()
+ .iter()
+ .any(|acc| blacklisted_accounts.contains(acc))
+ });
+
+ if contains_blacklisted_account {
+ return Err(DeserializedBundleError::BlacklistedAccount);
+ }
+
+ // assume all locks succeed so check_transactions can flag already-processed transactions or expired/invalid blockhashes
+ let lock_results: Vec<_> = repeat(Ok(())).take(transactions.len()).collect();
+ let check_results = bank.check_transactions(
+ &transactions,
+ &lock_results,
+ MAX_PROCESSING_AGE,
+ transaction_error_metrics,
+ );
+
+ if check_results.iter().any(|r| r.0.is_err()) {
+ return Err(DeserializedBundleError::FailedCheckTransactions);
+ }
+
+ Ok(SanitizedBundle {
+ transactions,
+ bundle_id: self.bundle_id.clone(),
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use {
+ crate::{
+ immutable_deserialized_bundle::{DeserializedBundleError, ImmutableDeserializedBundle},
+ packet_bundle::PacketBundle,
+ },
+ solana_client::rpc_client::SerializableTransaction,
+ solana_ledger::genesis_utils::create_genesis_config,
+ solana_perf::packet::PacketBatch,
+ solana_runtime::{
+ bank::{Bank, NewBankOptions},
+ genesis_utils::GenesisConfigInfo,
+ transaction_error_metrics::TransactionErrorMetrics,
+ },
+ solana_sdk::{
+ hash::Hash,
+ packet::Packet,
+ pubkey::Pubkey,
+ signature::{Keypair, Signer},
+ system_transaction::transfer,
+ },
+ std::{collections::HashSet, sync::Arc},
+ };
+
+ /// Happy case
+ #[test]
+ fn test_simple_get_sanitized_bundle() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let kp = Keypair::new();
+
+ let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash());
+
+ let tx1 = transfer(&mint_keypair, &kp.pubkey(), 501, genesis_config.hash());
+
+ let bundle = ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![
+ Packet::from_data(None, &tx0).unwrap(),
+ Packet::from_data(None, &tx1).unwrap(),
+ ]),
+ bundle_id: String::default(),
+ },
+ None,
+ )
+ .unwrap();
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+ let sanitized_bundle = bundle
+ .build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors)
+ .unwrap();
+ assert_eq!(sanitized_bundle.transactions.len(), 2);
+ assert_eq!(
+ sanitized_bundle.transactions[0].signature(),
+ tx0.get_signature()
+ );
+ assert_eq!(
+ sanitized_bundle.transactions[1].signature(),
+ tx1.get_signature()
+ );
+ }
+
+ #[test]
+ fn test_empty_batch_fails_to_init() {
+ assert_eq!(
+ ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![]),
+ bundle_id: String::default(),
+ },
+ None,
+ ),
+ Err(DeserializedBundleError::EmptyBatch)
+ );
+ }
+
+ #[test]
+ fn test_too_many_packets_fails_to_init() {
+ let kp = Keypair::new();
+
+ assert_eq!(
+ ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(
+ (0..10)
+ .map(|i| {
+ Packet::from_data(
+ None,
+ transfer(&kp, &kp.pubkey(), i, Hash::default()),
+ )
+ .unwrap()
+ })
+ .collect()
+ ),
+ bundle_id: String::default(),
+ },
+ Some(5),
+ ),
+ Err(DeserializedBundleError::TooManyPackets)
+ );
+ }
+
+ #[test]
+ fn test_packets_marked_discard_fails_to_init() {
+ let kp = Keypair::new();
+
+ let mut packet =
+ Packet::from_data(None, transfer(&kp, &kp.pubkey(), 100, Hash::default())).unwrap();
+ packet.meta_mut().set_discard(true);
+
+ assert_eq!(
+ ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![packet]),
+ bundle_id: String::default(),
+ },
+ Some(5),
+ ),
+ Err(DeserializedBundleError::MarkedDiscard)
+ );
+ }
+
+ #[test]
+ fn test_bad_signature_fails_to_init() {
+ let kp0 = Keypair::new();
+ let kp1 = Keypair::new();
+
+ let mut tx0 = transfer(&kp0, &kp0.pubkey(), 100, Hash::default());
+ let tx1 = transfer(&kp1, &kp0.pubkey(), 100, Hash::default());
+ tx0.signatures = tx1.signatures;
+
+ assert_eq!(
+ ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]),
+ bundle_id: String::default(),
+ },
+ None
+ ),
+ Err(DeserializedBundleError::SignatureVerificationFailure)
+ );
+ }
+
+ #[test]
+ fn test_vote_only_bank_fails_to_build() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let parent = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+ let vote_only_bank = Arc::new(Bank::new_from_parent_with_options(
+ &parent,
+ &Pubkey::new_unique(),
+ 1,
+ NewBankOptions {
+ vote_only_bank: true,
+ },
+ ));
+
+ let kp = Keypair::new();
+
+ let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash());
+
+ let bundle = ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]),
+ bundle_id: String::default(),
+ },
+ None,
+ )
+ .unwrap();
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+ assert_matches!(
+ bundle.build_sanitized_bundle(
+ &vote_only_bank,
+ &HashSet::default(),
+ &mut transaction_errors
+ ),
+ Err(DeserializedBundleError::VoteOnlyMode)
+ );
+ }
+
+ #[test]
+ fn test_duplicate_signature_fails_to_build() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let kp = Keypair::new();
+
+ let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash());
+
+ let bundle = ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![
+ Packet::from_data(None, &tx0).unwrap(),
+ Packet::from_data(None, &tx0).unwrap(),
+ ]),
+ bundle_id: String::default(),
+ },
+ None,
+ )
+ .unwrap();
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+ assert_matches!(
+ bundle.build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors),
+ Err(DeserializedBundleError::DuplicateTransaction)
+ );
+ }
+
+ #[test]
+ fn test_blacklisted_account_fails_to_build() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let kp = Keypair::new();
+
+ let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash());
+
+ let bundle = ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]),
+ bundle_id: String::default(),
+ },
+ None,
+ )
+ .unwrap();
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+ assert_matches!(
+ bundle.build_sanitized_bundle(
+ &bank,
+ &HashSet::from([kp.pubkey()]),
+ &mut transaction_errors
+ ),
+ Err(DeserializedBundleError::BlacklistedAccount)
+ );
+ }
+
+ #[test]
+ fn test_already_processed_tx_fails_to_build() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let kp = Keypair::new();
+
+ let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash());
+
+ bank.process_transaction(&tx0).unwrap();
+
+ let bundle = ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]),
+ bundle_id: String::default(),
+ },
+ None,
+ )
+ .unwrap();
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+ assert_matches!(
+ bundle.build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors),
+ Err(DeserializedBundleError::FailedCheckTransactions)
+ );
+ }
+
+ #[test]
+ fn test_bad_blockhash_fails_to_build() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let kp = Keypair::new();
+
+ let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, Hash::default());
+
+ let bundle = ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]),
+ bundle_id: String::default(),
+ },
+ None,
+ )
+ .unwrap();
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+ assert_matches!(
+ bundle.build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors),
+ Err(DeserializedBundleError::FailedCheckTransactions)
+ );
+ }
+}
diff --git a/core/src/latest_unprocessed_votes.rs b/core/src/latest_unprocessed_votes.rs
index db606a4a2b..8dde88be74 100644
--- a/core/src/latest_unprocessed_votes.rs
+++ b/core/src/latest_unprocessed_votes.rs
@@ -136,7 +136,7 @@ pub(crate) fn weighted_random_order_by_stake<'a>(
}
#[derive(Default, Debug)]
-pub(crate) struct VoteBatchInsertionMetrics {
+pub struct VoteBatchInsertionMetrics {
pub(crate) num_dropped_gossip: usize,
pub(crate) num_dropped_tpu: usize,
}
diff --git a/core/src/lib.rs b/core/src/lib.rs
index 6747732231..988142e06a 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -14,6 +14,7 @@ pub mod ancestor_hashes_service;
pub mod banking_stage;
pub mod banking_trace;
pub mod broadcast_stage;
+pub mod bundle_stage;
pub mod cache_block_meta_service;
pub mod cluster_info_vote_listener;
pub mod cluster_nodes;
@@ -23,6 +24,7 @@ pub mod cluster_slots_service;
pub mod commitment_service;
pub mod completed_data_sets_service;
pub mod consensus;
+pub mod consensus_cache_updater;
pub mod cost_update_service;
pub mod drop_bank_service;
pub mod duplicate_repair_status;
@@ -31,6 +33,7 @@ pub mod fork_choice;
pub mod forward_packet_batches_by_accounts;
pub mod gen_keys;
pub mod heaviest_subtree_fork_choice;
+pub mod immutable_deserialized_bundle;
pub mod immutable_deserialized_packet;
mod latest_unprocessed_votes;
pub mod latest_validator_votes_for_frozen_banks;
@@ -42,11 +45,13 @@ pub mod multi_iterator_scanner;
pub mod next_leader;
pub mod optimistic_confirmation_verifier;
pub mod outstanding_requests;
+pub mod packet_bundle;
pub mod packet_deserializer;
pub mod packet_threshold;
pub mod poh_timing_report_service;
pub mod poh_timing_reporter;
pub mod progress_map;
+pub mod proxy;
pub mod qos_service;
pub mod read_write_account_set;
pub mod repair_generic_traversal;
@@ -70,6 +75,7 @@ pub mod snapshot_packager_service;
pub mod staked_nodes_updater_service;
pub mod stats_reporter_service;
pub mod system_monitor_service;
+pub mod tip_manager;
mod tower1_14_11;
mod tower1_7_14;
pub mod tower_storage;
@@ -107,3 +113,41 @@ extern crate solana_frozen_abi_macro;
#[cfg(test)]
#[macro_use]
extern crate matches;
+
+use {
+ solana_sdk::packet::{Meta, Packet, PacketFlags, PACKET_DATA_SIZE},
+ std::{
+ cmp::min,
+ net::{IpAddr, Ipv4Addr},
+ },
+};
+
+const UNKNOWN_IP: IpAddr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
+
+// NOTE: last profiled at around 180ns
+pub fn proto_packet_to_packet(p: jito_protos::proto::packet::Packet) -> Packet {
+ let mut data = [0; PACKET_DATA_SIZE];
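+ // Copy at most PACKET_DATA_SIZE bytes; an oversized proto payload is truncated
+ // here, while meta.size below still records the sender-reported length.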
+ let copy_len = min(data.len(), p.data.len());
+ data[..copy_len].copy_from_slice(&p.data[..copy_len]);
+ let mut packet = Packet::new(data, Meta::default());
+ if let Some(meta) = p.meta {
+ packet.meta_mut().size = meta.size as usize;
+ packet.meta_mut().addr = meta.addr.parse().unwrap_or(UNKNOWN_IP);
+ packet.meta_mut().port = meta.port as u16;
+ if let Some(flags) = meta.flags {
+ if flags.simple_vote_tx {
+ packet.meta_mut().flags.insert(PacketFlags::SIMPLE_VOTE_TX);
+ }
+ if flags.forwarded {
+ packet.meta_mut().flags.insert(PacketFlags::FORWARDED);
+ }
+ if flags.tracer_packet {
+ packet.meta_mut().flags.insert(PacketFlags::TRACER_PACKET);
+ }
+ if flags.repair {
+ packet.meta_mut().flags.insert(PacketFlags::REPAIR);
+ }
+ }
+ }
+ packet
+}
diff --git a/core/src/packet_bundle.rs b/core/src/packet_bundle.rs
new file mode 100644
index 0000000000..2158f37414
--- /dev/null
+++ b/core/src/packet_bundle.rs
@@ -0,0 +1,7 @@
+use solana_perf::packet::PacketBatch;
+
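+/// A batch of packets treated as a single unit by the bundle stage;
+/// `bundle_id` identifies the bundle across stages.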
+#[derive(Clone, Debug)]
+pub struct PacketBundle {
+ pub batch: PacketBatch,
+ pub bundle_id: String,
+}
diff --git a/core/src/proxy/auth.rs b/core/src/proxy/auth.rs
new file mode 100644
index 0000000000..39821e12ef
--- /dev/null
+++ b/core/src/proxy/auth.rs
@@ -0,0 +1,185 @@
+use {
+ crate::proxy::ProxyError,
+ chrono::Utc,
+ jito_protos::proto::auth::{
+ auth_service_client::AuthServiceClient, GenerateAuthChallengeRequest,
+ GenerateAuthTokensRequest, RefreshAccessTokenRequest, Role, Token,
+ },
+ solana_gossip::cluster_info::ClusterInfo,
+ solana_sdk::signature::{Keypair, Signer},
+ std::{
+ sync::{Arc, Mutex},
+ time::Duration,
+ },
+ tokio::time::timeout,
+ tonic::{service::Interceptor, transport::Channel, Code, Request, Status},
+};
+
+/// Interceptor responsible for adding the access token to request headers.
+pub(crate) struct AuthInterceptor {
+ /// The token added to each request header.
+ access_token: Arc<Mutex<Token>>,
+}
+
+impl AuthInterceptor {
+ pub(crate) fn new(access_token: Arc<Mutex<Token>>) -> Self {
+ Self { access_token }
+ }
+}
+
+impl Interceptor for AuthInterceptor {
+ fn call(&mut self, mut request: Request<()>) -> Result<Request<()>, Status> {
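+ // Attach the current access token as a Bearer authorization header; the token
+ // value is swapped out by the refresh logic without rebuilding the client.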
+ request.metadata_mut().insert(
+ "authorization",
+ format!("Bearer {}", self.access_token.lock().unwrap().value)
+ .parse()
+ .unwrap(),
+ );
+
+ Ok(request)
+ }
+}
+
+/// Requests an auth challenge, signs it, then generates and returns validated auth tokens.
+pub async fn generate_auth_tokens(
+ auth_service_client: &mut AuthServiceClient<Channel>,
+ // used to sign challenges
+ keypair: &Keypair,
+) -> crate::proxy::Result<(
+ Token, /* access_token */
+ Token, /* refresh_token */
+)> {
+ debug!("generate_auth_challenge");
+ let challenge_response = auth_service_client
+ .generate_auth_challenge(GenerateAuthChallengeRequest {
+ role: Role::Validator as i32,
+ pubkey: keypair.pubkey().as_ref().to_vec(),
+ })
+ .await
+ .map_err(|e: Status| {
+ if e.code() == Code::PermissionDenied {
+ ProxyError::AuthenticationPermissionDenied
+ } else {
+ ProxyError::AuthenticationError(e.to_string())
+ }
+ })?;
+
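+ // Sign the challenge as "{pubkey}-{challenge}" with the validator identity key.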
+ let formatted_challenge = format!(
+ "{}-{}",
+ keypair.pubkey(),
+ challenge_response.into_inner().challenge
+ );
+
+ let signed_challenge = keypair
+ .sign_message(formatted_challenge.as_bytes())
+ .as_ref()
+ .to_vec();
+
+ debug!(
+ "formatted_challenge: {} signed_challenge: {:?}",
+ formatted_challenge, signed_challenge
+ );
+
+ debug!("generate_auth_tokens");
+ let auth_tokens = auth_service_client
+ .generate_auth_tokens(GenerateAuthTokensRequest {
+ challenge: formatted_challenge,
+ client_pubkey: keypair.pubkey().as_ref().to_vec(),
+ signed_challenge,
+ })
+ .await
+ .map_err(|e| ProxyError::AuthenticationError(e.to_string()))?;
+
+ let inner = auth_tokens.into_inner();
+ let access_token = get_validated_token(inner.access_token)?;
+ let refresh_token = get_validated_token(inner.refresh_token)?;
+
+ Ok((access_token, refresh_token))
+}
+
+/// Tries to refresh the access token, or runs a full re-authentication when the
+/// refresh token itself is close to expiry.
+pub async fn maybe_refresh_auth_tokens(
+ auth_service_client: &mut AuthServiceClient<Channel>,
+ access_token: &Arc<Mutex<Token>>,
+ refresh_token: &Token,
+ cluster_info: &Arc<ClusterInfo>,
+ connection_timeout: &Duration,
+ refresh_within_s: u64,
+) -> crate::proxy::Result<(
+ Option<Token>, // access token
+ Option<Token>, // refresh token
+)> {
+ let access_token_expiry: u64 = access_token
+ .lock()
+ .unwrap()
+ .expires_at_utc
+ .as_ref()
+ .map(|ts| ts.seconds as u64)
+ .unwrap_or_default();
+ let refresh_token_expiry: u64 = refresh_token
+ .expires_at_utc
+ .as_ref()
+ .map(|ts| ts.seconds as u64)
+ .unwrap_or_default();
+
+ let now = Utc::now().timestamp() as u64;
+
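+ // checked_sub(now) clamps to zero once a token is past expiry, so an already
+ // expired token always triggers a refresh here.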
+ let should_refresh_access =
+ access_token_expiry.checked_sub(now).unwrap_or_default() <= refresh_within_s;
+ let should_generate_new_tokens =
+ refresh_token_expiry.checked_sub(now).unwrap_or_default() <= refresh_within_s;
+
+ if should_generate_new_tokens {
+ let kp = cluster_info.keypair().clone();
+
+ let (new_access_token, new_refresh_token) = timeout(
+ *connection_timeout,
+ generate_auth_tokens(auth_service_client, kp.as_ref()),
+ )
+ .await
+ .map_err(|_| ProxyError::MethodTimeout("generate_auth_tokens".to_string()))?
+ .map_err(|e| ProxyError::MethodError(e.to_string()))?;
+
+ return Ok((Some(new_access_token), Some(new_refresh_token)));
+ } else if should_refresh_access {
+ let new_access_token = timeout(
+ *connection_timeout,
+ refresh_access_token(auth_service_client, refresh_token),
+ )
+ .await
+ .map_err(|_| ProxyError::MethodTimeout("refresh_access_token".to_string()))?
+ .map_err(|e| ProxyError::MethodError(e.to_string()))?;
+
+ return Ok((Some(new_access_token), None));
+ }
+
+ Ok((None, None))
+}
+
+pub async fn refresh_access_token(
+ auth_service_client: &mut AuthServiceClient<Channel>,
+ refresh_token: &Token,
+) -> crate::proxy::Result<Token> {
+ let response = auth_service_client
+ .refresh_access_token(RefreshAccessTokenRequest {
+ refresh_token: refresh_token.value.clone(),
+ })
+ .await
+ .map_err(|e| ProxyError::AuthenticationError(e.to_string()))?;
+ get_validated_token(response.into_inner().access_token)
+}
+
+/// Performs the necessary validations on an auth token before returning it: a token
+/// is invalid if the token itself is None or its expires_at_utc field is None.
+/// Once this returns Ok, it is safe to call .unwrap() on the token's fields at the call-site.
+fn get_validated_token(maybe_token: Option<Token>) -> crate::proxy::Result<Token> {
+ let token = maybe_token
+ .ok_or_else(|| ProxyError::BadAuthenticationToken("received a null token".to_string()))?;
+ if token.expires_at_utc.is_none() {
+ Err(ProxyError::BadAuthenticationToken(
+ "expires_at_utc field is null".to_string(),
+ ))
+ } else {
+ Ok(token)
+ }
+}
diff --git a/core/src/proxy/block_engine_stage.rs b/core/src/proxy/block_engine_stage.rs
new file mode 100644
index 0000000000..4128f5379f
--- /dev/null
+++ b/core/src/proxy/block_engine_stage.rs
@@ -0,0 +1,533 @@
+//! Maintains a connection to the Block Engine, which is responsible for sending
+//! high-profit bundles and transactions to the validator.
+use {
+ crate::{
+ banking_trace::BankingPacketSender,
+ packet_bundle::PacketBundle,
+ proto_packet_to_packet,
+ proxy::{
+ auth::{generate_auth_tokens, maybe_refresh_auth_tokens, AuthInterceptor},
+ ProxyError,
+ },
+ },
+ crossbeam_channel::Sender,
+ jito_protos::proto::{
+ auth::{auth_service_client::AuthServiceClient, Token},
+ block_engine::{
+ self, block_engine_validator_client::BlockEngineValidatorClient,
+ BlockBuilderFeeInfoRequest,
+ },
+ },
+ solana_gossip::cluster_info::ClusterInfo,
+ solana_perf::packet::PacketBatch,
+ solana_sdk::{
+ pubkey::Pubkey, saturating_add_assign, signature::Signer, signer::keypair::Keypair,
+ },
+ std::{
+ str::FromStr,
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc, Mutex,
+ },
+ thread::{self, Builder, JoinHandle},
+ time::Duration,
+ },
+ tokio::time::{interval, sleep, timeout},
+ tonic::{
+ codegen::InterceptedService,
+ transport::{Channel, Endpoint},
+ Status, Streaming,
+ },
+};
+
+const CONNECTION_TIMEOUT_S: u64 = 10;
+const CONNECTION_BACKOFF_S: u64 = 5;
+
+#[derive(Default)]
+struct BlockEngineStageStats {
+ num_bundles: u64,
+ num_bundle_packets: u64,
+ num_packets: u64,
+ num_empty_packets: u64,
+}
+
+impl BlockEngineStageStats {
+ pub(crate) fn report(&self) {
+ datapoint_info!(
+ "block_engine_stage-stats",
+ ("num_bundles", self.num_bundles, i64),
+ ("num_bundle_packets", self.num_bundle_packets, i64),
+ ("num_packets", self.num_packets, i64),
+ ("num_empty_packets", self.num_empty_packets, i64)
+ );
+ }
+}
+
+pub struct BlockBuilderFeeInfo {
+ pub block_builder: Pubkey,
+ pub block_builder_commission: u64,
+}
+
+#[derive(Clone, Debug, Default, PartialEq, Eq)]
+pub struct BlockEngineConfig {
+ /// Block Engine URL
+ pub block_engine_url: String,
+
+ /// If true, the validator assumes the Block Engine has already verified packets and bypasses signature verification.
+ pub trust_packets: bool,
+}
+
+pub struct BlockEngineStage {
+ t_hdls: Vec<JoinHandle<()>>,
+}
+
+impl BlockEngineStage {
+ pub fn new(
+ block_engine_config: Arc<Mutex<BlockEngineConfig>>,
+ // Channel that bundles get piped through.
+ bundle_tx: Sender<Vec<PacketBundle>>,
+ // The keypair stored here is used to sign auth challenges.
+ cluster_info: Arc<ClusterInfo>,
+ // Channel that non-trusted packets get piped through.
+ packet_tx: Sender<PacketBatch>,
+ // Channel that trusted packets get piped through.
+ banking_packet_sender: BankingPacketSender,
+ exit: Arc<AtomicBool>,
+ block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+ ) -> Self {
+ let block_builder_fee_info = block_builder_fee_info.clone();
+
+ let thread = Builder::new()
+ .name("block-engine-stage".to_string())
+ .spawn(move || {
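+ // Run the gRPC connection on a dedicated tokio runtime so async proxy I/O
+ // stays off the validator's other threads.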
+ let rt = tokio::runtime::Builder::new_multi_thread()
+ .enable_all()
+ .build()
+ .unwrap();
+ rt.block_on(Self::start(
+ block_engine_config,
+ cluster_info,
+ bundle_tx,
+ packet_tx,
+ banking_packet_sender,
+ exit,
+ block_builder_fee_info,
+ ));
+ })
+ .unwrap();
+
+ Self {
+ t_hdls: vec![thread],
+ }
+ }
+
+ pub fn join(self) -> thread::Result<()> {
+ for t in self.t_hdls {
+ t.join()?;
+ }
+ Ok(())
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ async fn start(
+ block_engine_config: Arc<Mutex<BlockEngineConfig>>,
+ cluster_info: Arc<ClusterInfo>,
+ bundle_tx: Sender<Vec<PacketBundle>>,
+ packet_tx: Sender<PacketBatch>,
+ banking_packet_sender: BankingPacketSender,
+ exit: Arc<AtomicBool>,
+ block_builder_fee_info: Arc<Mutex<BlockBuilderFeeInfo>>,
+ ) {
+ const CONNECTION_TIMEOUT: Duration = Duration::from_secs(CONNECTION_TIMEOUT_S);
+ const CONNECTION_BACKOFF: Duration = Duration::from_secs(CONNECTION_BACKOFF_S);
+ let mut error_count: u64 = 0;
+
+ while !exit.load(Ordering::Relaxed) {
+ // Wait until a valid config is supplied (either initially or by admin rpc)
+ // Use if/else here to avoid an extra CONNECTION_BACKOFF wait on successful termination
+ if !Self::is_valid_block_engine_config(&block_engine_config.lock().unwrap()) {
+ sleep(CONNECTION_BACKOFF).await;
+ } else if let Err(e) = Self::connect_auth_and_stream(
+ &block_engine_config,
+ &cluster_info,
+ &bundle_tx,
+ &packet_tx,
+ &banking_packet_sender,
+ &exit,
+ &block_builder_fee_info,
+ &CONNECTION_TIMEOUT,
+ )
+ .await
+ {
+ match e {
+ // This error is frequent on hot spares, and the parsed string does not work
+ // with datapoints (incorrect escaping).
+ ProxyError::AuthenticationPermissionDenied => {
+ warn!("block engine permission denied. not on leader schedule. ignore if hot-spare.")
+ }
+ e => {
+ error_count += 1;
+ datapoint_warn!(
+ "block_engine_stage-proxy_error",
+ ("count", error_count, i64),
+ ("error", e.to_string(), String),
+ );
+ }
+ }
+ sleep(CONNECTION_BACKOFF).await;
+ }
+ }
+ }
+
+ async fn connect_auth_and_stream(
+ block_engine_config: &Arc<Mutex<BlockEngineConfig>>,
+ cluster_info: &Arc<ClusterInfo>,
+ bundle_tx: &Sender<Vec<PacketBundle>>,
+ packet_tx: &Sender<PacketBatch>,
+ banking_packet_sender: &BankingPacketSender,
+ exit: &Arc<AtomicBool>,
+ block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+ connection_timeout: &Duration,
+ ) -> crate::proxy::Result<()> {
+ // Get a copy of configs here in case they have changed at runtime
+ let keypair = cluster_info.keypair().clone();
+ let local_config = block_engine_config.lock().unwrap().clone();
+
+ let mut backend_endpoint = Endpoint::from_shared(local_config.block_engine_url.clone())
+ .map_err(|_| {
+ ProxyError::BlockEngineConnectionError(format!(
+ "invalid block engine url value: {}",
+ local_config.block_engine_url
+ ))
+ })?
+ .tcp_keepalive(Some(Duration::from_secs(60)));
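+ // TLS is only configured for https URLs; plain http endpoints connect directly.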
+ if local_config.block_engine_url.starts_with("https") {
+ backend_endpoint = backend_endpoint
+ .tls_config(tonic::transport::ClientTlsConfig::new())
+ .map_err(|_| {
+ ProxyError::BlockEngineConnectionError(
+ "failed to set tls_config for block engine service".to_string(),
+ )
+ })?;
+ }
+
+ debug!("connecting to auth: {}", local_config.block_engine_url);
+ let auth_channel = timeout(*connection_timeout, backend_endpoint.connect())
+ .await
+ .map_err(|_| ProxyError::AuthenticationConnectionTimeout)?
+ .map_err(|e| ProxyError::AuthenticationConnectionError(e.to_string()))?;
+
+ let mut auth_client = AuthServiceClient::new(auth_channel);
+
+ debug!("generating authentication token");
+ let (access_token, refresh_token) = timeout(
+ *connection_timeout,
+ generate_auth_tokens(&mut auth_client, &keypair),
+ )
+ .await
+ .map_err(|_| ProxyError::AuthenticationTimeout)??;
+
+ datapoint_info!(
+ "block_engine_stage-tokens_generated",
+ ("url", local_config.block_engine_url, String),
+ ("count", 1, i64),
+ );
+
+ debug!(
+ "connecting to block engine: {}",
+ local_config.block_engine_url
+ );
+ let block_engine_channel = timeout(*connection_timeout, backend_endpoint.connect())
+ .await
+ .map_err(|_| ProxyError::BlockEngineConnectionTimeout)?
+ .map_err(|e| ProxyError::BlockEngineConnectionError(e.to_string()))?;
+
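+ // Wrap the access token in Arc<Mutex<..>> so the AuthInterceptor always reads
+ // the latest value when tokens are refreshed mid-stream.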
+ let access_token = Arc::new(Mutex::new(access_token));
+ let block_engine_client = BlockEngineValidatorClient::with_interceptor(
+ block_engine_channel,
+ AuthInterceptor::new(access_token.clone()),
+ );
+
+ Self::start_consuming_block_engine_bundles_and_packets(
+ bundle_tx,
+ block_engine_client,
+ packet_tx,
+ &local_config,
+ block_engine_config,
+ banking_packet_sender,
+ exit,
+ block_builder_fee_info,
+ auth_client,
+ access_token,
+ refresh_token,
+ connection_timeout,
+ keypair,
+ cluster_info,
+ )
+ .await
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ async fn start_consuming_block_engine_bundles_and_packets(
+ bundle_tx: &Sender<Vec<PacketBundle>>,
+ mut client: BlockEngineValidatorClient<InterceptedService<Channel, AuthInterceptor>>,
+ packet_tx: &Sender<PacketBatch>,
+ local_config: &BlockEngineConfig, // local copy of config with current connections
+ global_config: &Arc<Mutex<BlockEngineConfig>>, // guarded reference for detecting run-time updates
+ banking_packet_sender: &BankingPacketSender,
+ exit: &Arc<AtomicBool>,
+ block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+ auth_client: AuthServiceClient<Channel>,