diff --git a/aptos-move/aptos-vm/src/block_executor/mod.rs b/aptos-move/aptos-vm/src/block_executor/mod.rs index 427a039f7d63a..2d579bc09668c 100644 --- a/aptos-move/aptos-vm/src/block_executor/mod.rs +++ b/aptos-move/aptos-vm/src/block_executor/mod.rs @@ -12,8 +12,9 @@ use aptos_aggregator::{ delayed_change::DelayedChange, delta_change_set::DeltaOp, resolver::TAggregatorV1View, }; use aptos_block_executor::{ - cross_block_caches::CachedAptosEnvironment, errors::BlockExecutionError, - executor::BlockExecutor, task::TransactionOutput as BlockExecutorTransactionOutput, + cross_block_caches::get_environment_with_delayed_field_optimization_enabled, + errors::BlockExecutionError, executor::BlockExecutor, + task::TransactionOutput as BlockExecutorTransactionOutput, txn_commit_hook::TransactionCommitHook, types::InputOutputKey, }; use aptos_infallible::Mutex; @@ -416,8 +417,7 @@ impl BlockAptosVM { ExecutableTestType, >::new(config, executor_thread_pool, transaction_commit_listener); - let environment = - CachedAptosEnvironment::get_with_delayed_field_optimization_enabled(state_view)?; + let environment = get_environment_with_delayed_field_optimization_enabled(state_view)?; let ret = executor.execute_block(environment, signature_verified_block, state_view); match ret { Ok(block_output) => { diff --git a/aptos-move/block-executor/src/captured_reads.rs b/aptos-move/block-executor/src/captured_reads.rs index 82b02da5419c2..87b91e88de928 100644 --- a/aptos-move/block-executor/src/captured_reads.rs +++ b/aptos-move/block-executor/src/captured_reads.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - cross_block_caches::CrossBlockModuleCache, types::InputOutputKey, + cross_block_caches::ImmutableModuleCache, types::InputOutputKey, value_exchange::filter_value_for_exchange, }; use anyhow::bail; @@ -24,15 +24,12 @@ use aptos_types::{ executable::ModulePath, state_store::state_value::StateValueMetadata, transaction::BlockExecutableTransaction as Transaction, - 
vm::modules::AptosModuleExtension, write_set::TransactionWrite, }; use aptos_vm_types::resolver::ResourceGroupSize; use derivative::Derivative; -use move_binary_format::CompiledModule; -use move_core_types::{language_storage::ModuleId, value::MoveTypeLayout}; -use move_vm_runtime::Module; -use move_vm_types::code::{ModuleCode, SyncModuleCache}; +use move_core_types::value::MoveTypeLayout; +use move_vm_types::code::{ModuleCode, SyncModuleCache, WithAddress, WithName}; use std::{ collections::{ hash_map::{ @@ -41,6 +38,8 @@ use std::{ }, BTreeMap, HashMap, HashSet, }, + hash::Hash, + ops::Deref, sync::Arc, }; @@ -296,13 +295,11 @@ impl DelayedFieldRead { /// Represents a module read, either from immutable cross-block cache, or from code [SyncCodeCache] /// used by block executor (per-block cache). This way, when transaction needs to read a module /// from [SyncCodeCache] it can first check the read-set here. -enum ModuleRead { +enum ModuleRead { /// Read from the cross-block module cache. GlobalCache, /// Read from per-block cache ([SyncCodeCache]) used by parallel execution. - PerBlockCache( - Option>>>, - ), + PerBlockCache(Option>>>), } /// Represents a result of a read from [CapturedReads] when they are used as the transaction-level @@ -322,14 +319,14 @@ pub enum CacheRead { /// read that has a kind <= already captured read (for that key / tag). #[derive(Derivative)] #[derivative(Default(bound = "", new = "true"))] -pub(crate) struct CapturedReads { +pub(crate) struct CapturedReads { data_reads: HashMap>, group_reads: HashMap>, delayed_field_reads: HashMap, #[deprecated] pub(crate) deprecated_module_reads: Vec, - module_reads: hashbrown::HashMap, + module_reads: hashbrown::HashMap>, /// If there is a speculative failure (e.g. 
delta application failure, or an observed /// inconsistency), the transaction output is irrelevant (must be discarded and transaction @@ -338,7 +335,7 @@ pub(crate) struct CapturedReads { /// require different validation behavior (delayed fields are validated commit-time). delayed_field_speculative_failure: bool, non_delayed_field_speculative_failure: bool, - /// Set if the invarint on CapturedReads intended use is violated. Leads to an alert + /// Set if the invariant on CapturedReads intended use is violated. Leads to an alert /// and sequential execution fallback. incorrect_use: bool, } @@ -351,7 +348,12 @@ enum UpdateResult { Inconsistency(String), } -impl CapturedReads { +impl CapturedReads +where + T: Transaction, + K: Hash + Eq + Ord + Clone, + VC: Deref>, +{ // Return an iterator over the captured reads. pub(crate) fn get_read_values_with_delayed_fields( &self, @@ -390,8 +392,8 @@ impl CapturedReads { // Given a hashmap entry for a key, incorporate a new DataRead. This checks // consistency and ensures that the most comprehensive read is recorded. - fn update_entry( - entry: Entry>, + fn update_entry( + entry: Entry>, read: DataRead, ) -> UpdateResult { match entry { @@ -612,20 +614,18 @@ impl CapturedReads { } /// Records the read to global cache that spans across multiple blocks. - pub(crate) fn capture_global_cache_read(&mut self, module_id: ModuleId) { - self.module_reads.insert(module_id, ModuleRead::GlobalCache); + pub(crate) fn capture_global_cache_read(&mut self, key: K) { + self.module_reads.insert(key, ModuleRead::GlobalCache); } /// Records the read to per-block level cache. pub(crate) fn capture_per_block_cache_read( &mut self, - module_id: ModuleId, - read: Option< - Arc>>, - >, + key: K, + read: Option>>>, ) { self.module_reads - .insert(module_id, ModuleRead::PerBlockCache(read)); + .insert(key, ModuleRead::PerBlockCache(read)); } /// If the module has been previously read from [SyncCodeCache], returns it. 
Returns a panic @@ -633,22 +633,14 @@ impl CapturedReads { /// for those). pub(crate) fn get_module_read( &self, - module_id: &ModuleId, - ) -> Result< - CacheRead< - Option>>>, - >, - PanicError, - > { - Ok(match self.module_reads.get(module_id) { + key: &K, + ) -> Result>>>>, PanicError> { + Ok(match self.module_reads.get(key) { Some(ModuleRead::PerBlockCache(read)) => CacheRead::Hit(read.clone()), Some(ModuleRead::GlobalCache) => { - let msg = format!( - "Trying to get the captured read for {}::{} in global cache", - module_id.address(), - module_id.name() - ); - return Err(PanicError::CodeInvariantError(msg)); + return Err(PanicError::CodeInvariantError( + "Global module cache reads do not capture values".to_string(), + )); }, None => CacheRead::Miss, }) @@ -660,22 +652,17 @@ impl CapturedReads { /// 3. Entries that were in per-block cache have the same commit index. pub(crate) fn validate_module_reads( &self, - module_cache: &SyncModuleCache< - ModuleId, - CompiledModule, - Module, - AptosModuleExtension, - Option, - >, + global_module_cache: &ImmutableModuleCache, + per_block_module_cache: &SyncModuleCache>, ) -> bool { if self.non_delayed_field_speculative_failure { return false; } - self.module_reads.iter().all(|(id, read)| match read { - ModuleRead::GlobalCache => CrossBlockModuleCache::is_valid(id), + self.module_reads.iter().all(|(key, read)| match read { + ModuleRead::GlobalCache => global_module_cache.contains_valid(key), ModuleRead::PerBlockCache(previous) => { - let current_version = module_cache.get_module_version(id); + let current_version = per_block_module_cache.get_module_version(key); let previous_version = previous.as_ref().map(|module| module.version()); current_version == previous_version }, @@ -772,6 +759,25 @@ impl CapturedReads { Ok(true) } + pub(crate) fn mark_failure(&mut self, delayed_field_failure: bool) { + if delayed_field_failure { + self.delayed_field_speculative_failure = true; + } else { + 
self.non_delayed_field_speculative_failure = true; + } + } + + pub(crate) fn mark_incorrect_use(&mut self) { + self.incorrect_use = true; + } +} + +impl CapturedReads +where + T: Transaction, + K: Hash + Eq + Ord + Clone + WithAddress + WithName, + VC: Deref>, +{ pub(crate) fn get_read_summary( &self, ) -> HashSet> { @@ -795,8 +801,8 @@ impl CapturedReads { for key in &self.deprecated_module_reads { ret.insert(InputOutputKey::Resource(key.clone())); } - for id in self.module_reads.keys() { - let key = T::Key::from_address_and_module_name(id.address(), id.name()); + for key in self.module_reads.keys() { + let key = T::Key::from_address_and_module_name(key.address(), key.name()); ret.insert(InputOutputKey::Resource(key)); } @@ -808,36 +814,28 @@ impl CapturedReads { ret } - - pub(crate) fn mark_failure(&mut self, delayed_field_failure: bool) { - if delayed_field_failure { - self.delayed_field_speculative_failure = true; - } else { - self.non_delayed_field_speculative_failure = true; - } - } - - pub(crate) fn mark_incorrect_use(&mut self) { - self.incorrect_use = true; - } } #[derive(Derivative)] #[derivative(Default(bound = "", new = "true"))] -pub(crate) struct UnsyncReadSet { +pub(crate) struct UnsyncReadSet { pub(crate) resource_reads: HashSet, pub(crate) group_reads: HashMap>, pub(crate) delayed_field_reads: HashSet, #[deprecated] pub(crate) deprecated_module_reads: HashSet, - module_reads: HashSet, + module_reads: HashSet, } -impl UnsyncReadSet { +impl UnsyncReadSet +where + T: Transaction, + K: Hash + Eq + Ord + Clone + WithAddress + WithName, +{ /// Captures the module read for sequential execution. 
- pub(crate) fn capture_module_read(&mut self, module_id: ModuleId) { - self.module_reads.insert(module_id); + pub(crate) fn capture_module_read(&mut self, key: K) { + self.module_reads.insert(key); } pub(crate) fn get_read_summary( @@ -859,8 +857,8 @@ impl UnsyncReadSet { for key in &self.deprecated_module_reads { ret.insert(InputOutputKey::Resource(key.clone())); } - for id in &self.module_reads { - let key = T::Key::from_address_and_module_name(id.address(), id.name()); + for key in &self.module_reads { + let key = T::Key::from_address_and_module_name(key.address(), key.name()); ret.insert(InputOutputKey::Resource(key)); } @@ -877,14 +875,17 @@ mod test { use super::*; use crate::{ proptest_types::types::{raw_metadata, KeyType, MockEvent, ValueType}, - types::test_types::{deserialized_code, module_id, verified_code}, + types::test_types::{mock_deserialized_code, mock_verified_code}, }; use aptos_mvhashmap::{types::StorageVersion, MVHashMap}; use aptos_types::executable::ExecutableTestType; use claims::{ assert_err, assert_gt, assert_matches, assert_none, assert_ok, assert_ok_eq, assert_some_eq, }; - use move_vm_types::{code::ModuleCache, delayed_values::delayed_field_id::DelayedFieldID}; + use move_vm_types::{ + code::{MockDeserializedCode, MockVerifiedCode, ModuleCache}, + delayed_values::delayed_field_id::DelayedFieldID, + }; use test_case::test_case; #[test] @@ -1076,7 +1077,7 @@ mod test { ($m:expr, $x:expr, $y:expr) => {{ let original = $m.get(&$x).cloned().unwrap(); assert_matches!( - CapturedReads::::update_entry($m.entry($x), $y.clone()), + CapturedReads::::update_entry($m.entry($x), $y.clone()), UpdateResult::IncorrectUse(_) ); assert_some_eq!($m.get(&$x), &original); @@ -1087,7 +1088,7 @@ mod test { ($m:expr, $x:expr, $y:expr) => {{ let original = $m.get(&$x).cloned().unwrap(); assert_matches!( - CapturedReads::::update_entry($m.entry($x), $y.clone()), + CapturedReads::::update_entry($m.entry($x), $y.clone()), UpdateResult::Inconsistency(_) ); 
assert_some_eq!($m.get(&$x), &original); @@ -1097,7 +1098,7 @@ mod test { macro_rules! assert_update { ($m:expr, $x:expr, $y:expr) => {{ assert_matches!( - CapturedReads::::update_entry($m.entry($x), $y.clone()), + CapturedReads::::update_entry($m.entry($x), $y.clone()), UpdateResult::Updated ); assert_some_eq!($m.get(&$x), &$y); @@ -1107,7 +1108,7 @@ mod test { macro_rules! assert_insert { ($m:expr, $x:expr, $y:expr) => {{ assert_matches!( - CapturedReads::::update_entry($m.entry($x), $y.clone()), + CapturedReads::::update_entry($m.entry($x), $y.clone()), UpdateResult::Inserted ); assert_some_eq!($m.get(&$x), &$y); @@ -1274,7 +1275,13 @@ mod test { #[test_case(false)] #[test_case(true)] fn capture_and_get_by_kind(use_tag: bool) { - let mut captured_reads = CapturedReads::::new(); + let mut captured_reads = CapturedReads::< + TestTransactionType, + u32, + MockDeserializedCode, + MockVerifiedCode, + u32, + >::new(); let legacy_reads = legacy_reads_by_kind(); let deletion_reads = deletion_reads_by_kind(); let with_metadata_reads = with_metadata_reads_by_kind(); @@ -1302,7 +1309,13 @@ mod test { #[should_panic] #[test] fn metadata_for_group_member() { - let captured_reads = CapturedReads::::new(); + let captured_reads = CapturedReads::< + TestTransactionType, + u32, + MockDeserializedCode, + MockVerifiedCode, + u32, + >::new(); captured_reads.get_by_kind(&KeyType::(21, false), Some(&10), ReadKind::Metadata); } @@ -1324,7 +1337,13 @@ mod test { #[test_case(false)] #[test_case(true)] fn incorrect_use_flag(use_tag: bool) { - let mut captured_reads = CapturedReads::::new(); + let mut captured_reads = CapturedReads::< + TestTransactionType, + u32, + MockDeserializedCode, + MockVerifiedCode, + (), + >::new(); let legacy_reads = legacy_reads_by_kind(); let deletion_reads = deletion_reads_by_kind(); let with_metadata_reads = with_metadata_reads_by_kind(); @@ -1380,7 +1399,13 @@ mod test { #[test_case(false)] #[test_case(true)] fn speculative_failure_flag(use_tag: bool) { - 
let mut captured_reads = CapturedReads::::new(); + let mut captured_reads = CapturedReads::< + TestTransactionType, + u32, + MockDeserializedCode, + MockVerifiedCode, + (), + >::new(); let versioned_legacy = DataRead::Versioned( Err(StorageVersion), Arc::new(ValueType::with_len_and_metadata( @@ -1431,7 +1456,13 @@ mod test { assert!(captured_reads.non_delayed_field_speculative_failure); assert!(!captured_reads.delayed_field_speculative_failure); - let mut captured_reads = CapturedReads::::new(); + let mut captured_reads = CapturedReads::< + TestTransactionType, + u32, + MockDeserializedCode, + MockVerifiedCode, + (), + >::new(); captured_reads.non_delayed_field_speculative_failure = false; captured_reads.delayed_field_speculative_failure = false; captured_reads.mark_failure(true); @@ -1459,182 +1490,212 @@ mod test { #[test] fn test_speculative_failure_for_module_reads() { - let mut captured_reads = CapturedReads::::new(); - let mvhashmap = - MVHashMap::, u32, ValueType, ExecutableTestType, DelayedFieldID>::new(); - - assert!(captured_reads.validate_module_reads(mvhashmap.module_cache())); + let mut captured_reads = CapturedReads::< + TestTransactionType, + u32, + MockDeserializedCode, + MockVerifiedCode, + (), + >::new(); + let global_module_cache = ImmutableModuleCache::empty(); + let per_block_module_cache = SyncModuleCache::empty(); + + assert!(captured_reads.validate_module_reads(&global_module_cache, &per_block_module_cache)); captured_reads.mark_failure(true); - assert!(captured_reads.validate_module_reads(mvhashmap.module_cache())); + assert!(captured_reads.validate_module_reads(&global_module_cache, &per_block_module_cache)); captured_reads.mark_failure(false); - assert!(!captured_reads.validate_module_reads(mvhashmap.module_cache())); + assert!( + !captured_reads.validate_module_reads(&global_module_cache, &per_block_module_cache) + ); } #[test] fn test_global_cache_module_reads_are_not_recorded() { - let mut captured_reads = CapturedReads::::new(); - - 
// Now, mark one of the entries as invalid. Validations should fail!
+ global_module_cache.mark_invalid(&1); + let valid = + captured_reads.validate_module_reads(&global_module_cache, &per_block_module_cache); + assert!(!valid); // Without invalid module (and if it is not captured), validation should pass. - CrossBlockModuleCache::remove(&b_id); - captured_reads.module_reads.remove(&b_id); - assert!(captured_reads.validate_module_reads(mvhashmap.module_cache())); + global_module_cache.remove(&1); + captured_reads.module_reads.remove(&1); + assert!(captured_reads.validate_module_reads(&global_module_cache, &per_block_module_cache)); // Validation fails if we captured a cross-block module which does not exist anymore. - CrossBlockModuleCache::remove(&a_id); - assert!(!captured_reads.validate_module_reads(mvhashmap.module_cache())); + global_module_cache.remove(&0); + let valid = + captured_reads.validate_module_reads(&global_module_cache, &per_block_module_cache); + assert!(!valid); } #[test] fn test_block_cache_module_reads_are_recorded() { - let mut captured_reads = CapturedReads::::new(); - let mvhashmap = - MVHashMap::, u32, ValueType, ExecutableTestType, DelayedFieldID>::new(); - - let a_id = module_id("a"); - let a = deserialized_code("a", Some(2)); - mvhashmap - .module_cache() + let mut captured_reads = CapturedReads::< + TestTransactionType, + u32, + MockDeserializedCode, + MockVerifiedCode, + (), + >::new(); + let per_block_module_cache: SyncModuleCache = + SyncModuleCache::empty(); + + let a = mock_deserialized_code(0, Some(2)); + per_block_module_cache .insert_deserialized_module( - a_id.clone(), + 0, a.code().deserialized().as_ref().clone(), a.extension().clone(), a.version(), ) .unwrap(); - captured_reads.capture_per_block_cache_read(a_id.clone(), Some(a)); + captured_reads.capture_per_block_cache_read(0, Some(a)); assert!(matches!( - captured_reads.get_module_read(&a_id), + captured_reads.get_module_read(&0), Ok(CacheRead::Hit(Some(_))) )); - let b_id = module_id("b"); - 
captured_reads.capture_per_block_cache_read(b_id.clone(), None); + captured_reads.capture_per_block_cache_read(1, None); assert!(matches!( - captured_reads.get_module_read(&b_id), + captured_reads.get_module_read(&1), Ok(CacheRead::Hit(None)) )); - let c_id = module_id("c"); assert!(matches!( - captured_reads.get_module_read(&c_id), + captured_reads.get_module_read(&2), Ok(CacheRead::Miss) )); } #[test] fn test_block_cache_module_reads() { - let mut captured_reads = CapturedReads::::new(); - let mvhashmap = - MVHashMap::, u32, ValueType, ExecutableTestType, DelayedFieldID>::new(); - - let a_id = module_id("a"); - let a = deserialized_code("a", Some(10)); - mvhashmap - .module_cache() + let mut captured_reads = CapturedReads::< + TestTransactionType, + u32, + MockDeserializedCode, + MockVerifiedCode, + (), + >::new(); + let global_module_cache = ImmutableModuleCache::empty(); + let per_block_module_cache = SyncModuleCache::empty(); + + let a = mock_deserialized_code(0, Some(10)); + per_block_module_cache .insert_deserialized_module( - a_id.clone(), + 0, a.code().deserialized().as_ref().clone(), a.extension().clone(), a.version(), ) .unwrap(); - captured_reads.capture_per_block_cache_read(a_id.clone(), Some(a)); - - let b_id = module_id("b"); - captured_reads.capture_per_block_cache_read(b_id.clone(), None); + captured_reads.capture_per_block_cache_read(0, Some(a)); + captured_reads.capture_per_block_cache_read(1, None); - assert!(captured_reads.validate_module_reads(mvhashmap.module_cache())); + assert!(captured_reads.validate_module_reads(&global_module_cache, &per_block_module_cache)); - // Entry did not exist before and now exists. 
- let b = deserialized_code("b", Some(12)); - mvhashmap - .module_cache() + let b = mock_deserialized_code(1, Some(12)); + per_block_module_cache .insert_deserialized_module( - b_id.clone(), + 1, b.code().deserialized().as_ref().clone(), b.extension().clone(), b.version(), ) .unwrap(); - assert!(!captured_reads.validate_module_reads(mvhashmap.module_cache())); + // Entry did not exist before and now exists. + let valid = + captured_reads.validate_module_reads(&global_module_cache, &per_block_module_cache); + assert!(!valid); - captured_reads.module_reads.remove(&b_id); - assert!(captured_reads.validate_module_reads(mvhashmap.module_cache())); + captured_reads.module_reads.remove(&1); + assert!(captured_reads.validate_module_reads(&global_module_cache, &per_block_module_cache)); // Version has been republished, with a higher transaction index. Should fail validation. - let a = deserialized_code("a", Some(20)); - mvhashmap - .module_cache() + let a = mock_deserialized_code(0, Some(20)); + per_block_module_cache .insert_deserialized_module( - a_id.clone(), + 0, a.code().deserialized().as_ref().clone(), a.extension().clone(), a.version(), ) .unwrap(); - assert!(!captured_reads.validate_module_reads(mvhashmap.module_cache())); + let valid = + captured_reads.validate_module_reads(&global_module_cache, &per_block_module_cache); + assert!(!valid); } #[test] fn test_global_and_block_cache_module_reads() { - let mut captured_reads = CapturedReads::::new(); - let mvhashmap = - MVHashMap::, u32, ValueType, ExecutableTestType, DelayedFieldID>::new(); + let mut captured_reads = CapturedReads::< + TestTransactionType, + u32, + MockDeserializedCode, + MockVerifiedCode, + (), + >::new(); + let global_module_cache = ImmutableModuleCache::empty(); + let per_block_module_cache = SyncModuleCache::empty(); // Module exists in global cache. 
- let a_id = module_id("a"); - let a = verified_code("a", None); - CrossBlockModuleCache::insert(a_id.clone(), a.clone()); - captured_reads.capture_global_cache_read(a_id.clone()); - assert!(captured_reads.validate_module_reads(mvhashmap.module_cache())); + global_module_cache.insert(0, mock_verified_code(0, None)); + captured_reads.capture_global_cache_read(0); + assert!(captured_reads.validate_module_reads(&global_module_cache, &per_block_module_cache)); // Assume we republish this module: validation must fail. - let a = deserialized_code("a", Some(10)); - CrossBlockModuleCache::mark_invalid(&a_id); - mvhashmap - .module_cache() + let a = mock_deserialized_code(100, Some(10)); + global_module_cache.mark_invalid(&0); + per_block_module_cache .insert_deserialized_module( - a_id.clone(), + 0, a.code().deserialized().as_ref().clone(), a.extension().clone(), a.version(), ) .unwrap(); - assert!(!captured_reads.validate_module_reads(mvhashmap.module_cache())); + + let valid = + captured_reads.validate_module_reads(&global_module_cache, &per_block_module_cache); + assert!(!valid); // Assume we re-read the new correct version. Then validation should pass again. 
- captured_reads.capture_per_block_cache_read(a_id.clone(), Some(a.clone())); - assert!(captured_reads.validate_module_reads(mvhashmap.module_cache())); - assert!(!CrossBlockModuleCache::is_valid(&a_id)); - CrossBlockModuleCache::remove(&a_id); + captured_reads.capture_per_block_cache_read(0, Some(a)); + assert!(captured_reads.validate_module_reads(&global_module_cache, &per_block_module_cache)); + assert!(!global_module_cache.contains_valid(&0)); } } diff --git a/aptos-move/block-executor/src/code_cache.rs b/aptos-move/block-executor/src/code_cache.rs index fcfb31dcbed88..09baff1ab854b 100644 --- a/aptos-move/block-executor/src/code_cache.rs +++ b/aptos-move/block-executor/src/code_cache.rs @@ -3,7 +3,7 @@ use crate::{ captured_reads::CacheRead, - cross_block_caches::CrossBlockModuleCache, + cross_block_caches::get_global_module_cache, view::{LatestView, ViewState}, }; use ambassador::delegate_to_methods; @@ -146,7 +146,7 @@ impl<'a, T: Transaction, S: TStateView, X: Executable> ModuleCache > { // First, look up the module in the cross-block global module cache. Record the read for // later validation in case the read module is republished. - if let Some(module) = CrossBlockModuleCache::get(key) { + if let Some(module) = get_global_module_cache().get(key) { match &self.latest_view { ViewState::Sync(state) => state .captured_reads diff --git a/aptos-move/block-executor/src/cross_block_caches.rs b/aptos-move/block-executor/src/cross_block_caches.rs index 5fa64130fad0c..d095170b56972 100644 --- a/aptos-move/block-executor/src/cross_block_caches.rs +++ b/aptos-move/block-executor/src/cross_block_caches.rs @@ -14,6 +14,7 @@ use move_vm_types::code::ModuleCode; use once_cell::sync::Lazy; use parking_lot::Mutex; use std::{ + hash::Hash, ops::Deref, sync::{ atomic::{AtomicBool, Ordering}, @@ -21,53 +22,47 @@ use std::{ }, }; -/// The maximum size of struct name index map in runtime environment. +/// The maximum size of struct name index map in runtime environment. 
Checked at block boundaries +/// only. const MAX_STRUCT_NAME_INDEX_MAP_SIZE: usize = 100_000; -/// The maximum size of [CrossBlockModuleCache]. Checked at block boundaries. -const MAX_CROSS_BLOCK_MODULE_CACHE_SIZE: usize = 100_000; - +/// A cached environment that can be persisted across blocks. Used by block executor only. static CROSS_BLOCK_ENVIRONMENT: Lazy>> = Lazy::new(|| Mutex::new(None)); -/// A cached environment that can be persisted across blocks. Used by block executor only. -pub struct CachedAptosEnvironment; - -impl CachedAptosEnvironment { - /// Returns the cached environment if it exists and has the same configuration as if it was - /// created based on the current state, or creates a new one and caches it. Should only be - /// called at the block boundaries. - pub fn get_with_delayed_field_optimization_enabled( - state_view: &impl StateView, - ) -> Result { - // Create a new environment. - let current_env = AptosEnvironment::new_with_delayed_field_optimization_enabled(state_view); - - // Lock the cache, and check if the environment is the same. - let mut cross_block_environment = CROSS_BLOCK_ENVIRONMENT.lock(); - if let Some(previous_env) = cross_block_environment.as_ref() { - if ¤t_env == previous_env { - let runtime_env = previous_env.runtime_environment(); - let struct_name_index_map_size = runtime_env - .struct_name_index_map_size() - .map_err(|e| e.finish(Location::Undefined).into_vm_status())?; - if struct_name_index_map_size > MAX_STRUCT_NAME_INDEX_MAP_SIZE { - // Cache is too large, flush it. Also flush module cache. - runtime_env.flush_struct_name_and_info_caches(); - CrossBlockModuleCache::flush_at_block_start(); - } - return Ok(previous_env.clone()); +/// Returns the cached environment if it exists and has the same configuration as if it was +/// created based on the current state, or creates a new one and caches it. Should only be +/// called at the block boundaries. 
+pub fn get_environment_with_delayed_field_optimization_enabled( + state_view: &impl StateView, +) -> Result { + // Create a new environment. + let current_env = AptosEnvironment::new_with_delayed_field_optimization_enabled(state_view); + + // Lock the cache, and check if the environment is the same. + let mut cross_block_environment = CROSS_BLOCK_ENVIRONMENT.lock(); + if let Some(previous_env) = cross_block_environment.as_ref() { + if ¤t_env == previous_env { + let runtime_env = previous_env.runtime_environment(); + let struct_name_index_map_size = runtime_env + .struct_name_index_map_size() + .map_err(|e| e.finish(Location::Undefined).into_vm_status())?; + if struct_name_index_map_size > MAX_STRUCT_NAME_INDEX_MAP_SIZE { + // Cache is too large, flush it. Also flush the module cache. + runtime_env.flush_struct_name_and_info_caches(); + get_global_module_cache().flush_unchecked(); } + return Ok(previous_env.clone()); } + } - // It is not cached or has changed, so we have to reset it. As a result, we need to flush - // the cross-block cache because we need to reload all modules with new configs. - *cross_block_environment = Some(current_env.clone()); - drop(cross_block_environment); - CrossBlockModuleCache::flush_at_block_start(); + // It is not cached or has changed, so we have to reset it. As a result, we need to flush + // the cross-block cache because we need to reload all modules with new configs. + *cross_block_environment = Some(current_env.clone()); + drop(cross_block_environment); + get_global_module_cache().flush_unchecked(); - Ok(current_env) - } + Ok(current_env) } /// Module code stored in cross-block module cache. @@ -121,127 +116,152 @@ where } } -type AptosImmutableModuleCode = ImmutableModuleCode; -type SyncCrossBlockModuleCache = ExplicitSyncWrapper>; -static CROSS_BLOCK_MODULE_CACHE: Lazy = - Lazy::new(|| ExplicitSyncWrapper::new(HashMap::new())); - -/// Represents an immutable cross-block cache. 
accessed concurrently throughout the block
- for (id, module) in modules { - if module.code().is_verified() { - let mut module = Arc::into_inner(module).ok_or_else(|| { - let msg = format!( - "Module {}::{} has more than one strong reference count", - id.address(), - id.name() - ); - PanicError::CodeInvariantError(msg) - })?; +impl ImmutableModuleCache +where + K: Hash + Eq + Clone, - module.set_version(None); - cache.insert(id, ImmutableModuleCode::new(Arc::new(module))?); - } - } + VC: Deref>, +{ + /// Returns new empty module cache with default capacity. + pub(crate) fn empty() -> Self { + let default_capacity = 100_000; + Self::with_capacity(default_capacity) + } - // To protect against running out of memory, keep the size limited to some constant. If it - // is too large, flush the cache. - if cache.len() > MAX_CROSS_BLOCK_MODULE_CACHE_SIZE { - cache.clear(); + /// Returns new empty module cache with specified capacity. + fn with_capacity(capacity: usize) -> Self { + Self { + module_cache: ExplicitSyncWrapper::new(HashMap::new()), + capacity, } - - Ok(()) } - /// Returns true if the module is stored in cross-block cache and is valid. - pub(crate) fn is_valid(module_id: &ModuleId) -> bool { - CROSS_BLOCK_MODULE_CACHE + /// Returns true if the key exists in immutable cache and the corresponding module is valid. + pub(crate) fn contains_valid(&self, key: &K) -> bool { + self.module_cache .acquire() - .get(module_id) + .get(key) .is_some_and(|module| module.is_valid()) } - /// Marks the cached entry (if it exists) as invalid. As a result, all subsequent calls to the - /// cache will result in a cache miss. It is fine for an entry not to exist: e.g., when a new - /// module is published one can try to invalidate global cache (that does not have the module). - pub(crate) fn mark_invalid(module_id: &ModuleId) { - if let Some(module) = CROSS_BLOCK_MODULE_CACHE.acquire().get(module_id) { + /// Marks the cached module (if it exists) as invalid. 
As a result, all subsequent calls to the + /// cache for the associated key will result in a cache miss. Note that it is fine for an + /// entry not to exist, in which case this is a no-op. + pub(crate) fn mark_invalid(&self, key: &K) { + if let Some(module) = self.module_cache.acquire().get(key) { module.mark_invalid(); } } - /// Returns the module from the cross module cache. If the module has not been cached, or is - /// no longer valid due to module publishing, [None] is returned. - pub(crate) fn get( - module_id: &ModuleId, - ) -> Option>>> - { - CROSS_BLOCK_MODULE_CACHE - .acquire() - .get(module_id) - .and_then(|module| { - if module.is_valid() { - Some(module.inner().clone()) - } else { - None + /// Returns the module stored in cache. If the module has not been cached, or it exists but is + /// not valid, [None] is returned. + pub(crate) fn get(&self, key: &K) -> Option>>> { + self.module_cache.acquire().get(key).and_then(|module| { + if module.is_valid() { + Some(module.inner().clone()) + } else { + None + } + }) + } + + /// Flushes the cache. Should never be called throughout block-execution. Use with caution. + pub fn flush_unchecked(&self) { + self.module_cache.acquire().clear(); + } + + /// Inserts modules into the cache. Should never be called throughout block-execution. Use with + /// caution. + /// + /// Notes: + /// 1. Only verified modules are inserted. + /// 2. Versions of inserted modules is set to [None] (storage version). + /// 3. Valid modules should not be removed, and new modules should have unique ownership. If + /// these constraints are violated, a panic error is returned. + /// 4. If the cache size exceeds its capacity after all verified modules have been inserted, + /// the cache is flushed. 
+ pub(crate) fn insert_verified_unchecked( + &self, + modules: impl Iterator>>)>, + ) -> Result<(), PanicError> { + let mut guard = self.module_cache.acquire(); + let module_cache = guard.dereference_mut(); + + for (key, module) in modules { + if module.code().is_verified() { + let mut module = module.as_ref().clone(); + module.set_version(None); + let prev = + module_cache.insert(key.clone(), ImmutableModuleCode::new(Arc::new(module))?); + + if prev.is_some_and(|prev_module| prev_module.is_valid()) { + return Err(PanicError::CodeInvariantError( + "Overwriting a valid module".to_string(), + )); } - }) + } + } + + if module_cache.len() > self.capacity { + module_cache.clear(); + } + + Ok(()) } - /// Inserts a module to the cross-block module cache. Used for tests only. + /// Insert the module to cache. Used for tests only. #[cfg(test)] - pub fn insert( - module_id: ModuleId, - module: Arc>>, - ) { - let mut cache = CROSS_BLOCK_MODULE_CACHE.acquire(); - cache.insert(module_id, ImmutableModuleCode::new(module).unwrap()); + pub(crate) fn insert(&self, key: K, module: Arc>>) { + self.module_cache + .acquire() + .insert(key, ImmutableModuleCode::new(module).unwrap()); } - /// Removes the specified module from cross-block module cache. Used for tests only. + /// Removes the module from cache. Used for tests only. #[cfg(test)] - pub fn remove(module_id: &ModuleId) { - let mut cache = CROSS_BLOCK_MODULE_CACHE.acquire(); - cache.remove(module_id); + pub(crate) fn remove(&self, key: &K) { + self.module_cache.acquire().remove(key); } - /// Returns the size of the cross-block module cache. + /// Returns the size of the cache. Used for tests only. #[cfg(test)] - pub fn size() -> usize { - CROSS_BLOCK_MODULE_CACHE.acquire().len() + pub(crate) fn size(&self) -> usize { + self.module_cache.acquire().len() } } +/// Immutable global cache. The size of the cache is fixed within a single block (modules are not +/// inserted or removed) and it is only mutated at the block boundaries. 
At the same time, modules +/// in this cache can be marked as "invalid" so that block executor can decide on whether to read +/// the module from this cache or from elsewhere. +#[allow(clippy::redundant_closure)] +static CROSS_BLOCK_MODULE_CACHE: Lazy< + ImmutableModuleCache, +> = Lazy::new(|| ImmutableModuleCache::empty()); + +/// Returns the module from the cross module cache. If the module has not been cached, or is +/// no longer valid due to module publishing, [None] is returned. +pub fn get_global_module_cache( +) -> &'static ImmutableModuleCache { + &CROSS_BLOCK_MODULE_CACHE +} + #[cfg(test)] mod test { use super::*; - use crate::types::test_types::{module_id, verified_code}; + use crate::types::test_types::{ + mock_deserialized_code, mock_verified_code, module_id, verified_code, + }; use aptos_types::{ on_chain_config::{FeatureFlag, Features}, state_store::{ @@ -249,27 +269,19 @@ mod test { state_value::StateValue, StateViewId, TStateView, }, }; - use claims::assert_ok; - use move_vm_types::code::{MockDeserializedCode, MockVerifiedCode}; + use claims::{assert_err, assert_ok, assert_some}; #[test] fn test_immutable_module_code() { - let module_code: ModuleCode<_, MockVerifiedCode, _, _> = - ModuleCode::from_deserialized(MockDeserializedCode::new(0), Arc::new(()), None); - assert!(ImmutableModuleCode::new(Arc::new(module_code)).is_err()); - - let module_code = - ModuleCode::from_verified(MockVerifiedCode::new(0), Arc::new(()), Some(22)); - assert!(ImmutableModuleCode::new(Arc::new(module_code)).is_err()); - - let module_code = ModuleCode::from_verified(MockVerifiedCode::new(0), Arc::new(()), None); - assert!(ImmutableModuleCode::new(Arc::new(module_code)).is_ok()); + assert!(ImmutableModuleCode::new(mock_deserialized_code(0, None)).is_err()); + assert!(ImmutableModuleCode::new(mock_deserialized_code(0, Some(22))).is_err()); + assert!(ImmutableModuleCode::new(mock_verified_code(0, Some(22))).is_err()); + assert!(ImmutableModuleCode::new(mock_verified_code(0, 
None)).is_ok()); } #[test] fn test_immutable_module_code_validity() { - let module_code = ModuleCode::from_verified(MockVerifiedCode::new(0), Arc::new(()), None); - let module_code = assert_ok!(ImmutableModuleCode::new(Arc::new(module_code))); + let module_code = assert_ok!(ImmutableModuleCode::new(mock_verified_code(0, None))); assert!(module_code.is_valid()); module_code.mark_invalid(); @@ -277,28 +289,65 @@ mod test { } #[test] - fn test_cross_block_module_cache() { - let valid_module_id = module_id("a"); - let valid_module_code = verified_code("a", None); - CrossBlockModuleCache::insert(valid_module_id.clone(), valid_module_code); + fn test_global_module_cache() { + let global_cache = ImmutableModuleCache::empty(); + + global_cache.insert(0, mock_verified_code(0, None)); + global_cache.insert(1, mock_verified_code(1, None)); + global_cache.mark_invalid(&1); - let invalid_module_id = module_id("b"); - let invalid_module_code = verified_code("b", None); - CrossBlockModuleCache::insert(invalid_module_id.clone(), invalid_module_code); - CrossBlockModuleCache::mark_invalid(&invalid_module_id); + assert_eq!(global_cache.size(), 2); - assert_eq!(CrossBlockModuleCache::size(), 2); - assert!(CrossBlockModuleCache::is_valid(&valid_module_id)); - assert!(!CrossBlockModuleCache::is_valid(&invalid_module_id)); + assert!(global_cache.contains_valid(&0)); + assert!(!global_cache.contains_valid(&1)); + assert!(!global_cache.contains_valid(&3)); - assert!(CrossBlockModuleCache::get(&valid_module_id).is_some()); - assert!(CrossBlockModuleCache::get(&invalid_module_id).is_none()); + assert!(global_cache.get(&0).is_some()); + assert!(global_cache.get(&1).is_none()); + assert!(global_cache.get(&3).is_none()); + } + + #[test] + fn test_insert_verified_for_global_module_cache() { + let capacity = 10; + let global_cache = ImmutableModuleCache::with_capacity(capacity); - let non_existing_id = module_id("c"); - assert!(CrossBlockModuleCache::get(&non_existing_id).is_none()); + let mut 
new_modules = vec![]; + for i in 0..capacity { + new_modules.push((i, mock_verified_code(i, Some(i as TxnIndex)))); + } + let result = global_cache.insert_verified_unchecked(new_modules.into_iter()); + assert!(result.is_ok()); + assert_eq!(global_cache.size(), capacity); + + // Versions should be set to storage. + for key in 0..capacity { + let code = assert_some!(global_cache.get(&key)); + assert!(code.version().is_none()) + } - CrossBlockModuleCache::remove(&valid_module_id); - CrossBlockModuleCache::remove(&invalid_module_id); + // Too many modules added, the cache should be flushed. + let new_modules = vec![(11, mock_verified_code(11, None))]; + let result = global_cache.insert_verified_unchecked(new_modules.into_iter()); + assert!(result.is_ok()); + assert_eq!(global_cache.size(), 0); + + // Should not add deserialized code. + let deserialized_modules = vec![(0, mock_deserialized_code(0, None))]; + assert_ok!(global_cache.insert_verified_unchecked(deserialized_modules.into_iter())); + assert_eq!(global_cache.size(), 0); + + // Should not override valid modules. + global_cache.insert(0, mock_verified_code(0, None)); + let new_modules = vec![(0, mock_verified_code(100, None))]; + assert_err!(global_cache.insert_verified_unchecked(new_modules.into_iter())); + + // Can override invalid modules. 
+ global_cache.mark_invalid(&0); + let new_modules = vec![(0, mock_verified_code(100, None))]; + let result = global_cache.insert_verified_unchecked(new_modules.into_iter()); + assert!(result.is_ok()); + assert_eq!(global_cache.size(), 1); } #[derive(Default)] @@ -328,11 +377,11 @@ mod test { #[test] fn test_cross_block_module_cache_flush() { let c_id = module_id("c"); - CrossBlockModuleCache::insert(c_id.clone(), verified_code("c", None)); - assert_eq!(CrossBlockModuleCache::size(), 1); + get_global_module_cache().insert(c_id.clone(), verified_code("c", None)); + assert_eq!(get_global_module_cache().size(), 1); - CrossBlockModuleCache::flush_at_block_start(); - assert_eq!(CrossBlockModuleCache::size(), 0); + get_global_module_cache().flush_unchecked(); + assert_eq!(get_global_module_cache().size(), 0); // Now check that cache is flushed when the environment is flushed. let mut state_view = HashMapView::default(); @@ -341,9 +390,9 @@ mod test { for i in 0..10 { let name = format!("m_{}", i); let id = module_id(&name); - CrossBlockModuleCache::insert(id.clone(), verified_code(&name, None)); + get_global_module_cache().insert(id.clone(), verified_code(&name, None)); } - assert_eq!(CrossBlockModuleCache::size(), 10); + assert_eq!(get_global_module_cache().size(), 10); let state_key = StateKey::on_chain_config::().unwrap(); let mut features = Features::default(); @@ -355,10 +404,10 @@ mod test { // New environment means we need to also flush global caches - to invalidate struct name // indices. 
- let env_new = assert_ok!( - CachedAptosEnvironment::get_with_delayed_field_optimization_enabled(&state_view) - ); + let env_new = assert_ok!(get_environment_with_delayed_field_optimization_enabled( + &state_view + )); assert!(env_old != env_new); - assert_eq!(CrossBlockModuleCache::size(), 0); + assert_eq!(get_global_module_cache().size(), 0); } } diff --git a/aptos-move/block-executor/src/executor.rs b/aptos-move/block-executor/src/executor.rs index 602866900a0f5..d20f88e1bc9bd 100644 --- a/aptos-move/block-executor/src/executor.rs +++ b/aptos-move/block-executor/src/executor.rs @@ -8,7 +8,7 @@ use crate::{ PARALLEL_EXECUTION_SECONDS, RAYON_EXECUTION_SECONDS, TASK_EXECUTE_SECONDS, TASK_VALIDATE_SECONDS, VM_INIT_SECONDS, WORK_WITH_TASK_SECONDS, }, - cross_block_caches::CrossBlockModuleCache, + cross_block_caches::get_global_module_cache, errors::*, executor_utilities::*, explicit_sync_wrapper::ExplicitSyncWrapper, @@ -411,7 +411,10 @@ where read_set.validate_data_reads(versioned_cache.data(), idx_to_validate) && read_set.validate_group_reads(versioned_cache.group_data(), idx_to_validate) && (scheduler.skip_module_reads_validation() - || read_set.validate_module_reads(versioned_cache.module_cache())) + || read_set.validate_module_reads( + get_global_module_cache(), + versioned_cache.module_cache(), + )) } fn update_transaction_on_abort( @@ -1125,12 +1128,11 @@ where } counters::update_state_counters(versioned_cache.stats(), true); - CrossBlockModuleCache::populate_from_code_cache_at_block_end( - versioned_cache.take_modules_iter(), - ) - .map_err(|err| { - alert!("[BlockSTM] Encountered panic error: {:?}", err); - })?; + get_global_module_cache() + .insert_verified_unchecked(versioned_cache.take_modules_iter()) + .map_err(|err| { + alert!("[BlockSTM] Encountered panic error: {:?}", err); + })?; // Explicit async drops. 
DEFAULT_DROPPER.schedule_drop((last_input_output, scheduler, versioned_cache)); @@ -1180,7 +1182,7 @@ where })?; let extension = AptosModuleExtension::new(state_value); - CrossBlockModuleCache::mark_invalid(&id); + get_global_module_cache().mark_invalid(&id); module_cache .insert_deserialized_module( id.clone(), @@ -1594,9 +1596,7 @@ where ret.resize_with(num_txns, E::Output::skip_output); counters::update_state_counters(unsync_map.stats(), false); - CrossBlockModuleCache::populate_from_code_cache_at_block_end( - unsync_map.into_modules_iter(), - )?; + get_global_module_cache().insert_verified_unchecked(unsync_map.into_modules_iter())?; let block_end_info = if self .config @@ -1656,7 +1656,7 @@ where // Flush the cache and the environment to re-run from the "clean" state. env.runtime_environment() .flush_struct_name_and_info_caches(); - CrossBlockModuleCache::flush_at_block_start(); + get_global_module_cache().flush_unchecked(); info!("parallel execution requiring fallback"); } diff --git a/aptos-move/block-executor/src/txn_last_input_output.rs b/aptos-move/block-executor/src/txn_last_input_output.rs index 1b8b0667ee11b..fcfbde7452aa7 100644 --- a/aptos-move/block-executor/src/txn_last_input_output.rs +++ b/aptos-move/block-executor/src/txn_last_input_output.rs @@ -15,14 +15,16 @@ use aptos_types::{ fee_statement::FeeStatement, state_store::state_value::StateValueMetadata, transaction::BlockExecutableTransaction as Transaction, + vm::modules::AptosModuleExtension, write_set::WriteOp, }; use aptos_vm_types::module_write_set::ModuleWrite; use arc_swap::ArcSwapOption; use crossbeam::utils::CachePadded; use dashmap::DashSet; -use move_core_types::value::MoveTypeLayout; -use move_vm_runtime::RuntimeEnvironment; +use move_binary_format::CompiledModule; +use move_core_types::{language_storage::ModuleId, value::MoveTypeLayout}; +use move_vm_runtime::{Module, RuntimeEnvironment}; use std::{ collections::{BTreeMap, HashSet}, fmt::Debug, @@ -30,7 +32,7 @@ use std::{ 
sync::Arc, }; -type TxnInput = CapturedReads; +type TxnInput = CapturedReads; macro_rules! forward_on_success_or_skip_rest { ($self:ident, $txn_idx:ident, $f:ident) => {{ @@ -128,7 +130,7 @@ impl, E: Debug + Send + Clone> pub(crate) fn record( &self, txn_idx: TxnIndex, - input: CapturedReads, + input: TxnInput, output: ExecutionStatus, arced_resource_writes: Vec<(T::Key, Arc, Option>)>, group_keys_and_tags: Vec<(T::Key, HashSet)>, @@ -173,7 +175,7 @@ impl, E: Debug + Send + Clone> || Self::append_and_check(module_writes_keys, &self.module_writes, &self.module_reads) } - pub(crate) fn read_set(&self, txn_idx: TxnIndex) -> Option>> { + pub(crate) fn read_set(&self, txn_idx: TxnIndex) -> Option>> { self.inputs[txn_idx as usize].load_full() } diff --git a/aptos-move/block-executor/src/types.rs b/aptos-move/block-executor/src/types.rs index 3affa39948c76..4191c1c2649c5 100644 --- a/aptos-move/block-executor/src/types.rs +++ b/aptos-move/block-executor/src/types.rs @@ -68,22 +68,27 @@ pub(crate) mod test_types { account_address::AccountAddress, identifier::Identifier, language_storage::ModuleId, }; use move_vm_runtime::{Module, RuntimeEnvironment}; - use move_vm_types::code::ModuleCode; + use move_vm_types::code::{MockDeserializedCode, MockVerifiedCode, ModuleCode}; use std::sync::Arc; - /// Returns a dummy [ModuleCode] in deserialized state. 
- pub(crate) fn deserialized_code( - module_name: &str, + pub(crate) fn mock_deserialized_code( + value: usize, version: Option, - ) -> Arc>> { - let compiled_module = - empty_module_with_dependencies_and_friends(module_name, vec![], vec![]); - let extension = Arc::new(AptosModuleExtension::new(StateValue::new_legacy( - Bytes::new(), - ))); + ) -> Arc>> { Arc::new(ModuleCode::from_deserialized( - compiled_module, - extension, + MockDeserializedCode::new(value), + Arc::new(()), + version, + )) + } + + pub(crate) fn mock_verified_code( + value: usize, + version: Option, + ) -> Arc>> { + Arc::new(ModuleCode::from_verified( + MockVerifiedCode::new(value), + Arc::new(()), version, )) } diff --git a/aptos-move/block-executor/src/view.rs b/aptos-move/block-executor/src/view.rs index ba54cd4e69720..705903840e996 100644 --- a/aptos-move/block-executor/src/view.rs +++ b/aptos-move/block-executor/src/view.rs @@ -42,6 +42,7 @@ use aptos_types::{ StateViewId, TStateView, }, transaction::BlockExecutableTransaction as Transaction, + vm::modules::AptosModuleExtension, write_set::TransactionWrite, }; use aptos_vm_logging::{log_schema::AdapterLogSchema, prelude::*}; @@ -50,9 +51,12 @@ use aptos_vm_types::resolver::{ }; use bytes::Bytes; use claims::assert_ok; -use move_binary_format::errors::{PartialVMError, PartialVMResult}; -use move_core_types::{value::MoveTypeLayout, vm_status::StatusCode}; -use move_vm_runtime::RuntimeEnvironment; +use move_binary_format::{ + errors::{PartialVMError, PartialVMResult}, + CompiledModule, +}; +use move_core_types::{language_storage::ModuleId, value::MoveTypeLayout, vm_status::StatusCode}; +use move_vm_runtime::{Module, RuntimeEnvironment}; use move_vm_types::{ delayed_values::delayed_field_id::ExtractUniqueIndex, value_serde::{ @@ -162,11 +166,14 @@ pub(crate) struct ParallelState<'a, T: Transaction, X: Executable> { scheduler: &'a Scheduler, start_counter: u32, counter: &'a AtomicU32, - pub(crate) captured_reads: RefCell>, + pub(crate) 
captured_reads: + RefCell>, } fn get_delayed_field_value_impl( - captured_reads: &RefCell>, + captured_reads: &RefCell< + CapturedReads, + >, versioned_delayed_fields: &dyn TVersionedDelayedFieldView, wait_for: &dyn TWaitForDependency, id: &T::Identifier, @@ -304,7 +311,9 @@ fn compute_delayed_field_try_add_delta_outcome_first_time( // TODO[agg_v2](cleanup): see about the split with CapturedReads, // and whether anything should be moved there. fn delayed_field_try_add_delta_outcome_impl( - captured_reads: &RefCell>, + captured_reads: &RefCell< + CapturedReads, + >, versioned_delayed_fields: &dyn TVersionedDelayedFieldView, wait_for: &dyn TWaitForDependency, id: &T::Identifier, @@ -778,7 +787,7 @@ impl<'a, T: Transaction, X: Executable> ResourceGroupState for ParallelState< pub(crate) struct SequentialState<'a, T: Transaction> { pub(crate) unsync_map: &'a UnsyncMap, - pub(crate) read_set: RefCell>, + pub(crate) read_set: RefCell>, pub(crate) start_counter: u32, pub(crate) counter: &'a RefCell, // TODO: Move to UnsyncMap. @@ -1009,7 +1018,9 @@ impl<'a, T: Transaction, S: TStateView, X: Executable> LatestView< } /// Drains the parallel captured reads. - pub(crate) fn take_parallel_reads(&self) -> CapturedReads { + pub(crate) fn take_parallel_reads( + &self, + ) -> CapturedReads { match &self.latest_view { ViewState::Sync(state) => state.captured_reads.take(), ViewState::Unsync(_) => { @@ -1019,7 +1030,7 @@ impl<'a, T: Transaction, S: TStateView, X: Executable> LatestView< } /// Drains the unsync read set. 
- pub(crate) fn take_sequential_reads(&self) -> UnsyncReadSet { + pub(crate) fn take_sequential_reads(&self) -> UnsyncReadSet { match &self.latest_view { ViewState::Sync(_) => { unreachable!("Take unsync reads called in parallel setting") @@ -1890,7 +1901,13 @@ mod test { #[test] fn test_history_updates() { let mut view = FakeVersionedDelayedFieldView::default(); - let captured_reads = RefCell::new(CapturedReads::::new()); + let captured_reads = RefCell::new(CapturedReads::< + TestTransactionType, + ModuleId, + CompiledModule, + Module, + AptosModuleExtension, + >::new()); let wait_for = FakeWaitForDependency(); let id = DelayedFieldID::new_for_test_for_u64(600); let max_value = 600; @@ -2029,7 +2046,13 @@ mod test { #[test] fn test_aggregator_overflows() { let mut view = FakeVersionedDelayedFieldView::default(); - let captured_reads = RefCell::new(CapturedReads::::new()); + let captured_reads = RefCell::new(CapturedReads::< + TestTransactionType, + ModuleId, + CompiledModule, + Module, + AptosModuleExtension, + >::new()); let wait_for = FakeWaitForDependency(); let id = DelayedFieldID::new_for_test_for_u64(600); let max_value = 600; @@ -2168,7 +2191,13 @@ mod test { #[test] fn test_aggregator_underflows() { let mut view = FakeVersionedDelayedFieldView::default(); - let captured_reads = RefCell::new(CapturedReads::::new()); + let captured_reads = RefCell::new(CapturedReads::< + TestTransactionType, + ModuleId, + CompiledModule, + Module, + AptosModuleExtension, + >::new()); let wait_for = FakeWaitForDependency(); let id = DelayedFieldID::new_for_test_for_u64(600); let max_value = 600; @@ -2307,7 +2336,13 @@ mod test { #[test] fn test_read_kind_upgrade_fail() { let mut view = FakeVersionedDelayedFieldView::default(); - let captured_reads = RefCell::new(CapturedReads::::new()); + let captured_reads = RefCell::new(CapturedReads::< + TestTransactionType, + ModuleId, + CompiledModule, + Module, + AptosModuleExtension, + >::new()); let wait_for = FakeWaitForDependency(); 
let id = DelayedFieldID::new_for_test_for_u64(600); let max_value = 600; diff --git a/aptos-move/e2e-tests/src/executor.rs b/aptos-move/e2e-tests/src/executor.rs index f27304b630673..faa9bb5c75572 100644 --- a/aptos-move/e2e-tests/src/executor.rs +++ b/aptos-move/e2e-tests/src/executor.rs @@ -15,7 +15,7 @@ use crate::{ use aptos_abstract_gas_usage::CalibrationAlgebra; use aptos_bitvec::BitVec; use aptos_block_executor::{ - cross_block_caches::CrossBlockModuleCache, txn_commit_hook::NoOpTransactionCommitHook, + cross_block_caches::get_global_module_cache, txn_commit_hook::NoOpTransactionCommitHook, }; use aptos_crypto::HashValue; use aptos_framework::ReleaseBundle; @@ -685,7 +685,7 @@ impl FakeExecutor { // Flush cross-block cache if we are comparing sequential and parallel executions. We do it // twice to make sure that in case modules are published, we start from the empty cache. if mode == ExecutorMode::BothComparison { - CrossBlockModuleCache::flush_at_block_start(); + get_global_module_cache().flush_unchecked(); } let sequential_output = if mode != ExecutorMode::ParallelOnly { @@ -702,7 +702,7 @@ impl FakeExecutor { // Re-flush the cache again because the previous execution may have put new published code // into cross-block module cache. 
if mode == ExecutorMode::BothComparison { - CrossBlockModuleCache::flush_at_block_start(); + get_global_module_cache().flush_unchecked(); } let parallel_output = if mode != ExecutorMode::SequentialOnly { diff --git a/third_party/move/move-vm/runtime/src/data_cache.rs b/third_party/move/move-vm/runtime/src/data_cache.rs index b96a8c8d65e23..4bdb0a48c6a28 100644 --- a/third_party/move/move-vm/runtime/src/data_cache.rs +++ b/third_party/move/move-vm/runtime/src/data_cache.rs @@ -233,7 +233,7 @@ impl<'r> TransactionDataCache<'r> { Loader::V1(_) => { let maybe_module = module_store.module_at(&ty_tag.module_id()); let metadata: &[Metadata] = match &maybe_module { - Some(m) => &m.compiled_module_ref().metadata, + Some(m) => &m.metadata, None => &[], }; // If we need to process aggregator lifting, we pass type layout to remote. diff --git a/third_party/move/move-vm/runtime/src/loader/function.rs b/third_party/move/move-vm/runtime/src/loader/function.rs index 59fee58be2389..a013af4911360 100644 --- a/third_party/move/move-vm/runtime/src/loader/function.rs +++ b/third_party/move/move-vm/runtime/src/loader/function.rs @@ -134,8 +134,8 @@ impl LoadedFunction { LoadedFunctionOwner::Script(_) => "script::main".into(), LoadedFunctionOwner::Module(m) => format!( "0x{}::{}::{}", - m.compiled_module_ref().self_addr().to_hex(), - m.compiled_module_ref().self_name().as_str(), + m.self_addr().to_hex(), + m.self_name().as_str(), self.function.name() ), } diff --git a/third_party/move/move-vm/runtime/src/loader/mod.rs b/third_party/move/move-vm/runtime/src/loader/mod.rs index 371c8782e1f4e..618168d738e8b 100644 --- a/third_party/move/move-vm/runtime/src/loader/mod.rs +++ b/third_party/move/move-vm/runtime/src/loader/mod.rs @@ -525,7 +525,7 @@ impl LoaderV1 { .into_iter() .map(|module_id| self.load_module(&module_id, data_store, module_store)) .collect::>>()?; - dependencies::verify_script(&script, loaded_deps.iter().map(|m| m.compiled_module_ref()))?; + 
dependencies::verify_script(&script, loaded_deps.iter().map(|m| m.as_ref().as_ref()))?; Ok(script) } @@ -909,7 +909,7 @@ impl LoaderV1 { // verify that the transitive closure does not have cycles self.verify_module_cyclic_relations( - module_ref.compiled_module_ref(), + &module_ref, &BTreeMap::new(), &BTreeSet::new(), module_store, @@ -1034,7 +1034,7 @@ impl LoaderV1 { // once all dependencies are loaded, do the linking check let all_imm_deps = bundle_deps .into_iter() - .chain(cached_deps.iter().map(|m| m.compiled_module_ref())); + .chain(cached_deps.iter().map(|m| m.as_ref().as_ref())); let result = dependencies::verify_module(module, all_imm_deps); // if dependencies loading is not allowed to fail, the linking should not fail as well diff --git a/third_party/move/move-vm/runtime/src/loader/modules.rs b/third_party/move/move-vm/runtime/src/loader/modules.rs index 9a8cfda35aa19..e4c0f52fd7a3f 100644 --- a/third_party/move/move-vm/runtime/src/loader/modules.rs +++ b/third_party/move/move-vm/runtime/src/loader/modules.rs @@ -670,14 +670,6 @@ impl Module { self.struct_instantiations[idx as usize].field_count } - pub fn compiled_module_ref(&self) -> &CompiledModule { - &self.module - } - - pub fn compiled_module_arc(&self) -> &Arc { - &self.module - } - pub(crate) fn field_offset(&self, idx: FieldHandleIndex) -> usize { self.field_handles[idx.0 as usize].offset } diff --git a/third_party/move/move-vm/runtime/src/move_vm.rs b/third_party/move/move-vm/runtime/src/move_vm.rs index a64b6f1cb794e..b3c8ca2e89e06 100644 --- a/third_party/move/move-vm/runtime/src/move_vm.rs +++ b/third_party/move/move-vm/runtime/src/move_vm.rs @@ -21,7 +21,7 @@ use move_core_types::{ metadata::Metadata, vm_status::StatusCode, }; use move_vm_types::resolver::MoveResolver; -use std::sync::Arc; +use std::{ops::Deref, sync::Arc}; #[derive(Clone)] pub struct MoveVM { @@ -135,7 +135,7 @@ impl MoveVM { ), &ModuleStorageAdapter::new(self.runtime.module_storage_v1()), )?; - 
Ok(module.compiled_module_arc().clone()) + Ok(module.as_ref().deref().clone()) }, Loader::V2(_) => Err(PartialVMError::new( StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, @@ -190,11 +190,6 @@ impl MoveVM { where F: FnOnce(&[Metadata]) -> Option, { - f(&self - .runtime - .module_cache - .fetch_module(module)? - .compiled_module_ref() - .metadata) + f(&self.runtime.module_cache.fetch_module(module)?.metadata) } } diff --git a/third_party/move/move-vm/runtime/src/runtime.rs b/third_party/move/move-vm/runtime/src/runtime.rs index f0ebd88c223bd..9975335fd0fd0 100644 --- a/third_party/move/move-vm/runtime/src/runtime.rs +++ b/third_party/move/move-vm/runtime/src/runtime.rs @@ -137,10 +137,10 @@ impl VMRuntime { #[allow(deprecated)] if data_store.exists_module(&module_id)? && compat.need_check_compat() { let old_module_ref = loader.load_module(&module_id, data_store, module_store)?; - let old_module = old_module_ref.compiled_module_ref(); + let old_module = old_module_ref.as_ref().as_ref(); if loader.vm_config().use_compatibility_checker_v2 { compat - .check(old_module, module) + .check(old_module_ref.as_ref().as_ref(), module) .map_err(|e| e.finish(Location::Undefined))? 
} else { #[allow(deprecated)] diff --git a/third_party/move/move-vm/runtime/src/storage/environment.rs b/third_party/move/move-vm/runtime/src/storage/environment.rs index 905708b01d1e8..61698118520ef 100644 --- a/third_party/move/move-vm/runtime/src/storage/environment.rs +++ b/third_party/move/move-vm/runtime/src/storage/environment.rs @@ -125,10 +125,10 @@ impl RuntimeEnvironment { locally_verified_script.0.as_ref(), immediate_dependencies .iter() - .map(|m| m.compiled_module_ref()), + .map(|module| module.as_ref().as_ref()), )?; Script::new(locally_verified_script.0, self.struct_name_index_map()) - .map_err(|e| e.finish(Location::Script)) + .map_err(|err| err.finish(Location::Script)) } /// Creates a locally verified compiled module by running: @@ -167,7 +167,7 @@ impl RuntimeEnvironment { locally_verified_module.0.as_ref(), immediate_dependencies .iter() - .map(|m| m.compiled_module_ref()), + .map(|module| module.as_ref().as_ref()), )?; let result = Module::new( &self.natives, diff --git a/third_party/move/move-vm/runtime/src/storage/publishing.rs b/third_party/move/move-vm/runtime/src/storage/publishing.rs index 15410a3fe159f..f06277afb43be 100644 --- a/third_party/move/move-vm/runtime/src/storage/publishing.rs +++ b/third_party/move/move-vm/runtime/src/storage/publishing.rs @@ -230,8 +230,7 @@ impl<'a, M: ModuleStorage> StagingModuleStorage<'a, M> { })?; // Also verify that all friends exist. - for (friend_addr, friend_name) in module.compiled_module_ref().immediate_friends_iter() - { + for (friend_addr, friend_name) in module.immediate_friends_iter() { if !staged_module_storage.check_module_exists(friend_addr, friend_name)? 
{ return Err(module_linker_error!(friend_addr, friend_name)); } diff --git a/third_party/move/move-vm/types/src/code/cache/module_cache.rs b/third_party/move/move-vm/types/src/code/cache/module_cache.rs index 95a5f0288d564..ffd775f2c291d 100644 --- a/third_party/move/move-vm/types/src/code/cache/module_cache.rs +++ b/third_party/move/move-vm/types/src/code/cache/module_cache.rs @@ -63,6 +63,19 @@ where } } +impl Clone for ModuleCode +where + V: Clone, +{ + fn clone(&self) -> Self { + Self { + code: self.code.clone(), + extension: self.extension.clone(), + version: self.version.clone(), + } + } +} + /// Interface for building module code to be stored in cache, e.g., if it is not yet cached. pub trait ModuleCodeBuilder { type Key: Eq + Hash + Clone; diff --git a/third_party/move/move-vm/types/src/code/cache/types.rs b/third_party/move/move-vm/types/src/code/cache/types.rs index 096357cd33a15..4fb6c62973ae7 100644 --- a/third_party/move/move-vm/types/src/code/cache/types.rs +++ b/third_party/move/move-vm/types/src/code/cache/types.rs @@ -2,6 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 use bytes::Bytes; +use move_core_types::{ + account_address::AccountAddress, identifier::IdentStr, language_storage::ModuleId, +}; use std::{ops::Deref, sync::Arc}; pub trait WithBytes { @@ -16,6 +19,26 @@ pub trait WithHash { fn hash(&self) -> &[u8; 32]; } +pub trait WithAddress { + fn address(&self) -> &AccountAddress; +} + +impl WithAddress for ModuleId { + fn address(&self) -> &AccountAddress { + self.address() + } +} + +pub trait WithName { + fn name(&self) -> &IdentStr; +} + +impl WithName for ModuleId { + fn name(&self) -> &IdentStr { + self.name() + } +} + /// An entry for the code cache that can have multiple different representations. pub enum Code { /// Deserialized code, not yet verified with bytecode verifier. 
@@ -75,6 +98,7 @@ impl Clone for Code { } #[cfg(any(test, feature = "testing"))] +#[derive(Clone)] pub struct MockDeserializedCode(usize); #[cfg(any(test, feature = "testing"))] diff --git a/third_party/move/move-vm/types/src/code/mod.rs b/third_party/move/move-vm/types/src/code/mod.rs index e23cf886110ea..9e2d0ab57764c 100644 --- a/third_party/move/move-vm/types/src/code/mod.rs +++ b/third_party/move/move-vm/types/src/code/mod.rs @@ -13,6 +13,6 @@ pub use cache::{ UnsyncModuleCache, }, script_cache::{ambassador_impl_ScriptCache, ScriptCache, SyncScriptCache, UnsyncScriptCache}, - types::{Code, WithBytes, WithHash}, + types::{Code, WithAddress, WithBytes, WithHash, WithName}, }; pub use storage::ModuleBytesStorage;