From 27ceec89462fa63fcb077bc8f95c8b869817fe54 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 31 Jan 2025 15:19:47 +0200 Subject: [PATCH 1/7] Sketch more generic sandbox execution in API server --- core/lib/vm_interface/src/executor.rs | 2 +- .../src/types/outputs/execution_result.rs | 6 + .../src/types/outputs/statistic.rs | 4 +- .../src/execution_sandbox/execute.rs | 215 +++++++++--------- .../api_server/src/execution_sandbox/mod.rs | 2 +- .../api_server/src/execution_sandbox/tests.rs | 4 +- .../src/execution_sandbox/validate.rs | 2 +- .../src/tx_sender/gas_estimation.rs | 19 +- core/node/api_server/src/tx_sender/mod.rs | 32 ++- core/node/api_server/src/tx_sender/result.rs | 20 +- .../src/tx_sender/tests/gas_estimation.rs | 41 ++-- .../api_server/src/tx_sender/tests/send_tx.rs | 24 +- .../api_server/src/web3/namespaces/debug.rs | 4 +- .../api_server/src/web3/namespaces/zks.rs | 8 +- 14 files changed, 201 insertions(+), 182 deletions(-) diff --git a/core/lib/vm_interface/src/executor.rs b/core/lib/vm_interface/src/executor.rs index 30534b1420cf..c5fa271957b2 100644 --- a/core/lib/vm_interface/src/executor.rs +++ b/core/lib/vm_interface/src/executor.rs @@ -61,7 +61,7 @@ pub trait OneshotExecutor { /// VM executor capable of validating transactions. #[async_trait] -pub trait TransactionValidator: OneshotExecutor { +pub trait TransactionValidator { /// Validates the provided transaction. async fn validate_transaction( &self, diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs index e95721dbb461..8455aa4b790e 100644 --- a/core/lib/vm_interface/src/types/outputs/execution_result.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs @@ -157,6 +157,12 @@ impl ExecutionResult { } } +impl From for ExecutionResult { + fn from(full_result: VmExecutionResultAndLogs) -> Self { + full_result.result + } +} + impl VmExecutionResultAndLogs { /// Creates a mock full result based on the provided base result. pub fn mock(result: ExecutionResult) -> Self { diff --git a/core/lib/vm_interface/src/types/outputs/statistic.rs b/core/lib/vm_interface/src/types/outputs/statistic.rs index fdb89c08f498..39ddc8e4c644 100644 --- a/core/lib/vm_interface/src/types/outputs/statistic.rs +++ b/core/lib/vm_interface/src/types/outputs/statistic.rs @@ -173,13 +173,13 @@ pub struct TransactionExecutionMetrics { pub repeated_storage_writes: usize, pub gas_used: usize, pub gas_remaining: u32, - pub event_topics: u16, + pub event_topics: u16, // FIXME: never read pub published_bytecode_bytes: usize, pub l2_l1_long_messages: usize, pub l2_l1_logs: usize, pub user_l2_l1_logs: usize, pub contracts_used: usize, - pub contracts_deployed: u16, + pub contracts_deployed: u16, // FIXME: incorrectly defined? pub vm_events: usize, pub storage_logs: usize, /// Sum of storage logs, vm events, l2->l1 logs, and the number of precompile calls. diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index d58bf6ca38f3..7f412d6991ad 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -1,6 +1,10 @@ //! Implementation of "executing" methods, e.g. `eth_call`. 
-use std::time::{Duration, Instant};
+use std::{
+    fmt,
+    sync::Arc,
+    time::{Duration, Instant},
+};
 
 use anyhow::Context as _;
 use async_trait::async_trait;
@@ -8,10 +12,10 @@ use tokio::runtime::Handle;
 use zksync_dal::{Connection, Core};
 use zksync_multivm::interface::{
     executor::{OneshotExecutor, TransactionValidator},
-    storage::{ReadStorage, StorageWithOverrides},
-    tracer::{TimestampAsserterParams, ValidationError, ValidationParams, ValidationTraces},
-    Call, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult,
-    TransactionExecutionMetrics, TxExecutionArgs, VmExecutionResultAndLogs,
+    storage::StorageWithOverrides,
+    tracer::TimestampAsserterParams,
+    Call, ExecutionResult, OneshotEnv, OneshotTracingParams, TransactionExecutionMetrics,
+    TxExecutionArgs, VmExecutionResultAndLogs,
 };
 use zksync_state::{PostgresStorage, PostgresStorageCaches};
 use zksync_types::{
@@ -46,15 +50,6 @@ pub(crate) enum SandboxAction {
 }
 
 impl SandboxAction {
-    fn factory_deps_count(&self) -> usize {
-        match self {
-            Self::Execution { tx, .. } | Self::Call { call: tx, .. } => {
-                tx.execute.factory_deps.len()
-            }
-            Self::GasEstimation { tx, .. } => tx.execute.factory_deps.len(),
-        }
-    }
-
     fn into_parts(self) -> (TxExecutionArgs, OneshotTracingParams) {
         match self {
             Self::Execution { tx, .. } => (
@@ -76,9 +71,9 @@ impl SandboxAction {
 
 /// Output of [`SandboxExecutor::execute_in_sandbox()`].
 #[derive(Debug, Clone)]
-pub(crate) struct SandboxExecutionOutput {
+pub(crate) struct SandboxExecutionOutput<R> {
     /// Output of the VM.
-    pub vm: VmExecutionResultAndLogs,
+    pub vm: R,
     /// Traced calls if requested.
     pub call_traces: Vec<Call>,
     /// Execution metrics.
@@ -87,16 +82,83 @@
     pub are_published_bytecodes_ok: bool,
 }
 
-#[derive(Debug)]
-enum SandboxExecutorEngine {
-    Real(MainOneshotExecutor),
-    Mock(MockOneshotExecutor),
+type SandboxStorage = StorageWithOverrides<PostgresStorage<'static>>;
+
+/// Higher-level wrapper around a oneshot VM executor used in the API server.
+#[async_trait]
+pub(crate) trait SandboxExecutorEngine<R>:
+    Send + Sync + fmt::Debug + TransactionValidator<SandboxStorage>
+{
+    async fn execute_in_sandbox(
+        &self,
+        storage: SandboxStorage,
+        env: OneshotEnv,
+        args: TxExecutionArgs,
+        tracing_params: OneshotTracingParams,
+    ) -> anyhow::Result<SandboxExecutionOutput<R>>;
+}
+
+#[async_trait]
+impl<T, R> SandboxExecutorEngine<R> for T
+where
+    T: OneshotExecutor<SandboxStorage>
+        + TransactionValidator<SandboxStorage>
+        + Send
+        + Sync
+        + fmt::Debug,
+    R: From<VmExecutionResultAndLogs>,
+{
+    async fn execute_in_sandbox(
+        &self,
+        storage: SandboxStorage,
+        env: OneshotEnv,
+        args: TxExecutionArgs,
+        tracing_params: OneshotTracingParams,
+    ) -> anyhow::Result<SandboxExecutionOutput<R>> {
+        let total_factory_deps = args.transaction.execute.factory_deps.len() as u16;
+        let result = self
+            .inspect_transaction_with_bytecode_compression(storage, env, args, tracing_params)
+            .await?;
+        let metrics =
+            vm_metrics::collect_tx_execution_metrics(total_factory_deps, &result.tx_result);
+
+        Ok(SandboxExecutionOutput {
+            vm: R::from(*result.tx_result),
+            call_traces: result.call_traces,
+            metrics,
+            are_published_bytecodes_ok: result.compression_result.is_ok(),
+        })
+    }
+}
+
+pub(crate) trait SandboxVmResult {
+    fn engine(executor: &SandboxExecutor) -> Option<&dyn SandboxExecutorEngine<Self>>;
+
+    fn is_supported(executor: &SandboxExecutor) -> bool {
+        Self::engine(executor).is_some()
+    }
+}
+
+impl SandboxVmResult for ExecutionResult {
+    fn engine(executor: &SandboxExecutor) -> Option<&dyn SandboxExecutorEngine<Self>> {
+        Some(executor.engine.as_ref())
+    }
+}
+
+impl SandboxVmResult for VmExecutionResultAndLogs {
+    fn engine(executor: &SandboxExecutor) -> Option<&dyn SandboxExecutorEngine<Self>> {
+        executor.debug_engine.as_deref()
+    }
+}
 
 /// Executor of transactions / calls used in the API server.
 #[derive(Debug)]
 pub(crate) struct SandboxExecutor {
-    engine: SandboxExecutorEngine,
+    // VM execution engine. All RPC methods other than `zks_sendRawTransactionWithDetailedOutput` currently
+    // do not use anything logs etc.
+ pub(super) engine: Arc>, + debug_engine: Option>>, + pub(super) options: SandboxExecutorOptions, storage_caches: Option, pub(super) timestamp_asserter_params: Option, @@ -115,8 +177,11 @@ impl SandboxExecutor { executor.panic_on_divergence(); executor .set_execution_latency_histogram(&SANDBOX_METRICS.sandbox[&SandboxStage::Execution]); + + let engine = Arc::new(executor); Self { - engine: SandboxExecutorEngine::Real(executor), + engine: engine.clone(), + debug_engine: Some(engine), options, storage_caches: Some(caches), timestamp_asserter_params, @@ -131,8 +196,10 @@ impl SandboxExecutor { executor: MockOneshotExecutor, options: SandboxExecutorOptions, ) -> Self { + let engine = Arc::new(executor); Self { - engine: SandboxExecutorEngine::Mock(executor), + engine: engine.clone(), + debug_engine: Some(engine), options, storage_caches: None, timestamp_asserter_params: None, @@ -141,8 +208,6 @@ impl SandboxExecutor { /// This method assumes that (block with number `resolved_block_number` is present in DB) /// or (`block_id` is `pending` and block with number `resolved_block_number - 1` is present in DB) - #[allow(clippy::too_many_arguments)] - #[tracing::instrument(level = "debug", skip_all)] pub async fn execute_in_sandbox( &self, vm_permit: VmPermit, @@ -151,7 +216,20 @@ impl SandboxExecutor { block_args: &BlockArgs, state_override: Option, ) -> anyhow::Result { - let total_factory_deps = action.factory_deps_count() as u16; + self.execute_with_custom_result(vm_permit, connection, action, block_args, state_override) + .await + } + + #[tracing::instrument(level = "debug", name = "execute", skip_all)] + pub async fn execute_with_custom_result( + &self, + _vm_permit: VmPermit, + connection: Connection<'static, Core>, + action: SandboxAction, + block_args: &BlockArgs, + state_override: Option, + ) -> anyhow::Result> { + let engine = R::engine(self).context("not supported")?; let (env, storage) = self .prepare_env_and_storage(connection, block_args, &action) .await?; @@ -159,24 +237,9 @@ impl SandboxExecutor { let state_override = state_override.unwrap_or_default(); let storage = apply_state_override(storage, &state_override); let (execution_args, tracing_params) = action.into_parts(); - let result = self - .inspect_transaction_with_bytecode_compression( - storage, - env, - execution_args, - tracing_params, - ) - .await?; - drop(vm_permit); - - let metrics = - vm_metrics::collect_tx_execution_metrics(total_factory_deps, &result.tx_result); - Ok(SandboxExecutionOutput { - vm: *result.tx_result, - call_traces: result.call_traces, - metrics, - are_published_bytecodes_ok: result.compression_result.is_ok(), - }) + engine + .execute_in_sandbox(storage, env, execution_args, tracing_params) + .await } pub(super) async fn prepare_env_and_storage( @@ -250,67 +313,3 @@ impl SandboxExecutor { Ok((env, storage)) } } - -#[async_trait] -impl OneshotExecutor> for SandboxExecutor -where - S: ReadStorage + Send + 'static, -{ - async fn inspect_transaction_with_bytecode_compression( - &self, - storage: StorageWithOverrides, - env: OneshotEnv, - args: TxExecutionArgs, - tracing_params: OneshotTracingParams, - ) -> anyhow::Result { - match &self.engine { - SandboxExecutorEngine::Real(executor) => { - executor - .inspect_transaction_with_bytecode_compression( - storage, - env, - args, - tracing_params, - ) - .await - } - SandboxExecutorEngine::Mock(executor) => { - executor - .inspect_transaction_with_bytecode_compression( - storage, - env, - args, - tracing_params, - ) - .await - } - } - } -} - -#[async_trait] -impl 
TransactionValidator> for SandboxExecutor -where - S: ReadStorage + Send + 'static, -{ - async fn validate_transaction( - &self, - storage: StorageWithOverrides, - env: OneshotEnv, - tx: L2Tx, - validation_params: ValidationParams, - ) -> anyhow::Result> { - match &self.engine { - SandboxExecutorEngine::Real(executor) => { - executor - .validate_transaction(storage, env, tx, validation_params) - .await - } - SandboxExecutorEngine::Mock(executor) => { - executor - .validate_transaction(storage, env, tx, validation_params) - .await - } - } - } -} diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index 78bc85de9f22..d4d3c268f34b 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -15,7 +15,7 @@ use zksync_vm_executor::oneshot::{BlockInfo, ResolvedBlockInfo}; use self::vm_metrics::SandboxStage; pub(super) use self::{ error::SandboxExecutionError, - execute::{SandboxAction, SandboxExecutor}, + execute::{SandboxAction, SandboxExecutor, SandboxVmResult}, validate::ValidationError, vm_metrics::{SubmitTxStage, SANDBOX_METRICS}, }; diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index 0aff15b973e0..98edd3a4971f 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -248,7 +248,7 @@ async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args assert!(output.are_published_bytecodes_ok); let tx_result = output.vm; - assert!(!tx_result.result.is_failed(), "{tx_result:#?}"); + assert!(!tx_result.is_failed(), "{tx_result:#?}"); } #[test_casing(2, [false, true])] @@ -305,7 +305,7 @@ async fn validating_transaction(set_balance: bool) { .await .unwrap(); - let result = result.vm.result; + let result = result.vm; if set_balance { assert_matches!(result, ExecutionResult::Success { .. 
}); } else { diff --git a/core/node/api_server/src/execution_sandbox/validate.rs b/core/node/api_server/src/execution_sandbox/validate.rs index 3d58f807a89a..6eb2f1a2bda5 100644 --- a/core/node/api_server/src/execution_sandbox/validate.rs +++ b/core/node/api_server/src/execution_sandbox/validate.rs @@ -4,7 +4,6 @@ use anyhow::Context as _; use tracing::Instrument; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::interface::{ - executor::TransactionValidator, storage::StorageWithOverrides, tracer::{ TimestampAsserterParams, ValidationError as RawValidationError, ValidationParams, @@ -64,6 +63,7 @@ impl SandboxExecutor { let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start(); let validation_result = self + .engine .validate_transaction(storage, env, tx, validation_params) .instrument(tracing::debug_span!("validation")) .await?; diff --git a/core/node/api_server/src/tx_sender/gas_estimation.rs b/core/node/api_server/src/tx_sender/gas_estimation.rs index b4a05a0756b6..e872a033c534 100644 --- a/core/node/api_server/src/tx_sender/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/gas_estimation.rs @@ -3,7 +3,7 @@ use std::{ops, time::Instant}; use anyhow::Context; use zksync_dal::CoreDal; use zksync_multivm::{ - interface::{TransactionExecutionMetrics, VmExecutionResultAndLogs}, + interface::{ExecutionResult, TransactionExecutionMetrics}, utils::{ adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead, get_max_batch_gas_limit, @@ -214,10 +214,10 @@ impl TxSender { lower_bound: &mut u64, upper_bound: &mut u64, pivot: u64, - result: &VmExecutionResultAndLogs, + result: &ExecutionResult, ) { // For now, we don't discern between "out of gas" and other failure reasons since it's difficult in the general case. - if result.result.is_failed() { + if result.is_failed() { *lower_bound = pivot + 1; } else { *upper_bound = pivot; @@ -391,19 +391,20 @@ impl<'a> GasEstimator<'a> { // For L2 transactions, we estimate the amount of gas needed to cover for the pubdata by creating a transaction with infinite gas limit, // and getting how much pubdata it used. - let (result, _) = self.unadjusted_step(self.max_gas_limit).await?; + let (result, metrics) = self.unadjusted_step(self.max_gas_limit).await?; // If the transaction has failed with such a large gas limit, we return an API error here right away, // since the inferred gas bounds would be unreliable in this case. 
result.check_api_call_result()?; // It is assumed that there is no overflow here let gas_charged_for_pubdata = - u64::from(result.statistics.pubdata_published) * self.gas_per_pubdata_byte; + u64::from(metrics.pubdata_published) * self.gas_per_pubdata_byte; - let total_gas_charged = self.max_gas_limit.checked_sub(result.refunds.gas_refunded); + let gas_refunded = 0; // FIXME: result.refunds.gas_refunded + let total_gas_charged = self.max_gas_limit.checked_sub(gas_refunded); Ok(InitialGasEstimate { total_gas_charged, - computational_gas_used: Some(result.statistics.computational_gas_used.into()), + computational_gas_used: Some(metrics.computational_gas_used.into()), operator_overhead, gas_charged_for_pubdata, }) @@ -425,7 +426,7 @@ impl<'a> GasEstimator<'a> { async fn step( &self, tx_gas_limit: u64, - ) -> Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics), SubmitTxError> { + ) -> Result<(ExecutionResult, TransactionExecutionMetrics), SubmitTxError> { let gas_limit_with_overhead = tx_gas_limit + self.tx_overhead(tx_gas_limit); // We need to ensure that we never use a gas limit that is higher than the maximum allowed let forced_gas_limit = @@ -436,7 +437,7 @@ impl<'a> GasEstimator<'a> { pub(super) async fn unadjusted_step( &self, forced_gas_limit: u64, - ) -> Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics), SubmitTxError> { + ) -> Result<(ExecutionResult, TransactionExecutionMetrics), SubmitTxError> { let mut tx = self.transaction.clone(); match &mut tx.common_data { ExecuteTransactionCommon::L1(l1_common_data) => { diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index e19cc6c1d706..449f2dd815c9 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -10,8 +10,8 @@ use zksync_dal::{ }; use zksync_multivm::{ interface::{ - tracer::TimestampAsserterParams as TracerTimestampAsserterParams, OneshotTracingParams, - TransactionExecutionMetrics, VmExecutionResultAndLogs, + tracer::TimestampAsserterParams as TracerTimestampAsserterParams, ExecutionResult, + OneshotTracingParams, TransactionExecutionMetrics, }, utils::{ derive_base_fee_and_gas_per_pubdata, get_max_batch_gas_limit, get_max_new_factory_deps, @@ -40,8 +40,8 @@ use zksync_vm_executor::oneshot::{ pub(super) use self::{gas_estimation::BinarySearchKind, result::SubmitTxError}; use self::{master_pool_sink::MasterPoolSink, result::ApiCallResult, tx_sink::TxSink}; use crate::execution_sandbox::{ - BlockArgs, SandboxAction, SandboxExecutor, SubmitTxStage, VmConcurrencyBarrier, - VmConcurrencyLimiter, SANDBOX_METRICS, + BlockArgs, SandboxAction, SandboxExecutor, SandboxVmResult, SubmitTxStage, + VmConcurrencyBarrier, VmConcurrencyLimiter, SANDBOX_METRICS, }; mod gas_estimation; @@ -317,12 +317,24 @@ impl TxSender { .context("failed acquiring connection to replica DB") } - #[tracing::instrument(level = "debug", skip_all, fields(tx.hash = ?tx.hash()))] - pub async fn submit_tx( + pub(crate) async fn submit_tx( &self, tx: L2Tx, block_args: BlockArgs, - ) -> Result<(L2TxSubmissionResult, VmExecutionResultAndLogs), SubmitTxError> { + ) -> Result { + self.submit_tx_with_custom_result(tx, block_args).await + } + + #[tracing::instrument(level = "debug", name = "submit_tx", skip_all, fields(tx.hash = ?tx.hash()))] + pub(crate) async fn submit_tx_with_custom_result( + &self, + tx: L2Tx, + block_args: BlockArgs, + ) -> Result { + if !R::is_supported(&self.0.executor) { + return Err(SubmitTxError::ServerShuttingDown); // FIXME: add 
dedicated variant + } + let tx_hash = tx.hash(); let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::Validate); self.validate_tx(&tx, block_args.protocol_version()).await?; @@ -348,7 +360,7 @@ impl TxSender { let execution_output = self .0 .executor - .execute_in_sandbox(vm_permit.clone(), connection, action, &block_args, None) + .execute_with_custom_result(vm_permit.clone(), connection, action, &block_args, None) .await?; tracing::info!( "Submit tx {tx_hash:?} with execution metrics {:?}", @@ -412,11 +424,11 @@ impl TxSender { L2TxSubmissionResult::Proxied => { stage_latency.set_stage(SubmitTxStage::TxProxy); stage_latency.observe(); - Ok((submission_res_handle, execution_output.vm)) + Ok(execution_output.vm) } _ => { stage_latency.observe(); - Ok((submission_res_handle, execution_output.vm)) + Ok(execution_output.vm) } } } diff --git a/core/node/api_server/src/tx_sender/result.rs b/core/node/api_server/src/tx_sender/result.rs index cbc55a73c7ce..0e13b2246ec0 100644 --- a/core/node/api_server/src/tx_sender/result.rs +++ b/core/node/api_server/src/tx_sender/result.rs @@ -1,5 +1,5 @@ use thiserror::Error; -use zksync_multivm::interface::{ExecutionResult, VmExecutionResultAndLogs}; +use zksync_multivm::interface::ExecutionResult; use zksync_types::{l2::error::TxCheckError, U256}; use zksync_web3_decl::error::EnrichedClientError; @@ -158,15 +158,15 @@ pub(crate) trait ApiCallResult: Sized { fn into_api_call_result(self) -> Result, SubmitTxError>; } -impl ApiCallResult for VmExecutionResultAndLogs { +impl ApiCallResult for ExecutionResult { fn check_api_call_result(&self) -> Result<(), SubmitTxError> { - match &self.result { - ExecutionResult::Success { .. } => Ok(()), - ExecutionResult::Revert { output } => Err(SubmitTxError::ExecutionReverted( + match self { + Self::Success { .. 
} => Ok(()), + Self::Revert { output } => Err(SubmitTxError::ExecutionReverted( output.to_user_friendly_string(), output.encoded_data(), )), - ExecutionResult::Halt { reason } => { + Self::Halt { reason } => { let output: SandboxExecutionError = reason.clone().into(); Err(output.into()) } @@ -174,13 +174,13 @@ impl ApiCallResult for VmExecutionResultAndLogs { } fn into_api_call_result(self) -> Result, SubmitTxError> { - match self.result { - ExecutionResult::Success { output } => Ok(output), - ExecutionResult::Revert { output } => Err(SubmitTxError::ExecutionReverted( + match self { + Self::Success { output } => Ok(output), + Self::Revert { output } => Err(SubmitTxError::ExecutionReverted( output.to_user_friendly_string(), output.encoded_data(), )), - ExecutionResult::Halt { reason } => { + Self::Halt { reason } => { let output: SandboxExecutionError = reason.into(); Err(output.into()) } diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs index 954792f915cc..2cd8c273f0b8 100644 --- a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -8,6 +8,7 @@ use zksync_system_constants::CODE_ORACLE_ADDRESS; use zksync_types::{ api::state_override::{OverrideAccount, OverrideState}, bytecode::BytecodeHash, + u256_to_h256, web3::keccak256, K256PrivateKey, }; @@ -55,12 +56,12 @@ async fn initial_gas_estimation_is_somewhat_accurate() { + initial_estimate.operator_overhead; assert!(lower_bound < total_gas_charged, "{initial_estimate:?}"); let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); - assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + assert!(vm_result.is_failed(), "{vm_result:?}"); // A slightly larger limit should work. let initial_pivot = total_gas_charged * 64 / 63; let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + assert!(!vm_result.is_failed(), "{vm_result:?}"); } #[test_casing(5, LOAD_TEST_CASES)] @@ -91,9 +92,9 @@ async fn initial_gas_estimate_for_l1_transaction() { assert!(initial_estimate.total_gas_charged.is_none()); let (vm_result, _) = estimator.unadjusted_step(15_000).await.unwrap(); - assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + assert!(vm_result.is_failed(), "{vm_result:?}"); let (vm_result, _) = estimator.unadjusted_step(1_000_000).await.unwrap(); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + assert!(!vm_result.is_failed(), "{vm_result:?}"); } #[test_casing(2, [false, true])] @@ -151,7 +152,7 @@ async fn test_initial_estimate( state_override: StateOverride, tx: L2Tx, initial_pivot_multiplier: f64, -) -> VmExecutionResultAndLogs { +) -> TransactionExecutionMetrics { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; let block_args = pending_block_args(&tx_sender).await; @@ -164,14 +165,14 @@ async fn test_initial_estimate( let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() + initial_estimate.operator_overhead; let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); - assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + assert!(vm_result.is_failed(), "{vm_result:?}"); // A slightly larger limit should work. 
let initial_pivot = (initial_estimate.total_gas_charged.unwrap() as f64 * initial_pivot_multiplier) as u64; - let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); - vm_result + let (vm_result, metrics) = estimator.unadjusted_step(initial_pivot).await.unwrap(); + assert!(!vm_result.is_failed(), "{vm_result:?}"); + metrics } async fn test_initial_estimate_error(state_override: StateOverride, tx: L2Tx) -> SubmitTxError { @@ -193,14 +194,14 @@ async fn initial_estimate_for_expensive_contract(write_count: usize) { let mut state_override = StateBuilder::default().with_expensive_contract().build(); let tx = alice.create_expensive_tx(write_count); - let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; + let metrics = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; + assert!(metrics.initial_storage_writes >= write_count, "{metrics:?}"); - let contract_logs = vm_result.logs.storage_logs.into_iter().filter_map(|log| { - (*log.log.key.address() == StateBuilder::EXPENSIVE_CONTRACT_ADDRESS) - .then_some((*log.log.key.key(), log.log.value)) - }); - let contract_logs: HashMap<_, _> = contract_logs.collect(); - assert!(contract_logs.len() >= write_count, "{contract_logs:?}"); + let array_start = U256::from_big_endian(&keccak256(&[0_u8; 32])); + let contract_logs = (0..write_count as u64) + .map(|i| (u256_to_h256(array_start + i), H256::from_low_u64_be(i))) + .chain([(H256::zero(), H256::from_low_u64_be(write_count as u64))]) + .collect(); state_override .get_mut(&StateBuilder::EXPENSIVE_CONTRACT_ADDRESS) @@ -251,17 +252,17 @@ async fn initial_estimate_for_code_oracle_tx() { for (hash, keccak_hash) in warm_bytecode_hashes { println!("Testing bytecode: {hash:?}"); let tx = alice.create_code_oracle_tx(hash, keccak_hash); - let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; - let stats = &vm_result.statistics.circuit_statistic; + let metrics = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; + let stats = &metrics.circuit_statistic; decomitter_stats = stats.code_decommitter.max(decomitter_stats); } assert!(decomitter_stats > 0.0); println!("Testing large bytecode"); let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); - let vm_result = test_initial_estimate(state_override, tx, 1.05).await; + let metrics = test_initial_estimate(state_override, tx, 1.05).await; // Sanity check: the transaction should spend significantly more on decommitment compared to previous ones - let new_decomitter_stats = vm_result.statistics.circuit_statistic.code_decommitter; + let new_decomitter_stats = metrics.circuit_statistic.code_decommitter; assert!( new_decomitter_stats > decomitter_stats * 1.5, "old={decomitter_stats}, new={new_decomitter_stats}" diff --git a/core/node/api_server/src/tx_sender/tests/send_tx.rs b/core/node/api_server/src/tx_sender/tests/send_tx.rs index c0b02e45ad89..036a19126e6a 100644 --- a/core/node/api_server/src/tx_sender/tests/send_tx.rs +++ b/core/node/api_server/src/tx_sender/tests/send_tx.rs @@ -46,8 +46,7 @@ async fn submitting_tx_requires_one_connection() { let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; let block_args = pending_block_args(&tx_sender).await; - let submission_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); - assert_matches!(submission_result.0, 
L2TxSubmissionResult::Added); + tx_sender.submit_tx(tx, block_args).await.unwrap(); let mut storage = pool.connection().await.unwrap(); storage @@ -202,9 +201,8 @@ async fn sending_transfer() { drop(storage); let transfer = alice.create_transfer(1_000_000_000.into()); - let (sub_result, vm_result) = tx_sender.submit_tx(transfer, block_args).await.unwrap(); - assert_matches!(sub_result, L2TxSubmissionResult::Added); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + let vm_result = tx_sender.submit_tx(transfer, block_args).await.unwrap(); + assert!(!vm_result.is_failed(), "{vm_result:?}"); } #[tokio::test] @@ -262,9 +260,8 @@ async fn sending_load_test_transaction(tx_params: LoadnextContractExecutionParam drop(storage); let tx = alice.create_load_test_tx(tx_params); - let (sub_result, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); - assert_matches!(sub_result, L2TxSubmissionResult::Added); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + let vm_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert!(!vm_result.is_failed(), "{vm_result:?}"); } #[tokio::test] @@ -283,9 +280,9 @@ async fn sending_reverting_transaction() { drop(storage); let tx = alice.create_counter_tx(1.into(), true); - let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); + let vm_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); assert_matches!( - vm_result.result, + vm_result, ExecutionResult::Revert { output } if output.to_string().contains("This method always reverts") ); } @@ -306,8 +303,8 @@ async fn sending_transaction_out_of_gas() { drop(storage); let tx = alice.create_infinite_loop_tx(); - let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); - assert_matches!(vm_result.result, ExecutionResult::Revert { .. }); + let vm_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!(vm_result, ExecutionResult::Revert { .. }); } async fn submit_tx_with_validation_traces(actual_range: Range, expected_range: Range) { @@ -351,8 +348,7 @@ async fn submit_tx_with_validation_traces(actual_range: Range, expected_ran let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; let block_args = pending_block_args(&tx_sender).await; - let submission_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); - assert_matches!(submission_result.0, L2TxSubmissionResult::Added); + tx_sender.submit_tx(tx, block_args).await.unwrap(); let mut storage = pool.connection().await.unwrap(); let storage_tx = storage diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index a70367a858fb..faae15373a80 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -323,7 +323,7 @@ impl DebugNamespace { ) .await?; - let (output, revert_reason) = match result.vm.result { + let (output, revert_reason) = match result.vm { ExecutionResult::Success { output, .. 
} => (output, None), ExecutionResult::Revert { output } => (vec![], Some(output.to_string())), ExecutionResult::Halt { reason } => { @@ -335,7 +335,7 @@ impl DebugNamespace { }; let call = Call::new_high_level( call.common_data.fee.gas_limit.as_u64(), - result.vm.statistics.gas_used, + result.metrics.gas_used as u64, call.execute.value, call.execute.calldata, output, diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index 9f7c5662a631..e65b3bfcad52 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -695,8 +695,12 @@ impl ZksNamespace { .parse_transaction_bytes(&tx_bytes.0, &block_args)?; tx.set_input(tx_bytes.0, hash); - let submit_result = self.state.tx_sender.submit_tx(tx, block_args).await; - submit_result.map(|result| (hash, result.1)).map_err(|err| { + let submit_result = self + .state + .tx_sender + .submit_tx_with_custom_result(tx, block_args) + .await; + submit_result.map(|result| (hash, result)).map_err(|err| { tracing::debug!("Send raw transaction error: {err}"); API_METRICS.submit_tx_error[&err.prom_error_code()].inc(); err.into() From 0a5e84d16bdac8351f0adc17c20d11b2753a3e2c Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 31 Jan 2025 15:27:31 +0200 Subject: [PATCH 2/7] Remove `event_topics` --- core/lib/vm_interface/src/types/outputs/statistic.rs | 2 -- core/node/api_server/src/execution_sandbox/execute.rs | 2 +- core/node/api_server/src/execution_sandbox/vm_metrics.rs | 7 ------- 3 files changed, 1 insertion(+), 10 deletions(-) diff --git a/core/lib/vm_interface/src/types/outputs/statistic.rs b/core/lib/vm_interface/src/types/outputs/statistic.rs index 39ddc8e4c644..37670c685951 100644 --- a/core/lib/vm_interface/src/types/outputs/statistic.rs +++ b/core/lib/vm_interface/src/types/outputs/statistic.rs @@ -173,7 +173,6 @@ pub struct TransactionExecutionMetrics { pub repeated_storage_writes: usize, pub gas_used: usize, pub gas_remaining: u32, - pub event_topics: u16, // FIXME: never read pub published_bytecode_bytes: usize, pub l2_l1_long_messages: usize, pub l2_l1_logs: usize, @@ -198,7 +197,6 @@ impl Default for TransactionExecutionMetrics { repeated_storage_writes: 0, gas_used: 0, gas_remaining: u32::MAX, - event_topics: 0, published_bytecode_bytes: 0, l2_l1_long_messages: 0, l2_l1_logs: 0, diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index 7f412d6991ad..d03837c8a0e5 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -155,7 +155,7 @@ impl SandboxVmResult for VmExecutionResultAndLogs { #[derive(Debug)] pub(crate) struct SandboxExecutor { // VM execution engine. All RPC methods other than `zks_sendRawTransactionWithDetailedOutput` currently - // do not use anything logs etc. + // do not use logs etc. 
pub(super) engine: Arc>, debug_engine: Option>>, diff --git a/core/node/api_server/src/execution_sandbox/vm_metrics.rs b/core/node/api_server/src/execution_sandbox/vm_metrics.rs index 2f6d21f3a875..1d50100165d7 100644 --- a/core/node/api_server/src/execution_sandbox/vm_metrics.rs +++ b/core/node/api_server/src/execution_sandbox/vm_metrics.rs @@ -136,12 +136,6 @@ pub(super) fn collect_tx_execution_metrics( result: &VmExecutionResultAndLogs, ) -> TransactionExecutionMetrics { let writes_metrics = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - let event_topics = result - .logs - .events - .iter() - .map(|event| event.indexed_topics.len() as u16) - .sum(); let l2_l1_long_messages = VmEvent::extract_long_l2_to_l1_messages(&result.logs.events) .iter() .map(|event| event.len()) @@ -160,7 +154,6 @@ pub(super) fn collect_tx_execution_metrics( repeated_storage_writes: writes_metrics.repeated_storage_writes, gas_used: result.statistics.gas_used as usize, gas_remaining: result.statistics.gas_remaining, - event_topics, published_bytecode_bytes, l2_l1_long_messages, l2_l1_logs: result.logs.total_l2_to_l1_logs_count(), From 1d807fdda71b68b61fc109d4e88080cb7006bd90 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 31 Jan 2025 15:49:27 +0200 Subject: [PATCH 3/7] Remove unused fields in `TransactionExecutionResult` --- core/lib/dal/src/tests/mod.rs | 2 -- .../vm_interface/src/types/outputs/execution_result.rs | 4 +--- core/node/api_server/src/web3/tests/mod.rs | 2 -- core/node/state_keeper/src/executor/mod.rs | 6 ++---- core/node/state_keeper/src/io/persistence.rs | 1 - .../src/io/seal_logic/l2_block_seal_subtasks.rs | 2 -- core/node/state_keeper/src/io/tests/mod.rs | 5 ----- core/node/state_keeper/src/keeper.rs | 6 ------ core/node/state_keeper/src/seal_criteria/mod.rs | 1 - core/node/state_keeper/src/updates/l1_batch_updates.rs | 1 - core/node/state_keeper/src/updates/l2_block_updates.rs | 10 ++-------- core/node/state_keeper/src/updates/mod.rs | 7 ++----- core/node/test_utils/src/lib.rs | 2 -- 13 files changed, 7 insertions(+), 42 deletions(-) diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 11d4e55a55af..54482c4e1d51 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -162,8 +162,6 @@ pub(crate) fn mock_execution_result(transaction: L2Tx) -> TransactionExecutionRe execution_info: VmExecutionMetrics::default(), execution_status: TxExecutionStatus::Success, refunded_gas: 0, - operator_suggested_refund: 0, - compressed_bytecodes: vec![], call_traces: vec![], revert_reason: None, } diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs index 8455aa4b790e..3cdd295f3200 100644 --- a/core/lib/vm_interface/src/types/outputs/execution_result.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs @@ -369,7 +369,7 @@ pub struct OneshotTransactionExecutionResult { pub call_traces: Vec, } -/// High-level transaction execution result used by the API server sandbox etc. +/// High-level transaction execution result used by the state keeper etc. 
#[derive(Debug, Clone, PartialEq)] pub struct TransactionExecutionResult { pub transaction: Transaction, @@ -377,8 +377,6 @@ pub struct TransactionExecutionResult { pub execution_info: VmExecutionMetrics, pub execution_status: TxExecutionStatus, pub refunded_gas: u64, - pub operator_suggested_refund: u64, - pub compressed_bytecodes: Vec, pub call_traces: Vec, pub revert_reason: Option, } diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 25405b50c508..45209209291f 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -333,8 +333,6 @@ fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { execution_info: VmExecutionMetrics::default(), execution_status: TxExecutionStatus::Success, refunded_gas: 0, - operator_suggested_refund: 0, - compressed_bytecodes: vec![], call_traces: vec![], revert_reason: None, } diff --git a/core/node/state_keeper/src/executor/mod.rs b/core/node/state_keeper/src/executor/mod.rs index a6c7b92fa249..13554971b705 100644 --- a/core/node/state_keeper/src/executor/mod.rs +++ b/core/node/state_keeper/src/executor/mod.rs @@ -1,6 +1,6 @@ use zksync_multivm::interface::{ - BatchTransactionExecutionResult, Call, CompressedBytecodeInfo, ExecutionResult, Halt, - VmExecutionMetrics, VmExecutionResultAndLogs, + BatchTransactionExecutionResult, Call, ExecutionResult, Halt, VmExecutionMetrics, + VmExecutionResultAndLogs, }; use zksync_types::Transaction; pub use zksync_vm_executor::batch::MainBatchExecutorFactory; @@ -18,7 +18,6 @@ pub enum TxExecutionResult { Success { tx_result: Box, tx_metrics: Box, - compressed_bytecodes: Vec, call_tracer_result: Vec, gas_remaining: u32, }, @@ -39,7 +38,6 @@ impl TxExecutionResult { tx_metrics: Box::new(res.tx_result.get_execution_metrics(Some(tx))), gas_remaining: res.tx_result.statistics.gas_remaining, tx_result: res.tx_result.clone(), - compressed_bytecodes: res.compressed_bytecodes, call_tracer_result: res.call_traces, }, } diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index 8db7fe4120ed..8a199f606e06 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -507,7 +507,6 @@ mod tests { updates.extend_from_executed_transaction( tx, tx_result, - vec![], VmExecutionMetrics::default(), vec![], ); diff --git a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs index 701e121285d4..dbea295f1536 100644 --- a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs +++ b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs @@ -496,8 +496,6 @@ mod tests { execution_info: Default::default(), execution_status: TxExecutionStatus::Success, refunded_gas: 0, - operator_suggested_refund: 0, - compressed_bytecodes: Vec::new(), call_traces: Vec::new(), revert_reason: None, }]; diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index 39c9552c2fc5..a60f6dd2d758 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -283,7 +283,6 @@ async fn processing_storage_logs_when_sealing_l2_block() { execution_result, VmExecutionMetrics::default(), vec![], - vec![], ); let tx = create_transaction(10, 100); @@ -300,7 +299,6 @@ async fn processing_storage_logs_when_sealing_l2_block() { execution_result, 
VmExecutionMetrics::default(), vec![], - vec![], ); let l1_batch_number = L1BatchNumber(2); @@ -373,7 +371,6 @@ async fn processing_events_when_sealing_l2_block() { execution_result, VmExecutionMetrics::default(), vec![], - vec![], ); } @@ -477,7 +474,6 @@ async fn processing_dynamic_factory_deps_when_sealing_l2_block() { execution_result, VmExecutionMetrics::default(), vec![], - vec![], ); assert_eq!( @@ -568,7 +564,6 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom updates.extend_from_executed_transaction( tx.into(), create_execution_result([]), - vec![], VmExecutionMetrics::default(), vec![], ); diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index 401150a3fccd..6d6b7b68d2bb 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -500,7 +500,6 @@ impl ZkSyncStateKeeper { let TxExecutionResult::Success { tx_result, tx_metrics: tx_execution_metrics, - compressed_bytecodes, call_tracer_result, .. } = result @@ -524,7 +523,6 @@ impl ZkSyncStateKeeper { updates_manager.extend_from_executed_transaction( tx, *tx_result, - compressed_bytecodes, *tx_execution_metrics, call_tracer_result, ); @@ -632,7 +630,6 @@ impl ZkSyncStateKeeper { tx_result, tx_metrics: tx_execution_metrics, call_tracer_result, - compressed_bytecodes, .. } = exec_result else { @@ -643,7 +640,6 @@ impl ZkSyncStateKeeper { updates_manager.extend_from_executed_transaction( tx, *tx_result, - compressed_bytecodes, *tx_execution_metrics, call_tracer_result, ); @@ -701,7 +697,6 @@ impl ZkSyncStateKeeper { let TxExecutionResult::Success { tx_result, tx_metrics: tx_execution_metrics, - compressed_bytecodes, call_tracer_result, .. } = exec_result @@ -718,7 +713,6 @@ impl ZkSyncStateKeeper { updates_manager.extend_from_executed_transaction( tx, *tx_result, - compressed_bytecodes, *tx_execution_metrics, call_tracer_result, ); diff --git a/core/node/state_keeper/src/seal_criteria/mod.rs b/core/node/state_keeper/src/seal_criteria/mod.rs index 2ea039da57bc..9081637d5628 100644 --- a/core/node/state_keeper/src/seal_criteria/mod.rs +++ b/core/node/state_keeper/src/seal_criteria/mod.rs @@ -280,7 +280,6 @@ mod tests { manager.extend_from_executed_transaction( tx, create_execution_result([]), - vec![], VmExecutionMetrics::default(), vec![], ); diff --git a/core/node/state_keeper/src/updates/l1_batch_updates.rs b/core/node/state_keeper/src/updates/l1_batch_updates.rs index f7a93b4870b9..739a4a57e0e3 100644 --- a/core/node/state_keeper/src/updates/l1_batch_updates.rs +++ b/core/node/state_keeper/src/updates/l1_batch_updates.rs @@ -70,7 +70,6 @@ mod tests { create_execution_result([]), VmExecutionMetrics::default(), vec![], - vec![], ); let mut l1_batch_accumulator = L1BatchUpdates::new(L1BatchNumber(1)); diff --git a/core/node/state_keeper/src/updates/l2_block_updates.rs b/core/node/state_keeper/src/updates/l2_block_updates.rs index 628f9e4a2910..d33b8aa70c35 100644 --- a/core/node/state_keeper/src/updates/l2_block_updates.rs +++ b/core/node/state_keeper/src/updates/l2_block_updates.rs @@ -2,8 +2,8 @@ use std::collections::HashMap; use zksync_multivm::{ interface::{ - Call, CompressedBytecodeInfo, ExecutionResult, L2BlockEnv, TransactionExecutionResult, - TxExecutionStatus, VmEvent, VmExecutionMetrics, VmExecutionResultAndLogs, + Call, ExecutionResult, L2BlockEnv, TransactionExecutionResult, TxExecutionStatus, VmEvent, + VmExecutionMetrics, VmExecutionResultAndLogs, }, vm_latest::TransactionVmExt, }; @@ -77,20 +77,17 @@ impl 
L2BlockUpdates { self.block_execution_metrics += execution_metrics; } - #[allow(clippy::too_many_arguments)] pub(crate) fn extend_from_executed_transaction( &mut self, tx: Transaction, tx_execution_result: VmExecutionResultAndLogs, execution_metrics: VmExecutionMetrics, - compressed_bytecodes: Vec, call_traces: Vec, ) { let saved_factory_deps = VmEvent::extract_bytecodes_marked_as_known(&tx_execution_result.logs.events); let gas_refunded = tx_execution_result.refunds.gas_refunded; - let operator_suggested_refund = tx_execution_result.refunds.operator_suggested_refund; let execution_status = if tx_execution_result.result.is_failed() { TxExecutionStatus::Failure } else { @@ -160,8 +157,6 @@ impl L2BlockUpdates { execution_info: execution_metrics, execution_status, refunded_gas: gas_refunded, - operator_suggested_refund, - compressed_bytecodes, call_traces, revert_reason, }); @@ -212,7 +207,6 @@ mod tests { create_execution_result([]), VmExecutionMetrics::default(), vec![], - vec![], ); assert_eq!(accumulator.executed_transactions.len(), 1); diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index b4f548527652..5a51e82f3573 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -1,8 +1,8 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_multivm::{ interface::{ - storage::StorageViewCache, Call, CompressedBytecodeInfo, FinishedL1Batch, L1BatchEnv, - SystemEnv, VmExecutionMetrics, VmExecutionResultAndLogs, + storage::StorageViewCache, Call, FinishedL1Batch, L1BatchEnv, SystemEnv, + VmExecutionMetrics, VmExecutionResultAndLogs, }, utils::{get_batch_base_fee, StorageWritesDeduplicator}, }; @@ -115,7 +115,6 @@ impl UpdatesManager { &mut self, tx: Transaction, tx_execution_result: VmExecutionResultAndLogs, - compressed_bytecodes: Vec, execution_metrics: VmExecutionMetrics, call_traces: Vec, ) { @@ -128,7 +127,6 @@ impl UpdatesManager { tx, tx_execution_result, execution_metrics, - compressed_bytecodes, call_traces, ); latency.observe(); @@ -232,7 +230,6 @@ mod tests { updates_manager.extend_from_executed_transaction( tx, create_execution_result([]), - vec![], VmExecutionMetrics::default(), vec![], ); diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index ac900e72bb6b..640e3caf9f9e 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -172,8 +172,6 @@ pub fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { execution_info: VmExecutionMetrics::default(), execution_status: TxExecutionStatus::Success, refunded_gas: 0, - operator_suggested_refund: 0, - compressed_bytecodes: vec![], call_traces: vec![], revert_reason: None, } From 72cea0e22c9991fea987f49f5145d6efed216e91 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 31 Jan 2025 16:19:15 +0200 Subject: [PATCH 4/7] Unify executor outputs --- core/lib/vm_executor/src/batch/factory.rs | 30 +++++++--------- .../src/types/outputs/execution_result.rs | 35 +++++++------------ .../state_keeper/src/executor/tests/mod.rs | 24 ++++++++++--- core/node/state_keeper/src/testonly/mod.rs | 2 +- .../src/testonly/test_batch_executor.rs | 24 +++++++------ 5 files changed, 60 insertions(+), 55 deletions(-) diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index 64afde12b9ed..73745498cea4 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -1,4 +1,4 @@ -use 
std::{borrow::Cow, fmt, marker::PhantomData, rc::Rc, sync::Arc, time::Duration}; +use std::{fmt, marker::PhantomData, rc::Rc, sync::Arc, time::Duration}; use anyhow::Context as _; use once_cell::sync::OnceCell; @@ -9,9 +9,8 @@ use zksync_multivm::{ pubdata::PubdataBuilder, storage::{ReadStorage, StoragePtr, StorageView, StorageViewStats}, utils::{DivergenceHandler, ShadowMut}, - BatchTransactionExecutionResult, BytecodeCompressionError, Call, CompressedBytecodeInfo, - ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, + BatchTransactionExecutionResult, Call, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, + L2BlockEnv, SystemEnv, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, is_supported_by_fast_vm, pubdata_builders::pubdata_params_to_builder, @@ -174,8 +173,6 @@ impl BatchExecutorFactory } } -type BytecodeResult = Result, BytecodeCompressionError>; - #[derive(Debug)] enum BatchVm { Legacy(LegacyVmInstance), @@ -241,7 +238,7 @@ impl BatchVm { &mut self, tx: Transaction, with_compression: bool, - ) -> BatchTransactionExecutionResult { + ) -> BatchTransactionExecutionResult { let legacy_tracer_result = Arc::new(OnceCell::default()); let legacy_tracer = if Tr::TRACE_CALLS { vec![CallTracer::new(legacy_tracer_result.clone()).into_tracer_pointer()] @@ -270,7 +267,7 @@ impl BatchVm { } }; - let compressed_bytecodes = compression_result.map(Cow::into_owned); + let compressed_bytecodes = compression_result.map(drop); let legacy_traces = Arc::try_unwrap(legacy_tracer_result) .expect("failed extracting call traces") .take() @@ -289,7 +286,7 @@ impl BatchVm { BatchTransactionExecutionResult { tx_result: Box::new(tx_result), - compressed_bytecodes, + compression_result: compressed_bytecodes, call_traces, } } @@ -466,10 +463,10 @@ impl CommandReceiver { // and so we re-execute the transaction, but without compression. 
let res = vm.inspect_transaction(tx.clone(), true); - if let Ok(compressed_bytecodes) = res.compressed_bytecodes { + if res.compression_result.is_ok() { return Ok(BatchTransactionExecutionResult { tx_result: res.tx_result, - compressed_bytecodes, + compression_result: Ok(()), call_traces: res.call_traces, }); } @@ -480,12 +477,11 @@ impl CommandReceiver { vm.make_snapshot(); let res = vm.inspect_transaction(tx.clone(), false); - let compressed_bytecodes = res - .compressed_bytecodes + res.compression_result .context("compression failed when it wasn't applied")?; Ok(BatchTransactionExecutionResult { tx_result: res.tx_result, - compressed_bytecodes, + compression_result: Ok(()), call_traces: res.call_traces, }) } @@ -498,10 +494,10 @@ impl CommandReceiver { vm: &mut BatchVm, ) -> anyhow::Result { let res = vm.inspect_transaction(tx.clone(), true); - if let Ok(compressed_bytecodes) = res.compressed_bytecodes { + if res.compression_result.is_ok() { Ok(BatchTransactionExecutionResult { tx_result: res.tx_result, - compressed_bytecodes, + compression_result: Ok(()), call_traces: res.call_traces, }) } else { @@ -512,7 +508,7 @@ impl CommandReceiver { }; Ok(BatchTransactionExecutionResult { tx_result, - compressed_bytecodes: vec![], + compression_result: Ok(()), call_traces: vec![], }) } diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs index 3cdd295f3200..4e9083d822b1 100644 --- a/core/lib/vm_interface/src/types/outputs/execution_result.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs @@ -13,15 +13,9 @@ use zksync_types::{ }; use crate::{ - BytecodeCompressionError, CompressedBytecodeInfo, Halt, VmExecutionMetrics, - VmExecutionStatistics, VmRevertReason, + BytecodeCompressionError, Halt, VmExecutionMetrics, VmExecutionStatistics, VmRevertReason, }; -const L1_MESSAGE_EVENT_SIGNATURE: H256 = H256([ - 58, 54, 228, 114, 145, 244, 32, 31, 175, 19, 127, 171, 8, 29, 146, 41, 91, 206, 45, 83, 190, - 44, 108, 166, 139, 168, 44, 127, 170, 156, 226, 65, -]); - pub fn bytecode_len_in_bytes(bytecodehash: H256) -> usize { usize::from(u16::from_be_bytes([bytecodehash[2], bytecodehash[3]])) * 32 } @@ -51,6 +45,11 @@ impl VmEvent { 201, 71, 34, 255, 19, 234, 207, 83, 84, 124, 71, 65, 218, 181, 34, 131, 83, 160, 89, 56, 255, 205, 213, 212, 162, 213, 51, 174, 14, 97, 130, 135, ]); + /// Long signature of the L1 messenger publication event (`L1MessageSent`). + pub const L1_MESSAGE_EVENT_SIGNATURE: H256 = H256([ + 58, 54, 228, 114, 145, 244, 32, 31, 175, 19, 127, 171, 8, 29, 146, 41, 91, 206, 45, 83, + 190, 44, 108, 166, 139, 168, 44, 127, 170, 156, 226, 65, + ]); /// Extracts all the "long" L2->L1 messages that were submitted by the L1Messenger contract. pub fn extract_long_l2_to_l1_messages(events: &[Self]) -> Vec> { @@ -60,7 +59,7 @@ impl VmEvent { // Filter events from the l1 messenger contract that match the expected signature. event.address == L1_MESSENGER_ADDRESS && event.indexed_topics.len() == 3 - && event.indexed_topics[0] == L1_MESSAGE_EVENT_SIGNATURE + && event.indexed_topics[0] == Self::L1_MESSAGE_EVENT_SIGNATURE }) .map(|event| { let decoded_tokens = ethabi::decode(&[ethabi::ParamType::Bytes], &event.value) @@ -342,32 +341,24 @@ impl Call { } /// Mid-level transaction execution output returned by a [batch executor](crate::executor::BatchExecutor). -#[derive(Debug, Clone)] -pub struct BatchTransactionExecutionResult> { +#[derive(Debug)] +pub struct BatchTransactionExecutionResult { /// VM result. 
pub tx_result: Box, /// Compressed bytecodes used by the transaction. - pub compressed_bytecodes: C, + pub compression_result: Result<(), BytecodeCompressionError>, /// Call traces (if requested; otherwise, empty). pub call_traces: Vec, } -impl BatchTransactionExecutionResult { +impl BatchTransactionExecutionResult { pub fn was_halted(&self) -> bool { matches!(self.tx_result.result, ExecutionResult::Halt { .. }) } } /// Mid-level transaction execution output returned by a [oneshot executor](crate::executor::OneshotExecutor). -#[derive(Debug)] -pub struct OneshotTransactionExecutionResult { - /// VM result. - pub tx_result: Box, - /// Result of compressing bytecodes used by the transaction. - pub compression_result: Result<(), BytecodeCompressionError>, - /// Call traces (if requested; otherwise, empty). - pub call_traces: Vec, -} +pub type OneshotTransactionExecutionResult = BatchTransactionExecutionResult; /// High-level transaction execution result used by the state keeper etc. #[derive(Debug, Clone, PartialEq)] @@ -440,7 +431,7 @@ mod tests { ethabi::ParamType::Bytes, ], ); - assert_eq!(L1_MESSAGE_EVENT_SIGNATURE, expected_signature); + assert_eq!(VmEvent::L1_MESSAGE_EVENT_SIGNATURE, expected_signature); } #[test] diff --git a/core/node/state_keeper/src/executor/tests/mod.rs b/core/node/state_keeper/src/executor/tests/mod.rs index 8d735e9ed920..43415a26fe80 100644 --- a/core/node/state_keeper/src/executor/tests/mod.rs +++ b/core/node/state_keeper/src/executor/tests/mod.rs @@ -4,11 +4,12 @@ use test_casing::{test_casing, Product}; use zksync_contracts::l2_message_root; use zksync_dal::{ConnectionPool, Core}; use zksync_multivm::interface::{ - BatchTransactionExecutionResult, Call, CallType, ExecutionResult, Halt, + BatchTransactionExecutionResult, Call, CallType, ExecutionResult, Halt, VmEvent, }; +use zksync_system_constants::{COMPRESSOR_ADDRESS, L1_MESSENGER_ADDRESS}; use zksync_test_contracts::{Account, TestContract}; use zksync_types::{ - get_nonce_key, + address_to_h256, get_nonce_key, utils::{deployed_address_create, storage_key_for_eth_balance}, vm::FastVmMode, web3, Execute, PriorityOpId, H256, L2_MESSAGE_ROOT_ADDRESS, U256, @@ -849,9 +850,22 @@ async fn execute_tx_with_large_packable_bytecode(vm_mode: FastVmMode) { let res = executor.execute_tx(tx).await.unwrap(); assert_matches!(res.tx_result.result, ExecutionResult::Success { .. }); - assert_eq!(res.compressed_bytecodes.len(), 1); - assert_eq!(res.compressed_bytecodes[0].original, packable_bytecode); - assert!(res.compressed_bytecodes[0].compressed.len() < BYTECODE_LEN / 2); + res.compression_result.unwrap(); + + let events = &res.tx_result.logs.events; + // Extract compressed bytecodes from the long L2-to-L1 messages by the compressor contract. 
+ let compressed_bytecodes: Vec<_> = events + .iter() + .filter(|event| { + event.address == L1_MESSENGER_ADDRESS + && event.indexed_topics[0] == VmEvent::L1_MESSAGE_EVENT_SIGNATURE + && event.indexed_topics[1] == address_to_h256(&COMPRESSOR_ADDRESS) + }) + .map(|event| &event.value) + .collect(); + + assert_eq!(compressed_bytecodes.len(), 1); + assert!(compressed_bytecodes[0].len() < BYTECODE_LEN / 2); executor.finish_batch().await.unwrap(); } diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 0484fe3198fd..0b69a6ffe5fd 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -30,7 +30,7 @@ pub(super) static BASE_SYSTEM_CONTRACTS: Lazy = pub(crate) fn successful_exec() -> BatchTransactionExecutionResult { BatchTransactionExecutionResult { tx_result: Box::new(VmExecutionResultAndLogs::mock_success()), - compressed_bytecodes: vec![], + compression_result: Ok(()), call_traces: vec![], } } diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index 9a675c7e97e8..edb77e200d61 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -263,7 +263,7 @@ pub(crate) fn successful_exec_with_log() -> BatchTransactionExecutionResult { }, ..VmExecutionResultAndLogs::mock_success() }), - compressed_bytecodes: vec![], + compression_result: Ok(()), call_traces: vec![], } } @@ -274,7 +274,7 @@ pub(crate) fn rejected_exec(reason: Halt) -> BatchTransactionExecutionResult { tx_result: Box::new(VmExecutionResultAndLogs::mock(ExecutionResult::Halt { reason, })), - compressed_bytecodes: vec![], + compression_result: Ok(()), call_traces: vec![], } } @@ -369,14 +369,18 @@ impl TestBatchExecutorBuilder { for item in &scenario.actions { match item { ScenarioItem::Tx(_, tx, result) => { - batch_txs - .entry(tx.hash()) - .and_modify(|txs| txs.push_back(result.clone())) - .or_insert_with(|| { - let mut txs = VecDeque::with_capacity(1); - txs.push_back(result.clone()); - txs - }); + result.compression_result.as_ref().unwrap(); + let result = BatchTransactionExecutionResult { + tx_result: result.tx_result.clone(), + compression_result: Ok(()), + call_traces: result.call_traces.clone(), + }; + + if let Some(txs) = batch_txs.get_mut(&tx.hash()) { + txs.push_back(result); + } else { + batch_txs.insert(tx.hash(), VecDeque::from([result])); + } } ScenarioItem::Rollback(_, tx) => { rollback_set.insert(tx.hash()); From dad34891ed922b1f6c2bf60f33e532b27a0f2cd6 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 31 Jan 2025 17:54:43 +0200 Subject: [PATCH 5/7] Simplify `SandboxExecutorEngine` --- .../src/types/outputs/execution_result.rs | 6 -- .../src/execution_sandbox/execute.rs | 82 ++++++------------- .../api_server/src/execution_sandbox/mod.rs | 2 +- .../api_server/src/execution_sandbox/tests.rs | 4 +- .../src/tx_sender/gas_estimation.rs | 2 +- core/node/api_server/src/tx_sender/mod.rs | 32 +++----- .../api_server/src/tx_sender/tests/send_tx.rs | 8 +- .../web3/backend_jsonrpsee/namespaces/zks.rs | 46 +---------- .../api_server/src/web3/namespaces/debug.rs | 2 +- .../api_server/src/web3/namespaces/zks.rs | 59 ++++++++++--- 10 files changed, 91 insertions(+), 152 deletions(-) diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs index 4e9083d822b1..ded0a9409667 100644 --- 
a/core/lib/vm_interface/src/types/outputs/execution_result.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs @@ -156,12 +156,6 @@ impl ExecutionResult { } } -impl From for ExecutionResult { - fn from(full_result: VmExecutionResultAndLogs) -> Self { - full_result.result - } -} - impl VmExecutionResultAndLogs { /// Creates a mock full result based on the provided base result. pub fn mock(result: ExecutionResult) -> Self { diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index d03837c8a0e5..26d1bbd27b6a 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -2,7 +2,6 @@ use std::{ fmt, - sync::Arc, time::{Duration, Instant}, }; @@ -15,11 +14,11 @@ use zksync_multivm::interface::{ storage::StorageWithOverrides, tracer::TimestampAsserterParams, Call, ExecutionResult, OneshotEnv, OneshotTracingParams, TransactionExecutionMetrics, - TxExecutionArgs, VmExecutionResultAndLogs, + TxExecutionArgs, VmEvent, }; use zksync_state::{PostgresStorage, PostgresStorageCaches}; use zksync_types::{ - api::state_override::StateOverride, fee_model::BatchFeeInput, l2::L2Tx, Transaction, + api::state_override::StateOverride, fee_model::BatchFeeInput, l2::L2Tx, StorageLog, Transaction, }; use zksync_vm_executor::oneshot::{MainOneshotExecutor, MockOneshotExecutor}; @@ -71,9 +70,13 @@ impl SandboxAction { /// Output of [`SandboxExecutor::execute_in_sandbox()`]. #[derive(Debug, Clone)] -pub(crate) struct SandboxExecutionOutput { +pub(crate) struct SandboxExecutionOutput { /// Output of the VM. - pub vm: R, + pub result: ExecutionResult, + /// Write logs produced by the VM. + pub write_logs: Vec, + /// Events produced by the VM. + pub events: Vec, /// Traced calls if requested. pub call_traces: Vec, /// Execution metrics. @@ -86,7 +89,7 @@ type SandboxStorage = StorageWithOverrides>; /// Higher-level wrapper around a oneshot VM executor used in the API server. 
#[async_trait] -pub(crate) trait SandboxExecutorEngine: +pub(crate) trait SandboxExecutorEngine: Send + Sync + fmt::Debug + TransactionValidator { async fn execute_in_sandbox( @@ -95,18 +98,17 @@ pub(crate) trait SandboxExecutorEngine: env: OneshotEnv, args: TxExecutionArgs, tracing_params: OneshotTracingParams, - ) -> anyhow::Result>; + ) -> anyhow::Result; } #[async_trait] -impl SandboxExecutorEngine for T +impl SandboxExecutorEngine for T where T: OneshotExecutor + TransactionValidator + Send + Sync + fmt::Debug, - R: From, { async fn execute_in_sandbox( &self, @@ -114,7 +116,7 @@ where env: OneshotEnv, args: TxExecutionArgs, tracing_params: OneshotTracingParams, - ) -> anyhow::Result> { + ) -> anyhow::Result { let total_factory_deps = args.transaction.execute.factory_deps.len() as u16; let result = self .inspect_transaction_with_bytecode_compression(storage, env, args, tracing_params) @@ -122,8 +124,14 @@ where let metrics = vm_metrics::collect_tx_execution_metrics(total_factory_deps, &result.tx_result); + let storage_logs = result.tx_result.logs.storage_logs; Ok(SandboxExecutionOutput { - vm: R::from(*result.tx_result), + result: result.tx_result.result, + write_logs: storage_logs + .into_iter() + .filter_map(|log| log.log.is_write().then_some(log.log)) + .collect(), + events: result.tx_result.logs.events, call_traces: result.call_traces, metrics, are_published_bytecodes_ok: result.compression_result.is_ok(), @@ -131,34 +139,10 @@ where } } -pub(crate) trait SandboxVmResult { - fn engine(executor: &SandboxExecutor) -> Option<&dyn SandboxExecutorEngine>; - - fn is_supported(executor: &SandboxExecutor) -> bool { - Self::engine(executor).is_some() - } -} - -impl SandboxVmResult for ExecutionResult { - fn engine(executor: &SandboxExecutor) -> Option<&dyn SandboxExecutorEngine> { - Some(executor.engine.as_ref()) - } -} - -impl SandboxVmResult for VmExecutionResultAndLogs { - fn engine(executor: &SandboxExecutor) -> Option<&dyn SandboxExecutorEngine> { - executor.debug_engine.as_deref() - } -} - /// Executor of transactions / calls used in the API server. #[derive(Debug)] pub(crate) struct SandboxExecutor { - // VM execution engine. All RPC methods other than `zks_sendRawTransactionWithDetailedOutput` currently - // do not use logs etc. 
- pub(super) engine: Arc>, - debug_engine: Option>>, - + pub(super) engine: Box, pub(super) options: SandboxExecutorOptions, storage_caches: Option, pub(super) timestamp_asserter_params: Option, @@ -178,10 +162,8 @@ impl SandboxExecutor { executor .set_execution_latency_histogram(&SANDBOX_METRICS.sandbox[&SandboxStage::Execution]); - let engine = Arc::new(executor); Self { - engine: engine.clone(), - debug_engine: Some(engine), + engine: Box::new(executor), options, storage_caches: Some(caches), timestamp_asserter_params, @@ -196,10 +178,8 @@ impl SandboxExecutor { executor: MockOneshotExecutor, options: SandboxExecutorOptions, ) -> Self { - let engine = Arc::new(executor); Self { - engine: engine.clone(), - debug_engine: Some(engine), + engine: Box::new(executor), options, storage_caches: None, timestamp_asserter_params: None, @@ -209,27 +189,13 @@ impl SandboxExecutor { /// This method assumes that (block with number `resolved_block_number` is present in DB) /// or (`block_id` is `pending` and block with number `resolved_block_number - 1` is present in DB) pub async fn execute_in_sandbox( - &self, - vm_permit: VmPermit, - connection: Connection<'static, Core>, - action: SandboxAction, - block_args: &BlockArgs, - state_override: Option, - ) -> anyhow::Result { - self.execute_with_custom_result(vm_permit, connection, action, block_args, state_override) - .await - } - - #[tracing::instrument(level = "debug", name = "execute", skip_all)] - pub async fn execute_with_custom_result( &self, _vm_permit: VmPermit, connection: Connection<'static, Core>, action: SandboxAction, block_args: &BlockArgs, state_override: Option, - ) -> anyhow::Result> { - let engine = R::engine(self).context("not supported")?; + ) -> anyhow::Result { let (env, storage) = self .prepare_env_and_storage(connection, block_args, &action) .await?; @@ -237,7 +203,7 @@ impl SandboxExecutor { let state_override = state_override.unwrap_or_default(); let storage = apply_state_override(storage, &state_override); let (execution_args, tracing_params) = action.into_parts(); - engine + self.engine .execute_in_sandbox(storage, env, execution_args, tracing_params) .await } diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index d4d3c268f34b..413080328808 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -15,7 +15,7 @@ use zksync_vm_executor::oneshot::{BlockInfo, ResolvedBlockInfo}; use self::vm_metrics::SandboxStage; pub(super) use self::{ error::SandboxExecutionError, - execute::{SandboxAction, SandboxExecutor, SandboxVmResult}, + execute::{SandboxAction, SandboxExecutionOutput, SandboxExecutor}, validate::ValidationError, vm_metrics::{SubmitTxStage, SANDBOX_METRICS}, }; diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index 98edd3a4971f..f5ccec457698 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -247,7 +247,7 @@ async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args .unwrap(); assert!(output.are_published_bytecodes_ok); - let tx_result = output.vm; + let tx_result = output.result; assert!(!tx_result.is_failed(), "{tx_result:#?}"); } @@ -305,7 +305,7 @@ async fn validating_transaction(set_balance: bool) { .await .unwrap(); - let result = result.vm; + let result = result.result; if set_balance { assert_matches!(result, 
ExecutionResult::Success { .. }); } else { diff --git a/core/node/api_server/src/tx_sender/gas_estimation.rs b/core/node/api_server/src/tx_sender/gas_estimation.rs index e872a033c534..d2e62177dd90 100644 --- a/core/node/api_server/src/tx_sender/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/gas_estimation.rs @@ -477,7 +477,7 @@ impl<'a> GasEstimator<'a> { self.state_override.clone(), ) .await?; - Ok((execution_output.vm, execution_output.metrics)) + Ok((execution_output.result, execution_output.metrics)) } async fn finalize( diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 449f2dd815c9..eb3d45ceacfb 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -10,8 +10,8 @@ use zksync_dal::{ }; use zksync_multivm::{ interface::{ - tracer::TimestampAsserterParams as TracerTimestampAsserterParams, ExecutionResult, - OneshotTracingParams, TransactionExecutionMetrics, + tracer::TimestampAsserterParams as TracerTimestampAsserterParams, OneshotTracingParams, + TransactionExecutionMetrics, }, utils::{ derive_base_fee_and_gas_per_pubdata, get_max_batch_gas_limit, get_max_new_factory_deps, @@ -40,7 +40,7 @@ use zksync_vm_executor::oneshot::{ pub(super) use self::{gas_estimation::BinarySearchKind, result::SubmitTxError}; use self::{master_pool_sink::MasterPoolSink, result::ApiCallResult, tx_sink::TxSink}; use crate::execution_sandbox::{ - BlockArgs, SandboxAction, SandboxExecutor, SandboxVmResult, SubmitTxStage, + BlockArgs, SandboxAction, SandboxExecutionOutput, SandboxExecutor, SubmitTxStage, VmConcurrencyBarrier, VmConcurrencyLimiter, SANDBOX_METRICS, }; @@ -317,24 +317,12 @@ impl TxSender { .context("failed acquiring connection to replica DB") } - pub(crate) async fn submit_tx( - &self, - tx: L2Tx, - block_args: BlockArgs, - ) -> Result { - self.submit_tx_with_custom_result(tx, block_args).await - } - #[tracing::instrument(level = "debug", name = "submit_tx", skip_all, fields(tx.hash = ?tx.hash()))] - pub(crate) async fn submit_tx_with_custom_result( + pub(crate) async fn submit_tx( &self, tx: L2Tx, block_args: BlockArgs, - ) -> Result { - if !R::is_supported(&self.0.executor) { - return Err(SubmitTxError::ServerShuttingDown); // FIXME: add dedicated variant - } - + ) -> Result { let tx_hash = tx.hash(); let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::Validate); self.validate_tx(&tx, block_args.protocol_version()).await?; @@ -360,7 +348,7 @@ impl TxSender { let execution_output = self .0 .executor - .execute_with_custom_result(vm_permit.clone(), connection, action, &block_args, None) + .execute_in_sandbox(vm_permit.clone(), connection, action, &block_args, None) .await?; tracing::info!( "Submit tx {tx_hash:?} with execution metrics {:?}", @@ -424,11 +412,11 @@ impl TxSender { L2TxSubmissionResult::Proxied => { stage_latency.set_stage(SubmitTxStage::TxProxy); stage_latency.observe(); - Ok(execution_output.vm) + Ok(execution_output) } - _ => { + L2TxSubmissionResult::Added | L2TxSubmissionResult::Replaced => { stage_latency.observe(); - Ok(execution_output.vm) + Ok(execution_output) } } } @@ -648,7 +636,7 @@ impl TxSender { .executor .execute_in_sandbox(vm_permit, connection, action, &block_args, state_override) .await?; - result.vm.into_api_call_result() + result.result.into_api_call_result() } pub async fn gas_price(&self) -> anyhow::Result { diff --git a/core/node/api_server/src/tx_sender/tests/send_tx.rs 
b/core/node/api_server/src/tx_sender/tests/send_tx.rs index 036a19126e6a..0209487c42b3 100644 --- a/core/node/api_server/src/tx_sender/tests/send_tx.rs +++ b/core/node/api_server/src/tx_sender/tests/send_tx.rs @@ -202,7 +202,7 @@ async fn sending_transfer() { let transfer = alice.create_transfer(1_000_000_000.into()); let vm_result = tx_sender.submit_tx(transfer, block_args).await.unwrap(); - assert!(!vm_result.is_failed(), "{vm_result:?}"); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); } #[tokio::test] @@ -261,7 +261,7 @@ async fn sending_load_test_transaction(tx_params: LoadnextContractExecutionParam let tx = alice.create_load_test_tx(tx_params); let vm_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); - assert!(!vm_result.is_failed(), "{vm_result:?}"); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); } #[tokio::test] @@ -282,7 +282,7 @@ async fn sending_reverting_transaction() { let tx = alice.create_counter_tx(1.into(), true); let vm_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); assert_matches!( - vm_result, + vm_result.result, ExecutionResult::Revert { output } if output.to_string().contains("This method always reverts") ); } @@ -304,7 +304,7 @@ async fn sending_transaction_out_of_gas() { let tx = alice.create_infinite_loop_tx(); let vm_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); - assert_matches!(vm_result, ExecutionResult::Revert { .. }); + assert_matches!(vm_result.result, ExecutionResult::Revert { .. }); } async fn submit_tx_with_validation_traces(actual_range: Range, expected_range: Range) { diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs index 21f3f5ae49e1..3b5b920ca2f8 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs @@ -1,11 +1,9 @@ use std::collections::HashMap; -use zksync_multivm::interface::VmEvent; use zksync_types::{ api::{ - state_override::StateOverride, ApiStorageLog, BlockDetails, BridgeAddresses, - L1BatchDetails, L2ToL1LogProof, Log, Proof, ProtocolVersion, TransactionDetailedResult, - TransactionDetails, + state_override::StateOverride, BlockDetails, BridgeAddresses, L1BatchDetails, + L2ToL1LogProof, Proof, ProtocolVersion, TransactionDetailedResult, TransactionDetails, }, fee::Fee, fee_model::{FeeParams, PubdataIndependentBatchFeeModelInput}, @@ -203,46 +201,6 @@ impl ZksNamespaceServer for ZksNamespace { ) -> RpcResult { self.send_raw_transaction_with_detailed_output_impl(tx_bytes) .await - .map(|result| TransactionDetailedResult { - transaction_hash: result.0, - storage_logs: result - .1 - .logs - .storage_logs - .iter() - .filter(|x| x.log.is_write()) - .map(ApiStorageLog::from) - .collect(), - events: result - .1 - .logs - .events - .iter() - .map(|event| { - let mut log = map_event(event); - log.transaction_hash = Some(result.0); - log - }) - .collect(), - }) .map_err(|err| self.current_method().map_err(err)) } } - -fn map_event(vm_event: &VmEvent) -> Log { - Log { - address: vm_event.address, - topics: vm_event.indexed_topics.clone(), - data: web3::Bytes::from(vm_event.value.clone()), - block_hash: None, - block_number: None, - l1_batch_number: Some(U64::from(vm_event.location.0 .0)), - transaction_hash: None, - transaction_index: Some(web3::Index::from(vm_event.location.1)), - log_index: None, - transaction_log_index: None, - log_type: None, - removed: Some(false), - block_timestamp: None, - } 
-} diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index faae15373a80..e0704febb17e 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -323,7 +323,7 @@ impl DebugNamespace { ) .await?; - let (output, revert_reason) = match result.vm { + let (output, revert_reason) = match result.result { ExecutionResult::Success { output, .. } => (output, None), ExecutionResult::Revert { output } => (vec![], Some(output.to_string())), ExecutionResult::Halt { reason } => { diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index e65b3bfcad52..c7e742fc24c7 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -5,13 +5,14 @@ use zksync_crypto_primitives::hasher::{keccak::KeccakHasher, Hasher}; use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_metadata_calculator::api_server::TreeApiError; use zksync_mini_merkle_tree::MiniMerkleTree; -use zksync_multivm::interface::VmExecutionResultAndLogs; +use zksync_multivm::interface::VmEvent; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ address_to_h256, api::{ - state_override::StateOverride, BlockDetails, BridgeAddresses, GetLogsFilter, - L1BatchDetails, L2ToL1LogProof, Proof, ProtocolVersion, StorageProof, TransactionDetails, + self, state_override::StateOverride, BlockDetails, BridgeAddresses, GetLogsFilter, + L1BatchDetails, L2ToL1LogProof, Proof, ProtocolVersion, StorageProof, + TransactionDetailedResult, TransactionDetails, }, fee::Fee, fee_model::{FeeParams, PubdataIndependentBatchFeeModelInput}, @@ -22,6 +23,7 @@ use zksync_types::{ tokens::ETHEREUM_ADDRESS, transaction_request::CallRequest, utils::storage_key_for_standard_token_balance, + web3, web3::Bytes, AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, Transaction, L1_MESSENGER_ADDRESS, L2_BASE_TOKEN_ADDRESS, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, U64, @@ -686,24 +688,55 @@ impl ZksNamespace { pub async fn send_raw_transaction_with_detailed_output_impl( &self, tx_bytes: Bytes, - ) -> Result<(H256, VmExecutionResultAndLogs), Web3Error> { + ) -> Result { let mut connection = self.state.acquire_connection().await?; let block_args = BlockArgs::pending(&mut connection).await?; drop(connection); - let (mut tx, hash) = self + let (mut tx, tx_hash) = self .state .parse_transaction_bytes(&tx_bytes.0, &block_args)?; - tx.set_input(tx_bytes.0, hash); + tx.set_input(tx_bytes.0, tx_hash); - let submit_result = self + let submit_output = self .state .tx_sender - .submit_tx_with_custom_result(tx, block_args) - .await; - submit_result.map(|result| (hash, result)).map_err(|err| { - tracing::debug!("Send raw transaction error: {err}"); - API_METRICS.submit_tx_error[&err.prom_error_code()].inc(); - err.into() + .submit_tx(tx, block_args) + .await + .map_err(|err| { + tracing::debug!("Send raw transaction error: {err}"); + API_METRICS.submit_tx_error[&err.prom_error_code()].inc(); + err + })?; + Ok(TransactionDetailedResult { + transaction_hash: tx_hash, + storage_logs: submit_output + .write_logs + .into_iter() + .map(Into::into) + .collect(), + events: submit_output + .events + .into_iter() + .map(|event| map_event(event, tx_hash)) + .collect(), }) } } + +fn map_event(vm_event: VmEvent, tx_hash: H256) -> api::Log { + api::Log { + address: vm_event.address, + topics: 
vm_event.indexed_topics, + data: web3::Bytes::from(vm_event.value), + block_hash: None, + block_number: None, + l1_batch_number: Some(U64::from(vm_event.location.0 .0)), + transaction_hash: Some(tx_hash), + transaction_index: Some(web3::Index::from(vm_event.location.1)), + log_index: None, + transaction_log_index: None, + log_type: None, + removed: Some(false), + block_timestamp: None, + } +} From 99f8f08aa2235183677551767c8c07bfeb926f9f Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 3 Feb 2025 15:39:05 +0200 Subject: [PATCH 6/7] Refactor VM metric types --- core/lib/dal/src/transactions_dal.rs | 7 +- .../src/types/outputs/statistic.rs | 74 +++---------------- .../src/execution_sandbox/vm_metrics.rs | 41 +++++----- .../src/tx_sender/gas_estimation.rs | 12 +-- core/node/api_server/src/tx_sender/mod.rs | 4 +- .../src/tx_sender/tests/gas_estimation.rs | 9 ++- .../api_server/src/web3/namespaces/debug.rs | 2 +- .../state_keeper/src/seal_criteria/mod.rs | 8 +- 8 files changed, 55 insertions(+), 102 deletions(-) diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 6b35c507b919..2c00a838dd70 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -482,9 +482,10 @@ impl TransactionsDal<'_, '_> { value, &paymaster, &paymaster_input, - exec_info.gas_used as i64, - (exec_info.initial_storage_writes + exec_info.repeated_storage_writes) as i32, - exec_info.contracts_used as i32, + exec_info.vm.gas_used as i64, + (exec_info.writes.initial_storage_writes + exec_info.writes.repeated_storage_writes) + as i32, + exec_info.vm.contracts_used as i32, received_at, timestamp_asserter_range_start, timestamp_asserter_range_end, diff --git a/core/lib/vm_interface/src/types/outputs/statistic.rs b/core/lib/vm_interface/src/types/outputs/statistic.rs index 37670c685951..a69dd05c6df1 100644 --- a/core/lib/vm_interface/src/types/outputs/statistic.rs +++ b/core/lib/vm_interface/src/types/outputs/statistic.rs @@ -147,14 +147,6 @@ pub struct DeduplicatedWritesMetrics { } impl DeduplicatedWritesMetrics { - pub fn from_tx_metrics(tx_metrics: &TransactionExecutionMetrics) -> Self { - Self { - initial_storage_writes: tx_metrics.initial_storage_writes, - repeated_storage_writes: tx_metrics.repeated_storage_writes, - total_updated_values_size: tx_metrics.total_updated_values_size, - } - } - pub fn size(&self, protocol_version: ProtocolVersionId) -> usize { if protocol_version.is_pre_boojum() { self.initial_storage_writes * InitialStorageWrite::SERIALIZED_SIZE @@ -167,54 +159,28 @@ impl DeduplicatedWritesMetrics { } } +/// [`VmExecutionMetrics`] + some other metrics specific for executing a single transaction (e.g., +/// gas remaining after execution and gas refunded by the bootloader). #[derive(Debug, Clone, Copy)] pub struct TransactionExecutionMetrics { - pub initial_storage_writes: usize, - pub repeated_storage_writes: usize, - pub gas_used: usize, + pub writes: DeduplicatedWritesMetrics, + pub vm: VmExecutionMetrics, pub gas_remaining: u32, - pub published_bytecode_bytes: usize, - pub l2_l1_long_messages: usize, - pub l2_l1_logs: usize, - pub user_l2_l1_logs: usize, - pub contracts_used: usize, - pub contracts_deployed: u16, // FIXME: incorrectly defined? - pub vm_events: usize, - pub storage_logs: usize, - /// Sum of storage logs, vm events, l2->l1 logs, and the number of precompile calls. 
- pub total_log_queries: usize, - pub cycles_used: u32, - pub computational_gas_used: u32, - pub total_updated_values_size: usize, - pub pubdata_published: u32, - pub circuit_statistic: CircuitStatistic, + pub gas_refunded: u64, } impl Default for TransactionExecutionMetrics { fn default() -> Self { Self { - initial_storage_writes: 0, - repeated_storage_writes: 0, - gas_used: 0, + writes: DeduplicatedWritesMetrics::default(), + vm: VmExecutionMetrics::default(), gas_remaining: u32::MAX, - published_bytecode_bytes: 0, - l2_l1_long_messages: 0, - l2_l1_logs: 0, - user_l2_l1_logs: 0, - contracts_used: 0, - contracts_deployed: 0, - vm_events: 0, - storage_logs: 0, - total_log_queries: 0, - cycles_used: 0, - computational_gas_used: 0, - total_updated_values_size: 0, - pubdata_published: 0, - circuit_statistic: Default::default(), + gas_refunded: 0, } } } +/// Metrics for a (part of) VM execution. #[derive(Debug, Clone, Copy, Default, PartialEq, Serialize)] pub struct VmExecutionMetrics { pub gas_used: usize, @@ -222,10 +188,11 @@ pub struct VmExecutionMetrics { pub l2_l1_long_messages: usize, pub l2_to_l1_logs: usize, pub user_l2_to_l1_logs: usize, - pub contracts_used: usize, + pub contracts_used: usize, // FIXME: incorrectly defined? pub contracts_deployed: u16, pub vm_events: usize, pub storage_logs: usize, + /// Sum of storage logs, vm events, l2->l1 logs, and the number of precompile calls. pub total_log_queries: usize, pub cycles_used: u32, pub computational_gas_used: u32, @@ -234,25 +201,6 @@ pub struct VmExecutionMetrics { } impl VmExecutionMetrics { - pub fn from_tx_metrics(tx_metrics: &TransactionExecutionMetrics) -> Self { - Self { - published_bytecode_bytes: tx_metrics.published_bytecode_bytes, - l2_l1_long_messages: tx_metrics.l2_l1_long_messages, - l2_to_l1_logs: tx_metrics.l2_l1_logs, - user_l2_to_l1_logs: tx_metrics.user_l2_l1_logs, - contracts_deployed: tx_metrics.contracts_deployed, - contracts_used: tx_metrics.contracts_used, - gas_used: tx_metrics.gas_used, - storage_logs: tx_metrics.storage_logs, - vm_events: tx_metrics.vm_events, - total_log_queries: tx_metrics.total_log_queries, - cycles_used: tx_metrics.cycles_used, - computational_gas_used: tx_metrics.computational_gas_used, - pubdata_published: tx_metrics.pubdata_published, - circuit_statistic: tx_metrics.circuit_statistic, - } - } - pub fn size(&self) -> usize { self.l2_to_l1_logs * L2ToL1Log::SERIALIZED_SIZE + self.l2_l1_long_messages diff --git a/core/node/api_server/src/execution_sandbox/vm_metrics.rs b/core/node/api_server/src/execution_sandbox/vm_metrics.rs index 1d50100165d7..90ef0a85df80 100644 --- a/core/node/api_server/src/execution_sandbox/vm_metrics.rs +++ b/core/node/api_server/src/execution_sandbox/vm_metrics.rs @@ -4,7 +4,9 @@ use vise::{ Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LatencyObserver, Metrics, }; use zksync_multivm::{ - interface::{TransactionExecutionMetrics, VmEvent, VmExecutionResultAndLogs}, + interface::{ + TransactionExecutionMetrics, VmEvent, VmExecutionMetrics, VmExecutionResultAndLogs, + }, utils::StorageWritesDeduplicator, }; use zksync_types::{bytecode::BytecodeHash, H256}; @@ -135,7 +137,7 @@ pub(super) fn collect_tx_execution_metrics( contracts_deployed: u16, result: &VmExecutionResultAndLogs, ) -> TransactionExecutionMetrics { - let writes_metrics = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); + let writes = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); let l2_l1_long_messages = 
VmEvent::extract_long_l2_to_l1_messages(&result.logs.events) .iter() .map(|event| event.len()) @@ -150,23 +152,24 @@ pub(super) fn collect_tx_execution_metrics( .sum(); TransactionExecutionMetrics { - initial_storage_writes: writes_metrics.initial_storage_writes, - repeated_storage_writes: writes_metrics.repeated_storage_writes, - gas_used: result.statistics.gas_used as usize, + writes, + vm: VmExecutionMetrics { + gas_used: result.statistics.gas_used as usize, + published_bytecode_bytes, + l2_l1_long_messages, + l2_to_l1_logs: result.logs.total_l2_to_l1_logs_count(), + user_l2_to_l1_logs: result.logs.user_l2_to_l1_logs.len(), + contracts_used: result.statistics.contracts_used, + contracts_deployed, + vm_events: result.logs.events.len(), + storage_logs: result.logs.storage_logs.len(), + total_log_queries: result.statistics.total_log_queries, + cycles_used: result.statistics.cycles_used, + computational_gas_used: result.statistics.computational_gas_used, + pubdata_published: result.statistics.pubdata_published, + circuit_statistic: result.statistics.circuit_statistic, + }, gas_remaining: result.statistics.gas_remaining, - published_bytecode_bytes, - l2_l1_long_messages, - l2_l1_logs: result.logs.total_l2_to_l1_logs_count(), - user_l2_l1_logs: result.logs.user_l2_to_l1_logs.len(), - contracts_used: result.statistics.contracts_used, - contracts_deployed, - vm_events: result.logs.events.len(), - storage_logs: result.logs.storage_logs.len(), - total_log_queries: result.statistics.total_log_queries, - cycles_used: result.statistics.cycles_used, - computational_gas_used: result.statistics.computational_gas_used, - total_updated_values_size: writes_metrics.total_updated_values_size, - pubdata_published: result.statistics.pubdata_published, - circuit_statistic: result.statistics.circuit_statistic, + gas_refunded: result.refunds.gas_refunded, } } diff --git a/core/node/api_server/src/tx_sender/gas_estimation.rs b/core/node/api_server/src/tx_sender/gas_estimation.rs index d2e62177dd90..a99e7b6c31a8 100644 --- a/core/node/api_server/src/tx_sender/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/gas_estimation.rs @@ -398,13 +398,12 @@ impl<'a> GasEstimator<'a> { // It is assumed that there is no overflow here let gas_charged_for_pubdata = - u64::from(metrics.pubdata_published) * self.gas_per_pubdata_byte; + u64::from(metrics.vm.pubdata_published) * self.gas_per_pubdata_byte; - let gas_refunded = 0; // FIXME: result.refunds.gas_refunded - let total_gas_charged = self.max_gas_limit.checked_sub(gas_refunded); + let total_gas_charged = self.max_gas_limit.checked_sub(metrics.gas_refunded); Ok(InitialGasEstimate { total_gas_charged, - computational_gas_used: Some(metrics.computational_gas_used.into()), + computational_gas_used: Some(metrics.vm.computational_gas_used.into()), operator_overhead, gas_charged_for_pubdata, }) @@ -488,7 +487,7 @@ impl<'a> GasEstimator<'a> { let (result, tx_metrics) = self.step(suggested_gas_limit).await?; result.into_api_call_result()?; self.sender - .ensure_tx_executable(&self.transaction, &tx_metrics, false)?; + .ensure_tx_executable(&self.transaction, tx_metrics, false)?; // Now, we need to calculate the final overhead for the transaction. 
let overhead = derive_overhead( @@ -518,7 +517,8 @@ impl<'a> GasEstimator<'a> { } }; - let gas_for_pubdata = u64::from(tx_metrics.pubdata_published) * self.gas_per_pubdata_byte; + let gas_for_pubdata = + u64::from(tx_metrics.vm.pubdata_published) * self.gas_per_pubdata_byte; let estimated_gas_for_pubdata = (gas_for_pubdata as f64 * estimated_fee_scale_factor) as u64; diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index eb3d45ceacfb..be2b823393aa 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -381,7 +381,7 @@ impl TxSender { } let mut stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::DbInsert); - self.ensure_tx_executable(&tx.clone().into(), &execution_output.metrics, true)?; + self.ensure_tx_executable(&tx.clone().into(), execution_output.metrics, true)?; let validation_traces = validation_result?; let submission_res_handle = self @@ -658,7 +658,7 @@ impl TxSender { fn ensure_tx_executable( &self, transaction: &Transaction, - tx_metrics: &TransactionExecutionMetrics, + tx_metrics: TransactionExecutionMetrics, log_message: bool, ) -> Result<(), SubmitTxError> { // Hash is not computable for the provided `transaction` during gas estimation (it doesn't have diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs index 2cd8c273f0b8..8f06cb60edeb 100644 --- a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -195,7 +195,10 @@ async fn initial_estimate_for_expensive_contract(write_count: usize) { let tx = alice.create_expensive_tx(write_count); let metrics = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; - assert!(metrics.initial_storage_writes >= write_count, "{metrics:?}"); + assert!( + metrics.writes.initial_storage_writes >= write_count, + "{metrics:?}" + ); let array_start = U256::from_big_endian(&keccak256(&[0_u8; 32])); let contract_logs = (0..write_count as u64) @@ -253,7 +256,7 @@ async fn initial_estimate_for_code_oracle_tx() { println!("Testing bytecode: {hash:?}"); let tx = alice.create_code_oracle_tx(hash, keccak_hash); let metrics = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; - let stats = &metrics.circuit_statistic; + let stats = &metrics.vm.circuit_statistic; decomitter_stats = stats.code_decommitter.max(decomitter_stats); } assert!(decomitter_stats > 0.0); @@ -262,7 +265,7 @@ async fn initial_estimate_for_code_oracle_tx() { let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); let metrics = test_initial_estimate(state_override, tx, 1.05).await; // Sanity check: the transaction should spend significantly more on decommitment compared to previous ones - let new_decomitter_stats = metrics.circuit_statistic.code_decommitter; + let new_decomitter_stats = metrics.vm.circuit_statistic.code_decommitter; assert!( new_decomitter_stats > decomitter_stats * 1.5, "old={decomitter_stats}, new={new_decomitter_stats}" diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index e0704febb17e..4d9418376f46 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -335,7 +335,7 @@ impl DebugNamespace { }; let call = Call::new_high_level( call.common_data.fee.gas_limit.as_u64(), - 
result.metrics.gas_used as u64, + result.metrics.vm.gas_used as u64, call.execute.value, call.execute.calldata, output, diff --git a/core/node/state_keeper/src/seal_criteria/mod.rs b/core/node/state_keeper/src/seal_criteria/mod.rs index 9081637d5628..c7300bfda410 100644 --- a/core/node/state_keeper/src/seal_criteria/mod.rs +++ b/core/node/state_keeper/src/seal_criteria/mod.rs @@ -166,14 +166,12 @@ impl SealData { /// performed by the transaction are initial. pub fn for_transaction( transaction: &Transaction, - tx_metrics: &TransactionExecutionMetrics, + tx_metrics: TransactionExecutionMetrics, ) -> Self { - let execution_metrics = VmExecutionMetrics::from_tx_metrics(tx_metrics); - let writes_metrics = DeduplicatedWritesMetrics::from_tx_metrics(tx_metrics); Self { - execution_metrics, + execution_metrics: tx_metrics.vm, cumulative_size: transaction.bootloader_encoding_size(), - writes_metrics, + writes_metrics: tx_metrics.writes, gas_remaining: tx_metrics.gas_remaining, } } From 2ca443a2a5931ed12822e3883e8a49072892c51a Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 4 Feb 2025 21:09:39 +0200 Subject: [PATCH 7/7] Move FIXME to correct place --- core/lib/vm_interface/src/types/outputs/statistic.rs | 2 +- core/node/api_server/src/execution_sandbox/vm_metrics.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/core/lib/vm_interface/src/types/outputs/statistic.rs b/core/lib/vm_interface/src/types/outputs/statistic.rs index a69dd05c6df1..5ee7d6879697 100644 --- a/core/lib/vm_interface/src/types/outputs/statistic.rs +++ b/core/lib/vm_interface/src/types/outputs/statistic.rs @@ -188,7 +188,7 @@ pub struct VmExecutionMetrics { pub l2_l1_long_messages: usize, pub l2_to_l1_logs: usize, pub user_l2_to_l1_logs: usize, - pub contracts_used: usize, // FIXME: incorrectly defined? + pub contracts_used: usize, pub contracts_deployed: u16, pub vm_events: usize, pub storage_logs: usize, diff --git a/core/node/api_server/src/execution_sandbox/vm_metrics.rs b/core/node/api_server/src/execution_sandbox/vm_metrics.rs index 90ef0a85df80..44834dd7201d 100644 --- a/core/node/api_server/src/execution_sandbox/vm_metrics.rs +++ b/core/node/api_server/src/execution_sandbox/vm_metrics.rs @@ -160,6 +160,7 @@ pub(super) fn collect_tx_execution_metrics( l2_to_l1_logs: result.logs.total_l2_to_l1_logs_count(), user_l2_to_l1_logs: result.logs.user_l2_to_l1_logs.len(), contracts_used: result.statistics.contracts_used, + // FIXME: incorrectly defined? (a number of factory deps, not number of contracts deployed) contracts_deployed, vm_events: result.logs.events.len(), storage_logs: result.logs.storage_logs.len(),