diff --git a/core/lib/dal/.sqlx/query-3feb0cae2cd055bc2a02e5993db9ef5f6b03c144f5a35204c8c77b7098548b19.json b/core/lib/dal/.sqlx/query-3feb0cae2cd055bc2a02e5993db9ef5f6b03c144f5a35204c8c77b7098548b19.json
deleted file mode 100644
index 0c7acd0125b7..000000000000
--- a/core/lib/dal/.sqlx/query-3feb0cae2cd055bc2a02e5993db9ef5f6b03c144f5a35204c8c77b7098548b19.json
+++ /dev/null
@@ -1,108 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n            WITH\n                sl AS (\n                    SELECT DISTINCT\n                        ON (storage_logs.tx_hash) *\n                    FROM\n                        storage_logs\n                    WHERE\n                        storage_logs.address = $1\n                        AND storage_logs.tx_hash = ANY ($3)\n                    ORDER BY\n                        storage_logs.tx_hash,\n                        storage_logs.miniblock_number DESC,\n                        storage_logs.operation_number DESC\n                )\n            SELECT\n                transactions.hash AS tx_hash,\n                transactions.index_in_block AS index_in_block,\n                transactions.l1_batch_tx_index AS l1_batch_tx_index,\n                transactions.miniblock_number AS \"block_number!\",\n                transactions.error AS error,\n                transactions.effective_gas_price AS effective_gas_price,\n                transactions.initiator_address AS initiator_address,\n                transactions.data -> 'to' AS \"transfer_to?\",\n                transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n                transactions.tx_format AS \"tx_format?\",\n                transactions.refunded_gas AS refunded_gas,\n                transactions.gas_limit AS gas_limit,\n                miniblocks.hash AS \"block_hash\",\n                miniblocks.l1_batch_number AS \"l1_batch_number?\",\n                sl.key AS \"contract_address?\"\n            FROM\n                transactions\n                JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n                LEFT JOIN sl ON sl.value != $2\n                AND sl.tx_hash = transactions.hash\n            WHERE\n                transactions.hash = ANY ($3)\n                AND transactions.data != '{}'::jsonb\n            ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "tx_hash",
-        "type_info": "Bytea"
-      },
-      {
-        "ordinal": 1,
-        "name": "index_in_block",
-        "type_info": "Int4"
-      },
-      {
-        "ordinal": 2,
-        "name": "l1_batch_tx_index",
-        "type_info": "Int4"
-      },
-      {
-        "ordinal": 3,
-        "name": "block_number!",
-        "type_info": "Int8"
-      },
-      {
-        "ordinal": 4,
-        "name": "error",
-        "type_info": "Varchar"
-      },
-      {
-        "ordinal": 5,
-        "name": "effective_gas_price",
-        "type_info": "Numeric"
-      },
-      {
-        "ordinal": 6,
-        "name": "initiator_address",
-        "type_info": "Bytea"
-      },
-      {
-        "ordinal": 7,
-        "name": "transfer_to?",
-        "type_info": "Jsonb"
-      },
-      {
-        "ordinal": 8,
-        "name": "execute_contract_address?",
-        "type_info": "Jsonb"
-      },
-      {
-        "ordinal": 9,
-        "name": "tx_format?",
-        "type_info": "Int4"
-      },
-      {
-        "ordinal": 10,
-        "name": "refunded_gas",
-        "type_info": "Int8"
-      },
-      {
-        "ordinal": 11,
-        "name": "gas_limit",
-        "type_info": "Numeric"
-      },
-      {
-        "ordinal": 12,
-        "name": "block_hash",
-        "type_info": "Bytea"
-      },
-      {
-        "ordinal": 13,
-        "name": "l1_batch_number?",
-        "type_info": "Int8"
-      },
-      {
-        "ordinal": 14,
-        "name": "contract_address?",
-        "type_info": "Bytea"
-      }
-    ],
-    "parameters": {
-      "Left": [
-        "Bytea",
-        "Bytea",
-        "ByteaArray"
-      ]
-    },
-    "nullable": [
-      false,
-      true,
-      true,
-      true,
-      true,
-      true,
-      false,
-      null,
-      null,
-      true,
-      false,
-      true,
-      false,
-      true,
-      true
-    ]
-  },
-  "hash": "3feb0cae2cd055bc2a02e5993db9ef5f6b03c144f5a35204c8c77b7098548b19"
-}
diff --git a/core/lib/dal/.sqlx/query-d43d5c96ae92f52b12b320d5c6c43335d23bec1370e520186739d7075e9e3338.json b/core/lib/dal/.sqlx/query-d43d5c96ae92f52b12b320d5c6c43335d23bec1370e520186739d7075e9e3338.json
new file mode 100644
index 000000000000..93934a3a0bed
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-d43d5c96ae92f52b12b320d5c6c43335d23bec1370e520186739d7075e9e3338.json
@@ -0,0 +1,108 @@
+{
"db_name": "PostgreSQL", + "query": "\n WITH\n events AS (\n SELECT DISTINCT\n ON (events.tx_hash) *\n FROM\n events\n WHERE\n events.address = $1\n AND events.topic1 = $2\n AND events.tx_hash = ANY ($3)\n ORDER BY\n events.tx_hash,\n events.event_index_in_tx DESC\n )\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.miniblock_number AS \"block_number!\",\n transactions.error AS error,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.initiator_address AS initiator_address,\n transactions.data -> 'to' AS \"transfer_to?\",\n transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n transactions.tx_format AS \"tx_format?\",\n transactions.refunded_gas AS refunded_gas,\n transactions.gas_limit AS gas_limit,\n miniblocks.hash AS \"block_hash\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n events.topic4 AS \"contract_address?\"\n FROM\n transactions\n JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN events ON events.tx_hash = transactions.hash\n WHERE\n transactions.hash = ANY ($3)\n AND transactions.data != '{}'::jsonb\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "tx_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "index_in_block", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "l1_batch_tx_index", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "block_number!", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "error", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "effective_gas_price", + "type_info": "Numeric" + }, + { + "ordinal": 6, + "name": "initiator_address", + "type_info": "Bytea" + }, + { + "ordinal": 7, + "name": "transfer_to?", + "type_info": "Jsonb" + }, + { + "ordinal": 8, + "name": "execute_contract_address?", + "type_info": "Jsonb" + }, + { + "ordinal": 9, + "name": "tx_format?", + "type_info": "Int4" + }, + { + "ordinal": 10, + "name": "refunded_gas", + "type_info": "Int8" + }, + { + "ordinal": 11, + "name": "gas_limit", + "type_info": "Numeric" + }, + { + "ordinal": 12, + "name": "block_hash", + "type_info": "Bytea" + }, + { + "ordinal": 13, + "name": "l1_batch_number?", + "type_info": "Int8" + }, + { + "ordinal": 14, + "name": "contract_address?", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "ByteaArray" + ] + }, + "nullable": [ + false, + true, + true, + true, + true, + true, + false, + null, + null, + true, + false, + true, + false, + true, + true + ] + }, + "hash": "d43d5c96ae92f52b12b320d5c6c43335d23bec1370e520186739d7075e9e3338" +} diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index 54bdb9da632e..b7cbf16c89c7 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -4,8 +4,8 @@ use zksync_db_connection::{ match_query_as, }; use zksync_types::{ - api, api::TransactionReceipt, Address, L2BlockNumber, L2ChainId, Transaction, - ACCOUNT_CODE_STORAGE_ADDRESS, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256, U256, + api, api::TransactionReceipt, event::DEPLOY_EVENT_SIGNATURE, Address, L2BlockNumber, L2ChainId, + Transaction, CONTRACT_DEPLOYER_ADDRESS, H256, U256, }; use crate::{ @@ -35,22 +35,25 @@ impl TransactionsWeb3Dal<'_, '_> { hashes: &[H256], ) -> DalResult> { let hash_bytes: Vec<_> = hashes.iter().map(H256::as_bytes).collect(); + // Clarification for first part 
+        // we look for the `ContractDeployed` event in the `events` table
+        // to find the address of the deployed contract.
         let mut receipts: Vec<TransactionReceipt> = sqlx::query_as!(
             StorageTransactionReceipt,
             r#"
             WITH
-                sl AS (
+                events AS (
                     SELECT DISTINCT
-                        ON (storage_logs.tx_hash) *
+                        ON (events.tx_hash) *
                     FROM
-                        storage_logs
+                        events
                     WHERE
-                        storage_logs.address = $1
-                        AND storage_logs.tx_hash = ANY ($3)
+                        events.address = $1
+                        AND events.topic1 = $2
+                        AND events.tx_hash = ANY ($3)
                     ORDER BY
-                        storage_logs.tx_hash,
-                        storage_logs.miniblock_number DESC,
-                        storage_logs.operation_number DESC
+                        events.tx_hash,
+                        events.event_index_in_tx DESC
                 )
             SELECT
                 transactions.hash AS tx_hash,
@@ -67,21 +70,20 @@ impl TransactionsWeb3Dal<'_, '_> {
                 transactions.gas_limit AS gas_limit,
                 miniblocks.hash AS "block_hash",
                 miniblocks.l1_batch_number AS "l1_batch_number?",
-                sl.key AS "contract_address?"
+                events.topic4 AS "contract_address?"
             FROM
                 transactions
                 JOIN miniblocks ON miniblocks.number = transactions.miniblock_number
-                LEFT JOIN sl ON sl.value != $2
-                AND sl.tx_hash = transactions.hash
+                LEFT JOIN events ON events.tx_hash = transactions.hash
             WHERE
                 transactions.hash = ANY ($3)
                 AND transactions.data != '{}'::jsonb
             "#,
             // ^ Filter out transactions with pruned data, which would lead to potentially incomplete / bogus
            // transaction info.
-            ACCOUNT_CODE_STORAGE_ADDRESS.as_bytes(),
-            FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes(),
-            &hash_bytes as &[&[u8]]
+            CONTRACT_DEPLOYER_ADDRESS.as_bytes(),
+            DEPLOY_EVENT_SIGNATURE.as_bytes(),
+            &hash_bytes as &[&[u8]],
         )
         .instrument("get_transaction_receipts")
         .with_arg("hashes.len", &hashes.len())
diff --git a/core/node/consensus/src/era.rs b/core/node/consensus/src/era.rs
index 05b5fc81720e..a8477a8bb672 100644
--- a/core/node/consensus/src/era.rs
+++ b/core/node/consensus/src/era.rs
@@ -23,7 +23,7 @@ pub async fn run_main_node(
     // For now in case of error we just log it and allow the server
     // to continue running.
     if let Err(err) = super::run_main_node(ctx, cfg, secrets, ConnectionPool(pool)).await {
-        tracing::error!(%err, "Consensus actor failed");
+        tracing::error!("Consensus actor failed: {err:#}");
     } else {
         tracing::info!("Consensus actor stopped");
     }
diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs
index 8d44e38cc6ee..71352d9b5b5d 100644
--- a/core/node/state_keeper/src/io/mempool.rs
+++ b/core/node/state_keeper/src/io/mempool.rs
@@ -28,7 +28,7 @@ use crate::{
         L1BatchParams, L2BlockParams, PendingBatchData, StateKeeperIO,
     },
     mempool_actor::l2_tx_filter,
-    metrics::KEEPER_METRICS,
+    metrics::{L2BlockSealReason, AGGREGATION_METRICS, KEEPER_METRICS},
     seal_criteria::{IoSealCriteria, L2BlockMaxPayloadSizeSealer, TimeoutSealer},
     updates::UpdatesManager,
     MempoolGuard,
@@ -63,10 +63,19 @@ impl IoSealCriteria for MempoolIO {
     fn should_seal_l2_block(&mut self, manager: &UpdatesManager) -> bool {
         if self.timeout_sealer.should_seal_l2_block(manager) {
+            AGGREGATION_METRICS.l2_block_reason_inc(&L2BlockSealReason::Timeout);
             return true;
         }
-        self.l2_block_max_payload_size_sealer
+
+        if self
+            .l2_block_max_payload_size_sealer
             .should_seal_l2_block(manager)
+        {
+            AGGREGATION_METRICS.l2_block_reason_inc(&L2BlockSealReason::PayloadSize);
+            return true;
+        }
+
+        false
     }
 }
diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs
index 6e315ddd6c09..21ac54a93819 100644
--- a/core/node/state_keeper/src/keeper.rs
+++ b/core/node/state_keeper/src/keeper.rs
@@ -332,6 +332,7 @@ impl ZkSyncStateKeeper {
         &mut self,
         updates: &UpdatesManager,
     ) -> Result<L2BlockParams, Error> {
+        let latency = KEEPER_METRICS.wait_for_l2_block_params.start();
         let cursor = updates.io_cursor();
         while !self.is_canceled() {
             if let Some(params) = self
@@ -340,6 +341,7 @@
                 .await
                 .context("error waiting for new L2 block params")?
             {
+                latency.observe();
                 return Ok(params);
             }
         }
@@ -490,6 +492,8 @@
         }
 
         while !self.is_canceled() {
+            let full_latency = KEEPER_METRICS.process_l1_batch_loop_iteration.start();
+
             if self
                 .io
                 .should_seal_l1_batch_unconditionally(updates_manager)
@@ -515,7 +519,7 @@
                 .map_err(|e| e.context("wait_for_new_l2_block_params"))?;
             tracing::debug!(
                 "Initialized new L2 block #{} (L1 batch #{}) with timestamp {}",
-                updates_manager.l2_block.number,
+                updates_manager.l2_block.number + 1,
                 updates_manager.l1_batch.number,
                 display_timestamp(new_l2_block_params.timestamp)
             );
@@ -541,6 +545,7 @@
                 .process_one_tx(batch_executor, updates_manager, tx.clone())
                 .await?;
 
+            let latency = KEEPER_METRICS.match_seal_resolution.start();
             match &seal_resolution {
                 SealResolution::NoSeal | SealResolution::IncludeAndSeal => {
                     let TxExecutionResult::Success {
@@ -586,6 +591,7 @@
                         .with_context(|| format!("cannot reject transaction {tx_hash:?}"))?;
                 }
             };
+            latency.observe();
 
             if seal_resolution.should_seal() {
                 tracing::debug!(
@@ -593,8 +599,10 @@
                     transaction {tx_hash}",
                     updates_manager.l1_batch.number
                 );
+                full_latency.observe();
                 return Ok(());
             }
+            full_latency.observe();
         }
         Err(Error::Canceled)
     }
@@ -671,10 +679,14 @@
         updates_manager: &mut UpdatesManager,
         tx: Transaction,
     ) -> anyhow::Result<(SealResolution, TxExecutionResult)> {
+        let latency = KEEPER_METRICS.execute_tx_outer_time.start();
         let exec_result = batch_executor
             .execute_tx(tx.clone())
             .await
             .with_context(|| format!("failed executing transaction {:?}", tx.hash()))?;
+        latency.observe();
+
+        let latency = KEEPER_METRICS.determine_seal_resolution.start();
         // All of `TxExecutionResult::BootloaderOutOfGasForTx`,
         // `Halt::NotEnoughGasProvided` correspond to out-of-gas errors but of different nature.
         // - `BootloaderOutOfGasForTx`: it is returned when bootloader stack frame run out of gas before tx execution finished.
@@ -702,7 +714,7 @@
                 } else {
                     SealResolution::ExcludeAndSeal
                 };
-                AGGREGATION_METRICS.inc(error_message, &resolution);
+                AGGREGATION_METRICS.l1_batch_reason_inc(error_message, &resolution);
                 resolution
             }
             TxExecutionResult::RejectedByVm { reason } => {
@@ -785,6 +797,7 @@
                 )
             }
         };
+        latency.observe();
         Ok((resolution, exec_result))
     }
 }
diff --git a/core/node/state_keeper/src/metrics.rs b/core/node/state_keeper/src/metrics.rs
index d1a7269860ff..7d33f9a841f2 100644
--- a/core/node/state_keeper/src/metrics.rs
+++ b/core/node/state_keeper/src/metrics.rs
@@ -75,6 +75,21 @@ pub struct StateKeeperMetrics {
     pub gas_price_too_high: Counter,
     /// Number of times blob base fee was reported as too high.
     pub blob_base_fee_too_high: Counter,
+    /// The time it takes to match the seal resolution for each tx.
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub match_seal_resolution: Histogram<Duration>,
+    /// The time it takes to determine the seal resolution for each tx.
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub determine_seal_resolution: Histogram<Duration>,
+    /// The time it takes for the state keeper to wait for the tx execution result from the batch executor.
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub execute_tx_outer_time: Histogram<Duration>,
+    /// The time it takes for one iteration of the main loop in `process_l1_batch`.
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub process_l1_batch_loop_iteration: Histogram<Duration>,
+    /// The time it takes to wait for new L2 block parameters.
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub wait_for_l2_block_params: Histogram<Duration>,
 }
 
 #[vise::register]
@@ -139,6 +154,13 @@ impl From<&SealResolution> for SealResolutionLabel {
     }
 }
 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)]
+#[metrics(label = "reason", rename_all = "snake_case")]
+pub(super) enum L2BlockSealReason {
+    Timeout,
+    PayloadSize,
+}
+
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)]
 struct TxAggregationLabels {
     criterion: &'static str,
@@ -149,10 +171,11 @@
 #[metrics(prefix = "server_tx_aggregation")]
 pub(super) struct TxAggregationMetrics {
     reason: Family<TxAggregationLabels, Counter>,
+    l2_block_reason: Family<L2BlockSealReason, Counter>,
 }
 
 impl TxAggregationMetrics {
-    pub fn inc(&self, criterion: &'static str, resolution: &SealResolution) {
+    pub fn l1_batch_reason_inc(&self, criterion: &'static str, resolution: &SealResolution) {
         let labels = TxAggregationLabels {
             criterion,
             seal_resolution: Some(resolution.into()),
@@ -160,13 +183,17 @@
         self.reason[&labels].inc();
     }
 
-    pub fn inc_criterion(&self, criterion: &'static str) {
+    pub fn l1_batch_reason_inc_criterion(&self, criterion: &'static str) {
         let labels = TxAggregationLabels {
             criterion,
             seal_resolution: None,
         };
         self.reason[&labels].inc();
     }
+
+    pub fn l2_block_reason_inc(&self, reason: &L2BlockSealReason) {
+        self.l2_block_reason[reason].inc();
+    }
 }
 
 #[vise::register]
@@ -441,3 +468,16 @@ impl BatchTipMetrics {
 
 #[vise::register]
 pub(crate) static BATCH_TIP_METRICS: vise::Global<BatchTipMetrics> = vise::Global::new();
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "server_state_keeper_updates_manager")]
+pub struct UpdatesManagerMetrics {
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub finish_batch: Histogram<Duration>,
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub extend_from_executed_transaction: Histogram<Duration>,
+}
+
+#[vise::register]
+pub(crate) static UPDATES_MANAGER_METRICS: vise::Global<UpdatesManagerMetrics> =
+    vise::Global::new();
diff --git a/core/node/state_keeper/src/seal_criteria/conditional_sealer.rs b/core/node/state_keeper/src/seal_criteria/conditional_sealer.rs
index d29e66cd2b5b..cd00d4f89360 100644
--- a/core/node/state_keeper/src/seal_criteria/conditional_sealer.rs
+++ b/core/node/state_keeper/src/seal_criteria/conditional_sealer.rs
@@ -103,7 +103,8 @@ impl ConditionalSealer for SequencerSealer {
                     "L1 batch #{l1_batch_number} processed by `{name}` with resolution {seal_resolution:?}",
                     name = sealer.prom_criterion_name()
                 );
-                AGGREGATION_METRICS.inc(sealer.prom_criterion_name(), &seal_resolution);
+                AGGREGATION_METRICS
+                    .l1_batch_reason_inc(sealer.prom_criterion_name(), &seal_resolution);
             }
             SealResolution::NoSeal => { /* Don't do anything */ }
         }
diff --git a/core/node/state_keeper/src/seal_criteria/mod.rs b/core/node/state_keeper/src/seal_criteria/mod.rs
index 51ad1c4ad906..d309c6c20203 100644
--- a/core/node/state_keeper/src/seal_criteria/mod.rs
+++ b/core/node/state_keeper/src/seal_criteria/mod.rs
@@ -165,7 +165,7 @@ impl IoSealCriteria for TimeoutSealer {
             millis_since(manager.batch_timestamp()) > block_commit_deadline_ms;
 
         if should_seal_timeout {
-            AGGREGATION_METRICS.inc_criterion(RULE_NAME);
+            AGGREGATION_METRICS.l1_batch_reason_inc_criterion(RULE_NAME);
             tracing::debug!(
                 "Decided to seal L1 batch using rule `{RULE_NAME}`; batch timestamp: {}, \
                 commit deadline: {block_commit_deadline_ms}ms",
diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs
index bb33a6f58678..b775cdaa0df8 100644
--- a/core/node/state_keeper/src/updates/mod.rs
+++ b/core/node/state_keeper/src/updates/mod.rs
@@ -14,7 +14,7 @@ use zksync_utils::bytecode::CompressedBytecodeInfo;
 
 pub(crate) use self::{l1_batch_updates::L1BatchUpdates, l2_block_updates::L2BlockUpdates};
 use super::{
     io::{IoCursor, L2BlockParams},
-    metrics::BATCH_TIP_METRICS,
+    metrics::{BATCH_TIP_METRICS, UPDATES_MANAGER_METRICS},
 };
 use crate::types::ExecutionMetricsForCriteria;
@@ -111,6 +111,9 @@
         execution_metrics: ExecutionMetrics,
         call_traces: Vec<Call>,
     ) {
+        let latency = UPDATES_MANAGER_METRICS
+            .extend_from_executed_transaction
+            .start();
         self.storage_writes_deduplicator
             .apply(&tx_execution_result.logs.storage_logs);
         self.l2_block.extend_from_executed_transaction(
@@ -121,9 +124,11 @@
             compressed_bytecodes,
             call_traces,
         );
+        latency.observe();
     }
 
     pub(crate) fn finish_batch(&mut self, finished_batch: FinishedL1Batch) {
+        let latency = UPDATES_MANAGER_METRICS.finish_batch.start();
         assert!(
             self.l1_batch.finished.is_none(),
             "Cannot finish already finished batch"
@@ -144,6 +149,8 @@
             batch_tip_metrics.execution_metrics,
         );
         self.l1_batch.finished = Some(finished_batch);
+
+        latency.observe();
     }
 
     /// Pushes a new L2 block with the specified timestamp into this manager. The previously
diff --git a/core/tests/ts-integration/contracts/inner-outer/inner.sol b/core/tests/ts-integration/contracts/inner-outer/inner.sol
new file mode 100644
index 000000000000..2d857c9dd63b
--- /dev/null
+++ b/core/tests/ts-integration/contracts/inner-outer/inner.sol
@@ -0,0 +1,9 @@
+pragma solidity ^0.8.0;
+
+contract Inner {
+    uint256 public value;
+
+    constructor(uint256 _value) {
+        value = _value;
+    }
+}
diff --git a/core/tests/ts-integration/contracts/inner-outer/outer.sol b/core/tests/ts-integration/contracts/inner-outer/outer.sol
new file mode 100644
index 000000000000..935fd5a529ed
--- /dev/null
+++ b/core/tests/ts-integration/contracts/inner-outer/outer.sol
@@ -0,0 +1,11 @@
+pragma solidity ^0.8.0;
+
+import "./inner.sol";
+
+contract Outer {
+    Inner public innerContract;
+
+    constructor(uint256 _value) {
+        innerContract = new Inner(_value);
+    }
+}
diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts
index ff590a24cf59..9f87b68d34b5 100644
--- a/core/tests/ts-integration/tests/api/web3.test.ts
+++ b/core/tests/ts-integration/tests/api/web3.test.ts
@@ -18,7 +18,9 @@ const DATE_REGEX = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?/;
 
 const contracts = {
     counter: getTestContract('Counter'),
-    events: getTestContract('Emitter')
+    events: getTestContract('Emitter'),
+    outer: getTestContract('Outer'),
+    inner: getTestContract('Inner')
 };
 
 describe('web3 API compatibility tests', () => {
@@ -929,6 +931,29 @@
         expect(txFromApi.v! <= 1).toEqual(true);
     });
 
+    // We want to be sure that the correct (outer) contract address is returned in the transaction
+    // receipt when a contract deploys another contract in its constructor.
+    test('Should check inner-outer contract address in the receipt of the deploy tx', async () => {
+        const deploymentNonce = await alice.getDeploymentNonce();
+        const expectedAddress = zksync.utils.createAddress(alice.address, deploymentNonce);
+
+        const expectedBytecode = contracts.outer.bytecode;
+
+        let innerContractBytecode = contracts.inner.bytecode;
+        let outerContractOverrides = {
+            customData: {
+                factoryDeps: [innerContractBytecode]
+            }
+        };
+        const outerContract = await deployContract(alice, contracts.outer, [1], undefined, outerContractOverrides);
+        let receipt = await outerContract.deployTransaction.wait();
+
+        const deployedBytecode = await alice.provider.getCode(receipt.contractAddress);
+
+        expect(expectedAddress).toEqual(receipt.contractAddress);
+        expect(expectedBytecode).toEqual(deployedBytecode);
+    });
+
     afterAll(async () => {
         await testMaster.deinitialize();
     });
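
Note on the receipt lookup (an illustration, not part of the patch): the receipts query now resolves `contract_address` from the `ContractDeployed` event emitted by the `ContractDeployer` system contract, where $1 is `CONTRACT_DEPLOYER_ADDRESS`, $2 is `DEPLOY_EVENT_SIGNATURE` (the event's topic1), and `topic4` carries the deployed address. Because the deployer emits this event only after a constructor finishes, a transaction deploying `Outer` (whose constructor deploys `Inner`) logs the `Inner` event first and the `Outer` event last, so `DISTINCT ON (events.tx_hash)` combined with `ORDER BY events.event_index_in_tx DESC` keeps exactly the top-level deployment per transaction. That is the behavior the new inner-outer test asserts. A minimal standalone sketch of the same pattern, with the parameters as placeholders:

    SELECT DISTINCT
        ON (events.tx_hash) events.tx_hash,
        events.topic4 AS contract_address
    FROM
        events
    WHERE
        events.address = $1        -- ContractDeployer system contract address
        AND events.topic1 = $2     -- event signature hash of ContractDeployed
        AND events.tx_hash = ANY ($3)
    ORDER BY
        events.tx_hash,
        events.event_index_in_tx DESC; -- last matching event per tx, i.e. the outermost deployment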