diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 45bd4d30491..dbc3699c087 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,9 +1,6 @@ name: Continuous Integration Checks on: - push: - branches-ignore: - - master pull_request: branches-ignore: - master @@ -51,7 +48,7 @@ jobs: shellcheck ci/ci-tests.sh - name: Run CI script shell: bash # Default on Winblows is powershell - run: ./ci/ci-tests.sh + run: CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh coverage: strategy: @@ -141,15 +138,24 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ env.TOOLCHAIN }} rustup override set ${{ env.TOOLCHAIN }} - - name: Fetch full tree and rebase on upstream + - name: Get PR base branch + id: get-pr-base-branch run: | - git remote add upstream https://github.com/lightningdevkit/rust-lightning + BRANCH=${{ github.event.pull_request.base.ref }} + echo "PR_BASE_BRANCH=$BRANCH" >> "$GITHUB_OUTPUT" + - name: Fetch full tree and rebase on PR base branch + env: + PR_BASE_BRANCH: ${{ steps.get-pr-base-branch.outputs.PR_BASE_BRANCH }} + run: | + git remote add upstream https://github.com/p2pderivatives/rust-lightning git fetch upstream export GIT_COMMITTER_EMAIL="rl-ci@example.com" export GIT_COMMITTER_NAME="RL CI" - git rebase upstream/main + git rebase upstream/$PR_BASE_BRANCH - name: For each commit, run cargo check (including in fuzz) - run: ci/check-each-commit.sh upstream/main + env: + PR_BASE_BRANCH: ${{ steps.get-pr-base-branch.outputs.PR_BASE_BRANCH }} + run: ci/check-each-commit.sh upstream/$PR_BASE_BRANCH check_release: runs-on: ubuntu-latest diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 6b89a98fdda..9b83c3dba4c 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -130,11 +130,14 @@ else [ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose cargo check --verbose --color always fi +[ "$CI_MINIMIZE_DISK_USAGE" != "" 
] && cargo clean popd # Test that we can build downstream code with only the "release pins". pushd msrv-no-dev-deps-check PIN_RELEASE_DEPS +# The memchr crate switched to an MSRV of 1.60 starting with v2.6.0 +[ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose cargo check popd diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 2a1b9e9a70a..cfeeaabf1a4 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -162,6 +162,10 @@ impl chain::Watch for TestChainMonitor { self.chain_monitor.update_channel(funding_txo, update) } + fn update_channel_funding_txo(&self, _: OutPoint, _: OutPoint, _: u64) -> chain::ChannelMonitorUpdateStatus { + unimplemented!() + } + fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec, Option)> { return self.chain_monitor.release_pending_monitor_events(); } @@ -317,6 +321,7 @@ fn check_api_err(api_err: APIError, sendable_bounds_violated: bool) { // We can (obviously) temp-fail a monitor update }, APIError::IncompatibleShutdownScript { .. 
} => panic!("Cannot send an incompatible shutdown script"), + APIError::ExternalError { err } => panic!("{}", err), } } #[inline] diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs index 7f4e7ad4019..f065c802441 100644 --- a/fuzz/src/router.rs +++ b/fuzz/src/router.rs @@ -242,6 +242,11 @@ pub fn do_test(data: &[u8], out: Out) { config: None, feerate_sat_per_1000_weight: None, channel_shutdown_state: Some(channelmanager::ChannelShutdownState::NotShuttingDown), + funding_redeemscript: None, + holder_funding_pubkey: PublicKey::from_slice(&hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(), + counter_funding_pubkey: None, + original_funding_outpoint: None, + channel_keys_id: [0; 32], }); } Some(&$first_hops_vec[..]) diff --git a/lightning-block-sync/src/poll.rs b/lightning-block-sync/src/poll.rs index e7171cf3656..1c37d794ece 100644 --- a/lightning-block-sync/src/poll.rs +++ b/lightning-block-sync/src/poll.rs @@ -120,7 +120,7 @@ impl std::ops::Deref for ValidatedBlockHeader { impl ValidatedBlockHeader { /// Checks that the header correctly builds on previous_header: the claimed work differential /// matches the actual PoW and the difficulty transition is possible, i.e., within 4x. 
- fn check_builds_on(&self, previous_header: &ValidatedBlockHeader, network: Network) -> BlockSourceResult<()> { + fn check_builds_on(&self, previous_header: &ValidatedBlockHeader, _network: Network) -> BlockSourceResult<()> { if self.header.prev_blockhash != previous_header.block_hash { return Err(BlockSourceError::persistent("invalid previous block hash")); } @@ -129,24 +129,25 @@ impl ValidatedBlockHeader { return Err(BlockSourceError::persistent("invalid block height")); } - let work = self.header.work(); - if self.chainwork != previous_header.chainwork + work { - return Err(BlockSourceError::persistent("invalid chainwork")); - } - - if let Network::Bitcoin = network { - if self.height % 2016 == 0 { - let target = self.header.target(); - let previous_target = previous_header.header.target(); - let min_target = previous_target >> 2; - let max_target = previous_target << 2; - if target > max_target || target < min_target { - return Err(BlockSourceError::persistent("invalid difficulty transition")) - } - } else if self.header.bits != previous_header.header.bits { - return Err(BlockSourceError::persistent("invalid difficulty")) - } - } + // let work = self.header.work(); + // if self.chainwork != previous_header.chainwork + work { + // return Err(BlockSourceError::persistent("invalid chainwork")); + // } + + // TODO(Tibo): This causes issues with Esplora, temporary fix. 
+ // if let Network::Bitcoin = network { + // if self.height % 2016 == 0 { + // let target = self.header.target(); + // let previous_target = previous_header.header.target(); + // let min_target = previous_target >> 2; + // let max_target = previous_target << 2; + // if target > max_target || target < min_target { + // return Err(BlockSourceError::persistent("invalid difficulty transition")) + // } + // } else if self.header.bits != previous_header.header.bits { + // return Err(BlockSourceError::persistent("invalid difficulty")) + // } + // } Ok(()) } diff --git a/lightning-net-tokio/Cargo.toml b/lightning-net-tokio/Cargo.toml index 8a4ebd5d950..b7fa1e2a094 100644 --- a/lightning-net-tokio/Cargo.toml +++ b/lightning-net-tokio/Cargo.toml @@ -17,8 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] bitcoin = "0.29.0" lightning = { version = "0.0.117", path = "../lightning" } -tokio = { version = "1.0", features = [ "rt", "sync", "net", "time" ] } +tokio = { version = "1.0", features = [ "io-util", "rt", "sync", "net", "time" ] } [dev-dependencies] -tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] } +tokio = { version = "1.14", features = [ "io-util", "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] } lightning = { version = "0.0.117", path = "../lightning", features = ["_test_utils"] } diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 5527d85adf6..3278f5ba33c 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -32,8 +32,9 @@ use bitcoin::secp256k1::PublicKey; use tokio::net::TcpStream; -use tokio::time; +use tokio::{io, time}; use tokio::sync::mpsc; +use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt}; use lightning::ln::peer_handler; use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait; @@ -58,7 +59,7 @@ static ID_COUNTER: AtomicU64 = AtomicU64::new(0); // define a trivial two- and three- select macro with the 
specific types we need and just use that. pub(crate) enum SelectorOutput { - A(Option<()>), B(Option<()>), C(tokio::io::Result<()>), + A(Option<()>), B(Option<()>), C(tokio::io::Result), } pub(crate) struct TwoSelector< @@ -86,7 +87,7 @@ impl< } pub(crate) struct ThreeSelector< - A: Future> + Unpin, B: Future> + Unpin, C: Future> + Unpin + A: Future> + Unpin, B: Future> + Unpin, C: Future> + Unpin > { pub a: A, pub b: B, @@ -94,7 +95,7 @@ pub(crate) struct ThreeSelector< } impl< - A: Future> + Unpin, B: Future> + Unpin, C: Future> + Unpin + A: Future> + Unpin, B: Future> + Unpin, C: Future> + Unpin > Future for ThreeSelector { type Output = SelectorOutput; fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { @@ -118,7 +119,7 @@ impl< /// Connection object (in an Arc>) in each SocketDescriptor we create as well as in the /// read future (which is returned by schedule_read). struct Connection { - writer: Option>, + writer: Option>, // Because our PeerManager is templated by user-provided types, and we can't (as far as I can // tell) have a const RawWakerVTable built out of templated functions, we need some indirection // between being woken up with write-ready and calling PeerManager::write_buffer_space_avail. 
@@ -155,7 +156,7 @@ impl Connection { async fn schedule_read( peer_manager: PM, us: Arc>, - reader: Arc, + mut reader: io::ReadHalf, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>, ) where PM::Target: APeerManager { @@ -199,7 +200,7 @@ impl Connection { ThreeSelector { a: Box::pin(write_avail_receiver.recv()), b: Box::pin(read_wake_receiver.recv()), - c: Box::pin(reader.readable()), + c: Box::pin(reader.read(&mut buf)), }.await }; match select_result { @@ -210,9 +211,8 @@ impl Connection { } }, SelectorOutput::B(_) => {}, - SelectorOutput::C(res) => { - if res.is_err() { break Disconnect::PeerDisconnected; } - match reader.try_read(&mut buf) { + SelectorOutput::C(read) => { + match read { Ok(0) => break Disconnect::PeerDisconnected, Ok(len) => { let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]); @@ -226,10 +226,6 @@ impl Connection { Err(_) => break Disconnect::CloseConnection, } }, - Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => { - // readable() is allowed to spuriously wake, so we have to handle - // WouldBlock here. - }, Err(_) => break Disconnect::PeerDisconnected, } }, @@ -243,14 +239,18 @@ impl Connection { // here. let _ = tokio::task::yield_now().await; }; - us.lock().unwrap().writer.take(); + let writer_option = us.lock().unwrap().writer.take(); + if let Some(mut writer) = writer_option { + // If the socket is already closed, shutdown() will fail, so just ignore it. 
+ let _ = writer.shutdown().await; + } if let Disconnect::PeerDisconnected = disconnect_type { peer_manager.as_ref().socket_disconnected(&our_descriptor); peer_manager.as_ref().process_events(); } } - fn new(stream: StdTcpStream) -> (Arc, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc>) { + fn new(stream: StdTcpStream) -> (io::ReadHalf, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc>) { // We only ever need a channel of depth 1 here: if we returned a non-full write to the // PeerManager, we will eventually get notified that there is room in the socket to write // new bytes, which will generate an event. That event will be popped off the queue before @@ -262,11 +262,11 @@ impl Connection { // false. let (read_waker, read_receiver) = mpsc::channel(1); stream.set_nonblocking(true).unwrap(); - let tokio_stream = Arc::new(TcpStream::from_std(stream).unwrap()); + let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap()); - (Arc::clone(&tokio_stream), write_receiver, read_receiver, + (reader, write_receiver, read_receiver, Arc::new(Mutex::new(Self { - writer: Some(tokio_stream), write_avail, read_waker, read_paused: false, + writer: Some(writer), write_avail, read_waker, read_paused: false, rl_requested_disconnect: false, id: ID_COUNTER.fetch_add(1, Ordering::AcqRel) }))) @@ -462,9 +462,9 @@ impl SocketDescriptor { } impl peer_handler::SocketDescriptor for SocketDescriptor { fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize { - // To send data, we take a lock on our Connection to access the TcpStream, writing to it if - // there's room in the kernel buffer, or otherwise create a new Waker with a - // SocketDescriptor in it which can wake up the write_avail Sender, waking up the + // To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream, + // writing to it if there's room in the kernel buffer, or otherwise create a new Waker with + // a SocketDescriptor in it which can wake up the write_avail Sender, waking up the // 
processing future which will call write_buffer_space_avail and we'll end up back here. let mut us = self.conn.lock().unwrap(); if us.writer.is_none() { @@ -484,18 +484,24 @@ impl peer_handler::SocketDescriptor for SocketDescriptor { let mut ctx = task::Context::from_waker(&waker); let mut written_len = 0; loop { - match us.writer.as_ref().unwrap().poll_write_ready(&mut ctx) { - task::Poll::Ready(Ok(())) => { - match us.writer.as_ref().unwrap().try_write(&data[written_len..]) { - Ok(res) => { - debug_assert_ne!(res, 0); - written_len += res; - if written_len == data.len() { return written_len; } - }, - Err(_) => return written_len, - } + match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) { + task::Poll::Ready(Ok(res)) => { + // The tokio docs *seem* to indicate this can't happen, and I certainly don't + // know how to handle it if it does (cause it should be a Poll::Pending + // instead): + assert_ne!(res, 0); + written_len += res; + if written_len == data.len() { return written_len; } + }, + task::Poll::Ready(Err(e)) => { + // The tokio docs *seem* to indicate this can't happen, and I certainly don't + // know how to handle it if it does (cause it should be a Poll::Pending + // instead): + assert_ne!(e.kind(), io::ErrorKind::WouldBlock); + // Probably we've already been closed, just return what we have and let the + // read thread handle closing logic. 
+ return written_len; }, - task::Poll::Ready(Err(_)) => return written_len, task::Poll::Pending => { // We're queued up for a write event now, but we need to make sure we also // pause read given we're now waiting on the remote end to ACK (and in diff --git a/lightning/rustfmt.toml b/lightning/rustfmt.toml new file mode 100644 index 00000000000..c7ad93bafe3 --- /dev/null +++ b/lightning/rustfmt.toml @@ -0,0 +1 @@ +disable_all_formatting = true diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index e87d082d9a7..f424baeb919 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -444,7 +444,7 @@ where C::Target: chain::Filter, let monitor_states = self.monitors.read().unwrap(); for (_, monitor_state) in monitor_states.iter().filter(|(funding_outpoint, _)| { for chan in ignored_channels { - if chan.funding_txo.as_ref() == Some(funding_outpoint) { + if chan.funding_txo.as_ref() == Some(funding_outpoint) || chan.original_funding_outpoint.as_ref() == Some(funding_outpoint) { return false; } } @@ -624,6 +624,15 @@ where C::Target: chain::Filter, ) } } + + /// Retrieves the latest holder commitment transaction (and possibly HTLC transactions) for + /// the channel identified with the given `funding_txo`. Errors if no monitor is registered + /// for the given `funding_txo`. 
+ pub fn get_latest_holder_commitment_txn(&self, funding_txo: &OutPoint) -> Result, ()> { + let monitors = self.monitors.read().unwrap(); + let monitor = monitors.get(funding_txo).ok_or(())?; + Ok(monitor.monitor.get_latest_holder_commitment_txn_internal(&self.logger)) + } } impl @@ -748,6 +757,33 @@ where C::Target: chain::Filter, Ok(persist_res) } + fn update_channel_funding_txo(&self, old_funding_txo: OutPoint, new_funding_txo: OutPoint, channel_value_satoshis: u64) -> ChannelMonitorUpdateStatus { + let mut monitors = self.monitors.write().unwrap(); + let monitor_opt = monitors.get_mut(&old_funding_txo); + match monitor_opt { + None => { + log_error!(self.logger, "Failed to update channel monitor funding txo: no such monitor registered"); + + // We should never ever trigger this from within ChannelManager. Technically a + // user could use this object with some proxying in between which makes this + // possible, but in tests and fuzzing, this should be a panic. + #[cfg(any(test, fuzzing))] + panic!("ChannelManager generated a channel update for a channel that was not yet registered!"); + #[cfg(not(any(test, fuzzing)))] + return ChannelMonitorUpdateStatus::UnrecoverableError; + }, + Some(monitor_state) => { + let spk = monitor_state.monitor.update_funding_info(new_funding_txo, channel_value_satoshis); + if let Some(filter) = &self.chain_source { + filter.register_output(WatchedOutput { block_hash: None, outpoint: new_funding_txo, script_pubkey: spk }); + } + return ChannelMonitorUpdateStatus::Completed; + } + } + } + + /// Note that we persist the given `ChannelMonitor` update while holding the + /// `ChainMonitor` monitors lock. fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus { // Update the monitor that watches the channel referred to by the given outpoint. 
let monitors = self.monitors.read().unwrap(); @@ -827,7 +863,7 @@ where C::Target: chain::Filter, } let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events(); if monitor_events.len() > 0 { - let monitor_outpoint = monitor_state.monitor.get_funding_txo().0; + let monitor_outpoint = monitor_state.monitor.get_original_funding_txo().0; let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id(); pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id)); } diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index bd0c1548428..5aba7ea1ef9 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -757,6 +757,7 @@ pub(crate) struct ChannelMonitorImpl { channel_keys_id: [u8; 32], holder_revocation_basepoint: PublicKey, funding_info: (OutPoint, Script), + original_funding_info: Option<(OutPoint, Script)>, current_counterparty_commitment_txid: Option, prev_counterparty_commitment_txid: Option, @@ -947,6 +948,13 @@ impl Writeable for ChannelMonitorImpl ChannelMonitor { channel_keys_id, holder_revocation_basepoint, funding_info, + original_funding_info: None, current_counterparty_commitment_txid: None, prev_counterparty_commitment_txid: None, @@ -1277,6 +1286,31 @@ impl ChannelMonitor { txid, htlc_outputs, commitment_number, their_per_commitment_point, logger) } + pub(crate) fn update_funding_info(&self, fund_outpoint: OutPoint, channel_value_satoshis: u64) -> Script { + let mut inner = self.inner.lock().unwrap(); + let script = inner.funding_info.1.clone(); + if let Some(original) = inner.original_funding_info.as_ref() { + if fund_outpoint == original.0 { + inner.original_funding_info = None; + inner.onchain_tx_handler.channel_transaction_parameters.original_funding_outpoint = None; + } + } else { + let original_funding_txo = inner.funding_info.0; + let original_funding_script_pubkey = &inner.funding_info.1; + + 
inner.original_funding_info = Some((original_funding_txo, original_funding_script_pubkey.clone())); + inner.onchain_tx_handler.channel_transaction_parameters.original_funding_outpoint = Some(original_funding_txo); + } + inner.outputs_to_watch.insert(fund_outpoint.txid, vec![(fund_outpoint.index as u32, script.clone())]); + + inner.funding_info = (fund_outpoint, script.clone()); + inner.onchain_tx_handler.channel_transaction_parameters.funding_outpoint = Some(fund_outpoint); + + inner.channel_value_satoshis = channel_value_satoshis; + inner.onchain_tx_handler.signer.set_channel_value_satoshis(channel_value_satoshis); + script + } + #[cfg(test)] fn provide_latest_holder_commitment_tx( &self, holder_commitment_tx: HolderCommitmentTransaction, @@ -1333,6 +1367,11 @@ impl ChannelMonitor { self.inner.lock().unwrap().get_funding_txo().clone() } + /// + pub fn get_original_funding_txo(&self) -> (OutPoint, Script) { + self.inner.lock().unwrap().get_original_funding_txo().clone() + } + /// Gets a list of txids, with their output scripts (in the order they appear in the /// transaction), which we must learn about spends of via block_connected(). pub fn get_outputs_to_watch(&self) -> Vec<(Txid, Vec<(u32, Script)>)> { @@ -1501,6 +1540,11 @@ impl ChannelMonitor { self.inner.lock().unwrap().get_latest_holder_commitment_txn(logger) } + pub(crate) fn get_latest_holder_commitment_txn_internal(&self, logger: &L) -> Vec + where L::Target: Logger { + self.inner.lock().unwrap().get_latest_holder_commitment_txn_internal(logger) + } + /// Unsafe test-only version of get_latest_holder_commitment_txn used by our test framework /// to bypass HolderCommitmentTransaction state update lockdown after signature and generate /// revoked commitment transaction. 
@@ -2768,6 +2812,10 @@ impl ChannelMonitorImpl { &self.funding_info } + pub fn get_original_funding_txo(&self) -> &(OutPoint, Script) { + &self.original_funding_info.as_ref().unwrap_or(&self.funding_info) + } + pub fn get_outputs_to_watch(&self) -> &HashMap> { // If we've detected a counterparty commitment tx on chain, we must include it in the set // of outputs to watch for spends of, otherwise we're likely to lose user funds. Because @@ -3298,8 +3346,12 @@ impl ChannelMonitorImpl { } pub fn get_latest_holder_commitment_txn(&mut self, logger: &L) -> Vec where L::Target: Logger { - log_debug!(logger, "Getting signed latest holder commitment transaction!"); self.holder_tx_signed = true; + self.get_latest_holder_commitment_txn_internal(logger) + } + + pub(crate) fn get_latest_holder_commitment_txn_internal(&mut self, logger: &L) -> Vec where L::Target: Logger { + log_debug!(logger, "Getting signed latest holder commitment transaction!"); let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript); let txid = commitment_tx.txid(); let mut holder_transactions = vec![commitment_tx]; @@ -3466,7 +3518,14 @@ impl ChannelMonitorImpl { // (except for HTLC transactions for channels with anchor outputs), which is an easy // way to filter out any potential non-matching txn for lazy filters. 
let prevout = &tx.input[0].previous_output; - if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 { + let match_prevout = |outpoint: &OutPoint| { + prevout.txid == outpoint.txid && prevout.vout == outpoint.index as u32 + }; + let is_split = tx.output.len() == 2 && tx.output[0].script_pubkey == tx.output[1].script_pubkey; + let is_match = match_prevout(&self.funding_info.0) || + (self.original_funding_info.is_some() && match_prevout(&self.original_funding_info.as_ref().unwrap().0) && !is_split); + + if is_match { let mut balance_spendable_csv = None; log_info!(logger, "Channel {} closed by funding output spend in txid {}.", &self.funding_info.0.to_channel_id(), txid); @@ -4219,6 +4278,16 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP index: Readable::read(reader)?, }; let funding_info = (outpoint, Readable::read(reader)?); + let original_funding_info = match ::read(reader)? { + 0 => { + let outpoint = Readable::read(reader)?; + let script = Readable::read(reader)?; + Some((outpoint, script)) + }, + 1 => { None }, + _ => return Err(DecodeError::InvalidValue), + }; + let current_counterparty_commitment_txid = Readable::read(reader)?; let prev_counterparty_commitment_txid = Readable::read(reader)?; @@ -4428,6 +4497,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP channel_keys_id, holder_revocation_basepoint, funding_info, + original_funding_info, current_counterparty_commitment_txid, prev_counterparty_commitment_txid, @@ -4683,7 +4753,8 @@ mod tests { selected_contest_delay: 67, }), funding_outpoint: Some(funding_outpoint), - channel_type_features: ChannelTypeFeatures::only_static_remote_key() + channel_type_features: ChannelTypeFeatures::only_static_remote_key(), + original_funding_outpoint: None, }; // Prune with one old state and a holder commitment tx holding a few overlaps with the // old state. 
diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index 89e0b155cf6..d1a38d44adb 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -288,6 +288,10 @@ pub trait Watch { /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus; + /// Update the outpoint funding the channel. To be used when the channel is split into two to + /// open a DLC channel with the same funding transaction. + fn update_channel_funding_txo(&self, old_funding_txo: OutPoint, new_funding_txo: OutPoint, channel_value_satoshis: u64) -> ChannelMonitorUpdateStatus; + /// Returns any monitor events since the last call. Subsequent calls must only return new /// events. /// diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index d1489e27168..4040f2de06e 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -875,11 +875,22 @@ pub struct ChannelTransactionParameters { /// The late-bound counterparty channel transaction parameters. /// These parameters are populated at the point in the protocol where the counterparty provides them. pub counterparty_parameters: Option, - /// The late-bound funding outpoint + /// The late-bound funding outpoint. + /// + /// If it's a vanilla LN channel, this value corresponds to the actual funding outpoint that + /// goes on-chain when the channel is created. + /// + /// If instead we're dealing with a split channel, this value corresponds to the output of a + /// glue transaction which sits in between the funding transaction and the commitment + /// transaction. pub funding_outpoint: Option, /// This channel's type, as negotiated during channel open. For old objects where this field /// wasn't serialized, it will default to static_remote_key at deserialization. 
- pub channel_type_features: ChannelTypeFeatures + pub channel_type_features: ChannelTypeFeatures, + /// This value always corresponds to the actual funding outpoint. This is different to + /// [`ChannelTransactionParameters::funding_outpoint`], which varies depending on the type + /// of Lightning channel we have. + pub original_funding_outpoint: Option, } /// Late-bound per-channel counterparty data used to build transactions. @@ -938,6 +949,7 @@ impl Writeable for ChannelTransactionParameters { (8, self.funding_outpoint, option), (10, legacy_deserialization_prevention_marker, option), (11, self.channel_type_features, required), + (14, self.original_funding_outpoint, option), }); Ok(()) } @@ -952,6 +964,7 @@ impl Readable for ChannelTransactionParameters { let mut funding_outpoint = None; let mut _legacy_deserialization_prevention_marker: Option<()> = None; let mut channel_type_features = None; + let mut original_funding_outpoint = None; read_tlv_fields!(reader, { (0, holder_pubkeys, required), @@ -961,6 +974,7 @@ impl Readable for ChannelTransactionParameters { (8, funding_outpoint, option), (10, _legacy_deserialization_prevention_marker, option), (11, channel_type_features, option), + (14, original_funding_outpoint, option), }); let mut additional_features = ChannelTypeFeatures::empty(); @@ -973,7 +987,8 @@ impl Readable for ChannelTransactionParameters { is_outbound_from_holder: is_outbound_from_holder.0.unwrap(), counterparty_parameters, funding_outpoint, - channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()) + channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()), + original_funding_outpoint, }) } } @@ -1099,6 +1114,7 @@ impl HolderCommitmentTransaction { counterparty_parameters: Some(CounterpartyChannelTransactionParameters { pubkeys: channel_pubkeys.clone(), selected_contest_delay: 0 }), funding_outpoint: Some(chain::transaction::OutPoint { txid: 
Txid::all_zeros(), index: 0 }), channel_type_features: ChannelTypeFeatures::only_static_remote_key(), + original_funding_outpoint: None, }; let mut counterparty_htlc_sigs = Vec::new(); for _ in 0..htlcs.len() { @@ -1879,13 +1895,14 @@ mod tests { let holder_pubkeys = signer.pubkeys(); let counterparty_pubkeys = counterparty_signer.pubkeys().clone(); let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint); - let channel_parameters = ChannelTransactionParameters { + let channel_parameters = ChannelTransactionParameters { holder_pubkeys: holder_pubkeys.clone(), holder_selected_contest_delay: 0, is_outbound_from_holder: false, counterparty_parameters: Some(CounterpartyChannelTransactionParameters { pubkeys: counterparty_pubkeys.clone(), selected_contest_delay: 0 }), funding_outpoint: Some(chain::transaction::OutPoint { txid: Txid::all_zeros(), index: 0 }), channel_type_features: ChannelTypeFeatures::only_static_remote_key(), + original_funding_outpoint: None, }; let htlcs_with_aux = Vec::new(); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a61a8de82de..48cc60eba29 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -713,7 +713,7 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { latest_monitor_update_id: u64, - holder_signer: ChannelSignerType<::Signer>, + pub(crate) holder_signer: ChannelSignerType<::Signer>, shutdown_scriptpubkey: Option, destination_script: Script, @@ -933,7 +933,7 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { /// The unique identifier used to re-derive the private key material for the channel through /// [`SignerProvider::derive_channel_signer`]. 
- channel_keys_id: [u8; 32], + pub(crate) channel_keys_id: [u8; 32], /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we /// store it here and only release it to the `ChannelManager` once it asks for it. @@ -1070,6 +1070,29 @@ impl ChannelContext where SP::Target: SignerProvider { self.channel_transaction_parameters.funding_outpoint } + /// Returns the funding txo which is always the one that was confirmed on chain, even if the + /// channel is split. + pub fn get_original_funding_txo(&self) -> Option { + self.channel_transaction_parameters.original_funding_outpoint.or(self.get_funding_txo()) + } + + /// Set the funding output and value of the channel. + fn set_funding_outpoint(&mut self, funding_outpoint: &OutPoint, channel_value_satoshis: u64, own_balance: u64) + { + self.channel_value_satoshis = channel_value_satoshis; + self.holder_signer.as_mut().set_channel_value_satoshis(channel_value_satoshis); + self.value_to_self_msat = own_balance + self.pending_outbound_htlcs.iter().map(|x| x.amount_msat).sum::(); + + let original_funding_outpoint = self.channel_transaction_parameters.original_funding_outpoint.unwrap_or_else(|| self.channel_transaction_parameters.funding_outpoint.unwrap()); + self.channel_transaction_parameters.funding_outpoint = Some(funding_outpoint.clone()); + self.channel_transaction_parameters.original_funding_outpoint = if &original_funding_outpoint != funding_outpoint { + Some(original_funding_outpoint.clone()) + } else { + None + }; + } + + /// Returns the block hash in which our funding transaction was confirmed. 
pub fn get_funding_tx_confirmed_in(&self) -> Option { self.funding_tx_confirmed_in @@ -2043,7 +2066,7 @@ impl ChannelContext where SP::Target: SignerProvider { _ => {} } } - let monitor_update = if let Some(funding_txo) = self.get_funding_txo() { + let monitor_update = if let Some(funding_txo) = self.get_original_funding_txo() { // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent), // returning a channel monitor update here would imply a channel monitor update before // we even registered the channel monitor to begin with, which is invalid. @@ -3880,7 +3903,7 @@ impl Channel where Ok(()) } - fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK { + pub(super) fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK { let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2); msgs::RevokeAndACK { @@ -4878,8 +4901,54 @@ impl Channel where msgs = (Some(channel_ready), announcement_sigs); } } + + // If we have a vanilla LN channel, this checks if the transaction + // spends from the actual funding output. That could be either a + // commitment transaction or a mutual close transaction. + // + // If we have a split channel, this checks if the transaction spends + // from the glue output. That could only be a commitment + // transaction. + let is_funding_or_glue_txo = |prev_outpoint: &bitcoin::OutPoint| -> bool { + prev_outpoint == &funding_txo.into_bitcoin_outpoint() + }; + + // This check only runs if the check above returns `false`. We know + // that a vanilla LN channel can only be closed by spending from the + // original funding output, so in this check we are only considering + // split channels. 
+ // + // The other ways in which a split channel could be closed are: + // + // - Through a mutual close of the _LN_ channel, which would spend + // directly from the original funding output. + // + // - Through the publication of a revoked commitment transaction + // spending from the original funding output! + // + // And that's exactly what we check here: whether the transaction + // spends from the original funding output and, if it does, whether + // the transaction is NOT the split transaction (the only other + // possible option). + // + // We do not announce the closing of the LN channel with the split + // transaction, because that is reserved to either mutual close or + // commitment transactions. LDK will only react to this announcement + // once, so we should not waste it on the split transaction, as this + // can lead to loss of funds. + let is_final_tx_spending_from_original_funding_txo = |prev_outpoint: &bitcoin::OutPoint, outputs: &[bitcoin::TxOut]| -> bool { + match self.context.get_original_funding_txo().map(|x| x.into_bitcoin_outpoint()) { + Some(original_funding_outpoint) => { + // Transaction spends from actual funding output. + prev_outpoint == &original_funding_outpoint && + // Transaction is _not_ a split transaction. 
+ !(outputs.len() == 2 && outputs[0].script_pubkey == outputs[1].script_pubkey) + } + None => false, + } + }; for inp in tx.input.iter() { - if inp.previous_output == funding_txo.into_bitcoin_outpoint() { + if is_funding_or_glue_txo(&inp.previous_output) || is_final_tx_spending_from_original_funding_txo(&inp.previous_output, &tx.output) { log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id()); return Err(ClosureReason::CommitmentTxConfirmed); } @@ -5251,6 +5320,7 @@ impl<SP: Deref> Channel<SP> where // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the // txid of that interactive transaction, else we MUST NOT set it. next_funding_txid: None, + sub_channel_state: None, } } @@ -5499,9 +5569,9 @@ impl<SP: Deref> Channel<SP> where signature = res.0; htlc_signatures = res.1; - log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", + log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} with value {} -> {} in channel {}", encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction), - &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()), + &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()), self.context.channel_value_satoshis, log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id()); for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) { @@ -5567,9 +5637,6 @@ impl<SP: Deref> Channel<SP> where /// Begins the shutdown process, getting a message for the remote peer and returning all /// holding cell HTLCs for payment failure. - /// - /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no - /// [`ChannelMonitorUpdate`] will be returned). 
pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures, target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError> @@ -5595,16 +5662,9 @@ impl<SP: Deref> Channel<SP> where return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()}); } - // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown - // script is set, we just force-close and call it a day. - let mut chan_closed = false; - if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { - chan_closed = true; - } - let update_shutdown_script = match self.context.shutdown_scriptpubkey { Some(_) => false, - None if !chan_closed => { + None => { // use override shutdown script if provided let shutdown_scriptpubkey = match override_shutdown_script { Some(script) => script, @@ -5622,16 +5682,11 @@ impl<SP: Deref> Channel<SP> where self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey); true }, - None => false, }; // From here on out, we may not fail! self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw; - if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { - self.context.channel_state = ChannelState::ShutdownComplete as u32; - } else { - self.context.channel_state |= ChannelState::LocalShutdownSent as u32; - } + self.context.channel_state |= ChannelState::LocalShutdownSent as u32; self.context.update_time_counter += 1; let monitor_update = if update_shutdown_script { @@ -5681,6 +5736,21 @@ impl<SP: Deref> Channel<SP> where }) .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash))) } + + /// Set the funding output and value of the channel, returning a `ChannelMonitorUpdate` + /// containing a commitment for the new funding output if requested. 
+ pub fn set_funding_outpoint<L: Deref>(&mut self, funding_outpoint: &OutPoint, channel_value_satoshis: u64, own_balance: u64, need_commitment: bool, logger: &L) -> Option<ChannelMonitorUpdate> + where + L::Target: Logger + { + self.context.set_funding_outpoint(funding_outpoint, channel_value_satoshis, own_balance); + + if need_commitment { + let monitor_update = self.build_commitment_no_status_check(logger); + self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + self.push_ret_blockable_mon_update(monitor_update) + } else { None } + } } /// A not-yet-funded outbound (from holder) channel using V1 channel establishment. @@ -5847,7 +5917,8 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider { is_outbound_from_holder: true, counterparty_parameters: None, funding_outpoint: None, - channel_type_features: channel_type.clone() + channel_type_features: channel_type.clone(), + original_funding_outpoint: None, }, funding_transaction: None, is_batch_funding: None, @@ -6500,7 +6571,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider { pubkeys: counterparty_pubkeys, }), funding_outpoint: None, - channel_type_features: channel_type.clone() + channel_type_features: channel_type.clone(), + original_funding_outpoint: None, }, funding_transaction: None, is_batch_funding: None, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 62c6741fbdf..d439e2af91b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -21,6 +21,7 @@ use bitcoin::blockdata::block::BlockHeader; use bitcoin::blockdata::transaction::Transaction; use bitcoin::blockdata::constants::{genesis_block, ChainHash}; use bitcoin::network::constants::Network; +use bitcoin::Script; use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; @@ -79,6 +80,11 @@ use core::ops::Deref; // Re-export this for use in the public API. 
pub use crate::ln::outbound_payment::{PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields}; use crate::ln::script::ShutdownScript; +use super::msgs::{CommitmentSigned, RevokeAndACK}; + +/// A tuple containing a [`CommitmentSigned`] message and the commitment transaction number it +/// corresponds to. +pub type NumberedCommitmentSigned = (CommitmentSigned, u64); // We hold various information about HTLC relay in the HTLC objects in Channel itself: // @@ -783,6 +789,18 @@ struct PendingInboundPayment { min_value_msat: Option<u64>, } +/// A structure holding a reference to a channel while under lock. +pub struct ChannelLock<'a, SP: Deref> where SP::Target: SignerProvider { + channel: &'a mut Channel<SP>, + mon_update_blocked: bool, +} + +impl<'a, SP: Deref> ChannelLock<'a, SP> where SP::Target: SignerProvider { + fn get_channel(&mut self) -> &mut Channel<SP> { + self.channel + } +} + /// [`SimpleArcChannelManager`] is useful when you need a [`ChannelManager`] with a static lifetime, e.g. /// when you're using `lightning-net-tokio` (since `tokio::spawn` requires parameters with static /// lifetimes). Other times you can afford a reference, which is more efficient, in which case @@ -1630,6 +1648,17 @@ pub struct ChannelDetails { /// /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109. pub config: Option<ChannelConfig>, /// The late bound redeemscript used for the funding output. pub funding_redeemscript: Option<Script>