diff --git a/.gitmodules b/.gitmodules index 6c84ae9c6..3421a1624 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,6 @@ [submodule "go-ethereum"] path = go-ethereum - url = https://github.com/OffchainLabs/go-ethereum.git + url = git@github.com:Layr-Labs/nitro-go-ethereum-private.git [submodule "fastcache"] path = fastcache url = https://github.com/OffchainLabs/fastcache.git diff --git a/Dockerfile b/Dockerfile index faa0bd6db..408a04cb5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -68,7 +68,7 @@ COPY ./blsSignatures ./blsSignatures COPY ./cmd/chaininfo ./cmd/chaininfo COPY ./cmd/replay ./cmd/replay COPY ./das/dastree ./das/dastree -COPY ./das/eigenda ./das/eigenda +COPY ./eigenda ./eigenda COPY ./precompiles ./precompiles COPY ./statetransfer ./statetransfer COPY ./util ./util diff --git a/arbitrator/jit/src/syscall.rs b/arbitrator/jit/src/syscall.rs index 4f657eeef..0746f5ebf 100644 --- a/arbitrator/jit/src/syscall.rs +++ b/arbitrator/jit/src/syscall.rs @@ -82,6 +82,7 @@ enum DynamicObject { #[derive(Clone, Debug)] pub struct PendingEvent { pub id: JsValue, + #[allow(dead_code)] pub this: JsValue, pub args: Vec, } diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index ea96c11fd..3ed816415 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -42,7 +42,7 @@ import ( "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" - "github.com/offchainlabs/nitro/das/eigenda" + "github.com/offchainlabs/nitro/eigenda" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util" @@ -64,8 +64,9 @@ var ( const ( batchPosterSimpleRedisLockKey = "node.batch-poster.redis-lock.simple-lock-key" - sequencerBatchPostMethodName = "addSequencerL2BatchFromOrigin0" - sequencerBatchPostWithBlobsMethodName = "addSequencerL2BatchFromBlobs" + sequencerBatchPostMethodName = "addSequencerL2BatchFromOrigin0" + sequencerBatchPostWithBlobsMethodName = "addSequencerL2BatchFromBlobs" + sequencerBatchPostWithEigendaMethodName = "addSequencerL2BatchFromEigenDA" ) type batchPosterPosition struct { @@ -143,6 +144,7 @@ type BatchPosterConfig struct { RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` Post4844Blobs bool `koanf:"post-4844-blobs" reload:"hot"` + PostEigenDA bool `koanf:"post-eigen-da" reload:"hot"` IgnoreBlobPrice bool `koanf:"ignore-blob-price" reload:"hot"` ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` @@ -194,6 +196,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".gas-refunder-address", DefaultBatchPosterConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.Uint64(prefix+".extra-batch-gas", DefaultBatchPosterConfig.ExtraBatchGas, "use this much more gas than estimation says is necessary to post batches") f.Bool(prefix+".post-4844-blobs", DefaultBatchPosterConfig.Post4844Blobs, "if the parent chain supports 4844 blobs and they're well priced, post EIP-4844 blobs") + f.Bool(prefix+".post-eigen-da", DefaultBatchPosterConfig.PostEigenDA, "Post data to EigenDA") f.Bool(prefix+".ignore-blob-price", DefaultBatchPosterConfig.IgnoreBlobPrice, "if the parent chain supports 4844 blobs and ignore-blob-price is true, post 4844 blobs even if it's not price efficient") f.String(prefix+".redis-url", DefaultBatchPosterConfig.RedisUrl, 
"if non-empty, the Redis URL to store queued transactions in") f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)") @@ -221,6 +224,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ GasRefunderAddress: "", ExtraBatchGas: 50_000, Post4844Blobs: false, + PostEigenDA: false, IgnoreBlobPrice: false, DataPoster: dataposter.DefaultDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, @@ -252,6 +256,30 @@ var TestBatchPosterConfig = BatchPosterConfig{ GasRefunderAddress: "", ExtraBatchGas: 10_000, Post4844Blobs: true, + PostEigenDA: false, + IgnoreBlobPrice: false, + DataPoster: dataposter.TestDataPosterConfig, + ParentChainWallet: DefaultBatchPosterL1WalletConfig, + L1BlockBound: "", + L1BlockBoundBypass: time.Hour, + UseAccessLists: true, + GasEstimateBaseFeeMultipleBips: arbmath.OneInBips * 3 / 2, +} + +var EigenDABatchPosterConfig = BatchPosterConfig{ + Enable: true, + MaxSize: 100000, + Max4844BatchSize: DefaultBatchPosterConfig.Max4844BatchSize, + PollInterval: time.Millisecond * 10, + ErrorDelay: time.Millisecond * 10, + MaxDelay: 0, + WaitForMaxDelay: false, + CompressionLevel: 2, + DASRetentionPeriod: time.Hour * 24 * 15, + GasRefunderAddress: "", + ExtraBatchGas: 10_000, + Post4844Blobs: false, + PostEigenDA: true, IgnoreBlobPrice: false, DataPoster: dataposter.TestDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, @@ -611,6 +639,7 @@ type buildingBatch struct { msgCount arbutil.MessageIndex haveUsefulMessage bool use4844 bool + useEigenDA bool } func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog uint64, use4844 bool) *batchSegments { @@ -847,11 +876,16 @@ func (b *BatchPoster) encodeAddBatch( l2MessageData []byte, delayedMsg uint64, use4844 bool, + useEigenDA bool, + eigenDaBlobInfo *eigenda.EigenDABlobInfo, ) ([]byte, []kzg4844.Blob, error) { methodName := sequencerBatchPostMethodName if use4844 { methodName = sequencerBatchPostWithBlobsMethodName } + if useEigenDA { + methodName = sequencerBatchPostWithEigendaMethodName + } method, ok := b.seqInboxABI.Methods[methodName] if !ok { return nil, nil, errors.New("failed to find add batch method") @@ -872,6 +906,88 @@ func (b *BatchPoster) encodeAddBatch( new(big.Int).SetUint64(uint64(prevMsgNum)), new(big.Int).SetUint64(uint64(newMsgNum)), ) + } else if useEigenDA { + + blobVerificationProofType, err := abi.NewType("tuple", "", []abi.ArgumentMarshaling{ + {Name: "batchID", Type: "uint32"}, + {Name: "blobIndex", Type: "uint32"}, + {Name: "batchMetadata", Type: "tuple", + Components: []abi.ArgumentMarshaling{ + {Name: "batchHeader", Type: "tuple", + Components: []abi.ArgumentMarshaling{ + {Name: "blobHeadersRoot", Type: "bytes32"}, + {Name: "quorumNumbers", Type: "bytes"}, + {Name: "signedStakeForQuorums", Type: "bytes"}, + {Name: "referenceBlockNumber", Type: "uint32"}, + }, + }, + {Name: "signatoryRecordHash", Type: "bytes32"}, + {Name: "confirmationBlockNumber", Type: "uint32"}, + }, + }, + { + Name: "inclusionProof", + Type: "bytes", + }, + { + Name: "quorumIndices", + Type: "bytes", + }, + }) + + if err != nil { + return nil, nil, err + } + + blobHeaderType, err := abi.NewType("tuple", "", []abi.ArgumentMarshaling{ + {Name: "commitment", Type: "tuple", Components: []abi.ArgumentMarshaling{ + {Name: "X", Type: "uint256"}, + {Name: "Y", Type: "uint256"}, + }}, + {Name: 
"dataLength", Type: "uint32"}, + {Name: "quorumBlobParams", Type: "tuple[]", Components: []abi.ArgumentMarshaling{ + {Name: "quorumNumber", Type: "uint8"}, + {Name: "adversaryThresholdPercentage", Type: "uint8"}, + {Name: "confirmationThresholdPercentage", Type: "uint8"}, + {Name: "chunkLength", Type: "uint32"}, + }}, + }) + if err != nil { + return nil, nil, err + } + + u256Type, err := abi.NewType("uint256", "", nil) + if err != nil { + return nil, nil, err + } + + // Create ABI arguments + arguments := abi.Arguments{ + {Type: u256Type}, + {Type: blobVerificationProofType}, + {Type: blobHeaderType}, + {Type: u256Type}, + {Type: u256Type}, + {Type: u256Type}, + } + + // define values array + values := make([]interface{}, 6) + values[0] = seqNum + values[1] = eigenDaBlobInfo.BlobVerificationProof + values[2] = eigenDaBlobInfo.BlobHeader + values[3] = new(big.Int).SetUint64(delayedMsg) + values[4] = new(big.Int).SetUint64(uint64(prevMsgNum)) + values[5] = new(big.Int).SetUint64(uint64(newMsgNum)) + + // pack arguments + // Pack the BlobHeader + calldata, err = arguments.PackValues(values) + + if err != nil { + return nil, nil, err + } + } else { calldata, err = method.Inputs.Pack( seqNum, @@ -887,6 +1003,7 @@ func (b *BatchPoster) encodeAddBatch( } fullCalldata := append([]byte{}, method.ID...) fullCalldata = append(fullCalldata, calldata...) + println("Full calldata: %s", hexutil.Encode(fullCalldata)) return fullCalldata, kzgBlobs, nil } @@ -907,7 +1024,7 @@ func estimateGas(client rpc.ClientInterface, ctx context.Context, params estimat return uint64(gas), err } -func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realBlobs []kzg4844.Blob, realNonce uint64, realAccessList types.AccessList) (uint64, error) { +func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realBlobs []kzg4844.Blob, realNonce uint64, realAccessList types.AccessList, eigenDaBlobInfo *eigenda.EigenDABlobInfo) (uint64, error) { config := b.config() rpcClient := b.l1Reader.Client() rawRpcClient := rpcClient.Client() @@ -949,7 +1066,7 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, // However, we set nextMsgNum to 1 because it is necessary for a correct estimation for the final to be non-zero. // Because we're likely estimating against older state, this might not be the actual next message, // but the gas used should be the same. 
- data, kzgBlobs, err := b.encodeAddBatch(abi.MaxUint256, 0, 1, sequencerMessage, delayedMessages, len(realBlobs) > 0) + data, kzgBlobs, err := b.encodeAddBatch(abi.MaxUint256, 0, 1, sequencerMessage, delayedMessages, len(realBlobs) > 0, eigenDaBlobInfo != nil, eigenDaBlobInfo) if err != nil { return 0, err } @@ -1044,11 +1161,19 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } } + var useEigenDA bool + if b.eigenDAWriter != nil { + useEigenDA = true + } + + println("use4844", use4844, "useEigenDA", useEigenDA) + b.building = &buildingBatch{ segments: newBatchSegments(batchPosition.DelayedMessageCount, b.config(), b.GetBacklogEstimate(), use4844), msgCount: batchPosition.MessageCount, startMsgCount: batchPosition.MessageCount, use4844: use4844, + useEigenDA: useEigenDA, } } msgCount, err := b.streamer.GetMessageCount() @@ -1224,26 +1349,16 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } } + var blobInfo *eigenda.EigenDABlobInfo if b.daWriter == nil && b.eigenDAWriter != nil { log.Info("Start to write data to eigenda: ", "data", hex.EncodeToString(sequencerMsg)) - daRef, err := b.eigenDAWriter.Store(ctx, sequencerMsg) - if err != nil { - if config.DisableEigenDAFallbackStoreDataOnChain { - log.Warn("Falling back to storing data on chain", "err", err) - return false, errors.New("unable to post batch to EigenDA and fallback storing data on chain is disabled") - } - } - - pointer, err := b.eigenDAWriter.Serialize(daRef) + blobInfo, err = b.eigenDAWriter.Store(ctx, sequencerMsg) if err != nil { - log.Warn("DaRef serialization failed", "err", err) - return false, errors.New("DaRef serialization failed") + return false, err } - log.Info("EigenDA transaction receipt(data pointer): ", "hash", hex.EncodeToString(daRef.BatchHeaderHash), "index", daRef.BlobIndex) - sequencerMsg = pointer } - data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, b.building.use4844) + data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, b.building.use4844, b.building.useEigenDA, blobInfo) if err != nil { return false, err } @@ -1258,7 +1373,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) // In theory, this might reduce gas usage, but only by a factor that's already // accounted for in `config.ExtraBatchGas`, as that same factor can appear if a user // posts a new delayed message that we didn't see while gas estimating. 
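Taken together, the EigenDA path through maybePostSequencerBatch condenses to the sketch below. This is a summary of the hunks above, not new behavior; note that with the deleted DisableEigenDAFallbackStoreDataOnChain handling gone, a failed dispersal now simply aborts the posting attempt instead of falling back to posting calldata on chain.

// Condensed EigenDA posting flow (summary of the code above).
var blobInfo *eigenda.EigenDABlobInfo
if b.daWriter == nil && b.eigenDAWriter != nil {
	// Disperse the compressed batch and wait for EigenDA to return a certificate.
	blobInfo, err = b.eigenDAWriter.Store(ctx, sequencerMsg)
	if err != nil {
		return false, err // no calldata fallback in this revision
	}
}
// Only the certificate (BlobVerificationProof and BlobHeader), not the batch
// data itself, ends up in the addSequencerL2BatchFromEigenDA calldata.
data, kzgBlobs, err := b.encodeAddBatch(
	new(big.Int).SetUint64(batchPosition.NextSeqNum),
	batchPosition.MessageCount, b.building.msgCount,
	sequencerMsg, b.building.segments.delayedMsg,
	b.building.use4844, b.building.useEigenDA, blobInfo,
)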
- gasLimit, err := b.estimateGas(ctx, sequencerMsg, lastPotentialMsg.DelayedMessagesRead, data, kzgBlobs, nonce, accessList) + gasLimit, err := b.estimateGas(ctx, sequencerMsg, lastPotentialMsg.DelayedMessagesRead, data, kzgBlobs, nonce, accessList, blobInfo) if err != nil { return false, err } @@ -1286,6 +1401,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } log.Info( "BatchPoster: batch sent", + "eigenDA", b.building.useEigenDA, "sequenceNumber", batchPosition.NextSeqNum, "from", batchPosition.MessageCount, "to", b.building.msgCount, diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 416ebf725..3214e2fb2 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -1026,6 +1026,7 @@ const minWait = time.Second * 10 func (p *DataPoster) Start(ctxIn context.Context) { p.StopWaiter.Start(ctxIn, p) p.CallIteratively(func(ctx context.Context) time.Duration { + println("Data poster CallIteratively") p.mutex.Lock() defer p.mutex.Unlock() err := p.updateBalance(ctx) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 17ec55766..1d2027941 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -23,7 +23,7 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcaster" m "github.com/offchainlabs/nitro/broadcaster/message" - "github.com/offchainlabs/nitro/das/eigenda" + "github.com/offchainlabs/nitro/eigenda" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/containers" ) @@ -616,8 +616,12 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L if t.blobReader != nil { daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(t.blobReader)) } - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, daProviders, t.eigenDA, arbstate.KeysetValidate) + if t.eigenDA != nil { + daProviders = append(daProviders, arbstate.NewDAProviderEigenDA(t.eigenDA)) + } + multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, daProviders, arbstate.KeysetValidate) batchMessageCounts := make(map[uint64]arbutil.MessageIndex) + currentpos := prevbatchmeta.MessageCount + 1 for { if len(backend.batches) == 0 { diff --git a/arbnode/node.go b/arbnode/node.go index b2bf1a1a6..0cd3e99a1 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -33,7 +33,7 @@ import ( "github.com/offchainlabs/nitro/broadcaster" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/das" - "github.com/offchainlabs/nitro/das/eigenda" + "github.com/offchainlabs/nitro/eigenda" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -546,6 +546,8 @@ func createNodeImpl( eigenDAWriter = eigenDAService } + log.Info("EigenDA reader", "reader", eigenDAReader) + inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader, eigenDAReader) if err != nil { return nil, err diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index edda4e551..e66fba1d3 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -14,9 +14,11 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/offchainlabs/nitro/arbstate" 
"github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/eigenda" "github.com/offchainlabs/nitro/solgen/go/bridgegen" ) @@ -35,6 +37,7 @@ const ( batchDataSeparateEvent batchDataNone batchDataBlobHashes + batchDataEigenDA ) func init() { @@ -118,6 +121,7 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut if err != nil { return nil, err } + args := make(map[string]interface{}) err = addSequencerL2BatchFromOriginCallABI.Inputs.UnpackIntoMap(args, data[4:]) if err != nil { @@ -164,6 +168,27 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut data = append(data, h[:]...) } return data, nil + + case batchDataEigenDA: + // get the transaction data from the log + tx, err := arbutil.GetLogTransaction(ctx, client, m.rawLog) + if err != nil { + return nil, err + } + // get the input data from the transaction + // TODO: decide on if you want to parse it here or parse it upstream, I've decided to parse it upstream and include all of the calldata in the batch + calldata := tx.Data() + println("appending EigenDA message header flag to calldata") + // append the eigenDA header flag to the front + data := []byte{eigenda.EigenDAMessageHeaderFlag} + data = append(data, calldata[:]...) + + println(fmt.Sprintf("Returning the following calldata: %s", hexutil.Encode(data))) + + // format of eigenDA data is + // [0 - 1] header flag + // [1 - len(data)] calldata + return data, nil default: return nil, fmt.Errorf("batch has invalid data location %v", m.dataLocation) } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 7d00273bb..8c8001715 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -14,6 +14,7 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/log" @@ -25,7 +26,7 @@ import ( "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/das/dastree" - "github.com/offchainlabs/nitro/das/eigenda" + "github.com/offchainlabs/nitro/eigenda" "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/zeroheavy" ) @@ -70,7 +71,7 @@ var ( ErrInvalidBlobDataFormat = errors.New("blob batch data is not a list of hashes as expected") ) -func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, daProviders []DataAvailabilityProvider, eigenDAReader eigenda.EigenDAReader, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { +func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -82,8 +83,12 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash afterDelayedMessages: binary.BigEndian.Uint64(data[32:40]), segments: [][]byte{}, } + + log.Info("Reading calldata payload from sequencer inbox", "calldata", hexutil.Encode(data)) payload := data[40:] log.Info("Inbox parse sequencer message: ", "payload", hex.EncodeToString(payload)) + log.Info("Inbox parse header message: ", "header", hex.EncodeToString(data[:40])) + log.Info("Parsed header", "struct", fmt.Sprintf("%+v", parsedMsg)) // Stage 0: Check if our node is out of date and we don't 
understand this batch type // If the parent chain sequencer inbox smart contract authenticated this batch, @@ -98,27 +103,12 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // as these headers are validated by the sequencer inbox and not other DASs. // We try to extract payload from the first occuring valid DA provider in the daProviders list if len(payload) > 0 { + println("looking for DA provider") foundDA := false var err error - // detect eigenda message from byte - if eigenda.IsEigenDAMessageHeaderByte(payload[0]) { - if eigenDAReader == nil { - log.Error("No EigenDA Reader configured, but sequencer message found with EigenDA header") - } else { - var err error - payload, err = eigenda.RecoverPayloadFromEigenDABatch(ctx, payload[1:], eigenDAReader, nil) - if err != nil { - return nil, err - } - if payload == nil { - return parsedMsg, nil - } - foundDA = true - } - } - for _, provider := range daProviders { + println(fmt.Sprintf("Reading message from provider: %v", provider)) if provider != nil && provider.IsValidHeaderByte(payload[0]) { payload, err = provider.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode) if err != nil { @@ -137,6 +127,8 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash log.Error("No DAS Reader configured, but sequencer message found with DAS header") } else if IsBlobHashesHeaderByte(payload[0]) { return nil, ErrNoBlobReader + } else if eigenda.IsEigenDAMessageHeaderByte(payload[0]) { + log.Error("eigenDA versioned batch payload was encountered but no instance of EigenDA was configured") } } } @@ -385,6 +377,35 @@ func (b *dAProviderForBlobReader) RecoverPayloadFromBatch( return payload, nil } +// NewDAProviderEigenDA is generally meant to be only used by nitro. 
+// DA Providers should implement methods in the DataAvailabilityProvider interface independently +func NewDAProviderEigenDA(eigenDAReader eigenda.EigenDAReader) *daProviderForEigenDA { + return &daProviderForEigenDA{ + eigenDAReader: eigenDAReader, + } +} + +type daProviderForEigenDA struct { + eigenDAReader eigenda.EigenDAReader +} + +func (e *daProviderForEigenDA) IsValidHeaderByte(headerByte byte) bool { + return eigenda.IsEigenDAMessageHeaderByte(headerByte) +} + +func (e *daProviderForEigenDA) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, +) ([]byte, error) { + // we start from the 41st byte of sequencerMsg because bytes 0 - 40 are the header, and 40 - 41 is the eigenDA header flag + // we use the binary domain here because this is what we use in the derivation pipeline + return eigenda.RecoverPayloadFromEigenDABatch(ctx, sequencerMsg[41:], e.eigenDAReader, preimages, "binary") +} + type KeysetValidationMode uint8 const KeysetValidate KeysetValidationMode = 0 @@ -395,7 +416,6 @@ type inboxMultiplexer struct { backend InboxBackend delayedMessagesRead uint64 daProviders []DataAvailabilityProvider - eigenDAReader eigenda.EigenDAReader cachedSequencerMessage *sequencerMessage cachedSequencerMessageNum uint64 cachedSegmentNum uint64 @@ -405,12 +425,11 @@ type inboxMultiplexer struct { keysetValidationMode KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, daProviders []DataAvailabilityProvider, eigenDAReader eigenda.EigenDAReader, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: delayedMessagesRead, daProviders: daProviders, - eigenDAReader: eigenDAReader, keysetValidationMode: keysetValidationMode, } } @@ -424,6 +443,7 @@ const BatchSegmentKindAdvanceL1BlockNumber uint8 = 4 // Pop returns the message from the top of the sequencer inbox and removes it from the queue. 
// Note: this does *not* return parse errors, those are transformed into invalid messages func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMetadata, error) { + println("Popping message from sequencer inbox") if r.cachedSequencerMessage == nil { // Note: batchBlockHash will be zero in the replay binary, but that's fine bytes, batchBlockHash, realErr := r.backend.PeekSequencerInbox() @@ -432,7 +452,7 @@ func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMeta } r.cachedSequencerMessageNum = r.backend.GetSequencerInboxPosition() var err error - r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.daProviders, r.eigenDAReader, r.keysetValidationMode) + r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.daProviders, r.keysetValidationMode) if err != nil { return nil, err } diff --git a/arbstate/inbox_fuzz_test.go b/arbstate/inbox_fuzz_test.go index dcf43fd0d..b34c02534 100644 --- a/arbstate/inbox_fuzz_test.go +++ b/arbstate/inbox_fuzz_test.go @@ -67,7 +67,7 @@ func FuzzInboxMultiplexer(f *testing.F) { delayedMessage: delayedMsg, positionWithinMessage: 0, } - multiplexer := NewInboxMultiplexer(backend, 0, nil, nil, KeysetValidate) + multiplexer := NewInboxMultiplexer(backend, 0, nil, KeysetValidate) _, err := multiplexer.Pop(context.TODO()) if err != nil { panic(err) diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index 1c8b85810..7f822eff6 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -37,6 +37,10 @@ func main() { ctx := context.Background() + /* EigenDA dependency contracts */ + svcManagerString := flag.String("svcManager", "0x0000000000000000000000000000000000000000", "the address of the eigenda service manager contract") + daRollupManagerString := flag.String("daRollupManager", "0x0000000000000000000000000000000000000000", "the address of the eigenda rollup manager contract") + l1conn := flag.String("l1conn", "", "l1 connection") l1keystore := flag.String("l1keystore", "", "l1 private key store") deployAccount := flag.String("l1DeployAccount", "", "l1 seq account to use (default is first account in keystore)") @@ -178,6 +182,9 @@ func main() { defer l1Reader.StopAndWait() nativeToken := common.HexToAddress(*nativeTokenAddressString) + eigenDASvcManager := common.HexToAddress(*svcManagerString) + eigenDARollupManager := common.HexToAddress(*daRollupManagerString) + deployedAddresses, err := deploycode.DeployOnL1( ctx, l1Reader, @@ -189,6 +196,8 @@ func main() { nativeToken, maxDataSize, *isUsingFeeToken, + eigenDASvcManager, + eigenDARollupManager, ) if err != nil { flag.Usage() diff --git a/cmd/replay/main.go b/cmd/replay/main.go index f9c1a84a7..12fd92190 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -31,7 +31,7 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/das/dastree" - "github.com/offchainlabs/nitro/das/eigenda" + "github.com/offchainlabs/nitro/eigenda" "github.com/offchainlabs/nitro/gethhook" "github.com/offchainlabs/nitro/wavmio" ) @@ -150,18 +150,30 @@ func (r *BlobPreimageReader) Initialize(ctx context.Context) error { return nil } -// struct for recovering data from preimage, impl interface EigenDAReader - -func (dasReader *PreimageEigenDAReader) QueryBlob(ctx context.Context, ref *eigenda.EigenDARef) ([]byte, error) { - dataPointer, err := ref.Serialize() +// QueryBlob returns the blob for 
the given cert from the preimage oracle using the hash of the +// certificate kzg commitment for identifying the preimage. +func (dasReader *PreimageEigenDAReader) QueryBlob(ctx context.Context, cert *eigenda.EigenDABlobInfo, domain string) ([]byte, error) { + kzgCommit, err := cert.SerializeCommitment() if err != nil { return nil, err } shaDataHash := sha256.New() - shaDataHash.Write(dataPointer) + shaDataHash.Write(kzgCommit) dataHash := shaDataHash.Sum([]byte{}) - // check function eigenda.RecoverPayloadFromEigenDABatch, the data population and data reading should be matched. - return wavmio.ResolveTypedPreimage(arbutil.Sha2_256PreimageType, common.BytesToHash(dataHash)) + dataHash[0] = 1 + preimage, err := wavmio.ResolveTypedPreimage(arbutil.EigenDaPreimageType, common.BytesToHash(dataHash)) + if err != nil { + return nil, err + } + + // since the preimage is in encoded co-efficient form, we need to decode it to get the actual blob + // i.e,polynomial -> FFT -> length decode -> inverse onec -> blob + decodedBlob, err := eigenda.DecodeiFFTBlob(preimage) + if err != nil { + println("Error decoding blob: ", err) + return nil, err + } + return decodedBlob, nil } // To generate: @@ -213,18 +225,18 @@ func main() { panic(fmt.Sprintf("Error opening state db: %v", err.Error())) } - readMessage := func(dasEnabled bool) *arbostypes.MessageWithMetadata { + readMessage := func(dasEnabled bool, eigenDAEnabled bool) *arbostypes.MessageWithMetadata { var delayedMessagesRead uint64 if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - // due to the lack of abstraction, we have to define our own Reader here. - // once we have a way to unify the interface between DataAvailabilityReader and EigenDAReader, we should be able to retain the old struct. - // todo make it compatible with dasReader - // var dasReader arbstate.DataAvailabilityReader + var dasReader *PreimageDASReader + var eigenDAReader *PreimageEigenDAReader if dasEnabled { dasReader = &PreimageDASReader{} + } else if eigenDAEnabled { + eigenDAReader = &PreimageEigenDAReader{} } backend := WavmInbox{} var keysetValidationMode = arbstate.KeysetPanicIfInvalid @@ -232,12 +244,15 @@ func main() { keysetValidationMode = arbstate.KeysetDontValidate } var daProviders []arbstate.DataAvailabilityProvider - // TODO: add dasReader of type eigenda.EigenDAReader when it conforms to interface + if dasReader != nil { daProviders = append(daProviders, arbstate.NewDAProviderDAS(dasReader)) } + if eigenDAReader != nil { + daProviders = append(daProviders, arbstate.NewDAProviderEigenDA(eigenDAReader)) + } daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(&BlobPreimageReader{})) - inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, daProviders, nil, keysetValidationMode) + inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, daProviders, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) if err != nil { @@ -289,8 +304,7 @@ func main() { } } - // message := readMessage(chainConfig.ArbitrumChainParams.DataAvailabilityCommittee) - message := readMessage(true) + message := readMessage(chainConfig.ArbitrumChainParams.DataAvailabilityCommittee, chainConfig.ArbitrumChainParams.EigenDA) chainContext := WavmChainContext{} batchFetcher := func(batchNum uint64) ([]byte, error) { @@ -303,8 +317,7 @@ func main() { } else { // Initialize ArbOS with this init message and create the genesis block. 
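Both ends of the preimage oracle must derive the same key: the recording path in eigenda.RecoverPayloadFromEigenDABatch and the replay path in PreimageEigenDAReader.QueryBlob above both hash the serialized KZG commitment and force the leading byte to 0x01 before looking the blob up under arbutil.EigenDaPreimageType. A small illustrative helper (not part of this diff) that captures the shared derivation:

import (
	"crypto/sha256"

	"github.com/ethereum/go-ethereum/common"
	"github.com/offchainlabs/nitro/eigenda"
)

// commitmentPreimageKey shows how both the recording and replay paths key
// EigenDA preimages: sha256 over the serialized KZG commitment, with the
// first byte overwritten by 0x01, matching RecoverPayloadFromEigenDABatch
// and PreimageEigenDAReader.QueryBlob.
func commitmentPreimageKey(cert *eigenda.EigenDABlobInfo) (common.Hash, error) {
	kzgCommit, err := cert.SerializeCommitment()
	if err != nil {
		return common.Hash{}, err
	}
	h := sha256.Sum256(kzgCommit)
	h[0] = 1
	return common.BytesToHash(h[:]), nil
}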
- - message := readMessage(false) + message := readMessage(false, false) initMessage, err := message.Message.ParseInitMessage() if err != nil { diff --git a/contracts b/contracts index a6edf0994..2a561f885 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit a6edf099466faa6b57f3369f0db57dfc2be7e270 +Subproject commit 2a561f88513b1473bd333dbc88f9b11c21df35be diff --git a/das/eigenda/eigenda.go b/das/eigenda/eigenda.go deleted file mode 100644 index 4e4fde501..000000000 --- a/das/eigenda/eigenda.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2024-2024, Alt Research, Inc. -// For license information, see https://github.com/nitro/blob/master/LICENSE - -package eigenda - -import ( - "bytes" - "context" - "crypto/sha256" - "crypto/tls" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "time" - - "github.com/Layr-Labs/eigenda/api/grpc/disperser" - "github.com/Layr-Labs/eigenda/encoding/utils/codec" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbutil" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" -) - -// hasBits returns true if `checking` has all `bits` -func hasBits(checking byte, bits byte) bool { - return (checking & bits) == bits -} - -// EigenDAMessageHeaderFlag indicated that the message is a EigenDARef which will be used to retrieve data from EigenDA -const EigenDAMessageHeaderFlag byte = 0xed - -func IsEigenDAMessageHeaderByte(header byte) bool { - return hasBits(header, EigenDAMessageHeaderFlag) -} - -type EigenDAWriter interface { - Store(context.Context, []byte) (*EigenDARef, error) - Serialize(eigenDARef *EigenDARef) ([]byte, error) -} - -type EigenDAReader interface { - QueryBlob(ctx context.Context, ref *EigenDARef) ([]byte, error) -} - -type EigenDAConfig struct { - Enable bool `koanf:"enable"` - Rpc string `koanf:"rpc"` -} - -func (ec *EigenDAConfig) String() { - fmt.Println(ec.Enable) - fmt.Println(ec.Rpc) - // fmt.Sprintf("enable: %b, rpc: %s", ec.Enable, ec.Rpc) -} - -type EigenDARef struct { - BatchHeaderHash []byte - BlobIndex uint32 -} - -func (b *EigenDARef) Serialize() ([]byte, error) { - buf := new(bytes.Buffer) - err := binary.Write(buf, binary.BigEndian, b.BlobIndex) - if err != nil { - return nil, err - } - _, err = buf.Write(b.BatchHeaderHash) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (b *EigenDARef) Deserialize(data []byte) error { - buf := bytes.NewReader(data) - err := binary.Read(buf, binary.BigEndian, &b.BlobIndex) - if err != nil { - return err - } - // _, err = buf.Read(b.BatchHeaderHash) - err = binary.Read(buf, binary.BigEndian, &b.BatchHeaderHash) - if err != nil { - return err - } - return nil -} - -type EigenDA struct { - client disperser.DisperserClient -} - -func NewEigenDA(rpc string) (*EigenDA, error) { - // nolint:gosec - creds := credentials.NewTLS(&tls.Config{ - InsecureSkipVerify: true, - }) - conn, err := grpc.Dial(rpc, grpc.WithTransportCredentials(creds)) - if err != nil { - return nil, err - } - return &EigenDA{ - client: disperser.NewDisperserClient(conn), - }, nil -} - -func (e *EigenDA) QueryBlob(ctx context.Context, ref *EigenDARef) ([]byte, error) { - res, err := e.client.RetrieveBlob(ctx, &disperser.RetrieveBlobRequest{ - BatchHeaderHash: ref.BatchHeaderHash, - BlobIndex: ref.BlobIndex, - }) - if err != nil { - return nil, err - } - decodedData := codec.RemoveEmptyByteFromPaddedBytes(res.GetData()) - return decodedData, nil -} - -func (e *EigenDA) Store(ctx context.Context, data []byte) 
(*EigenDARef, error) { - encodedData := codec.ConvertByPaddingEmptyByte(data) - disperseBlobRequest := &disperser.DisperseBlobRequest{ - Data: encodedData, - } - - res, err := e.client.DisperseBlob(ctx, disperseBlobRequest) - if err != nil { - return nil, err - } - - ticker := time.NewTicker(time.Second * 5) - defer ticker.Stop() - - var ref *EigenDARef - for range ticker.C { - statusReply, err := e.GetBlobStatus(ctx, res.GetRequestId()) - if err != nil { - log.Error("[eigenda]: GetBlobStatus: ", "error", err.Error()) - continue - } - switch statusReply.GetStatus() { - case disperser.BlobStatus_CONFIRMED, disperser.BlobStatus_FINALIZED: - ref = &EigenDARef{ - BatchHeaderHash: statusReply.GetInfo().GetBlobVerificationProof().GetBatchMetadata().GetBatchHeaderHash(), - BlobIndex: statusReply.GetInfo().GetBlobVerificationProof().GetBlobIndex(), - } - return ref, nil - case disperser.BlobStatus_FAILED: - return nil, errors.New("disperser blob failed") - default: - continue - } - } - return nil, errors.New("disperser blob query status timeout") - -} - -func (e *EigenDA) GetBlobStatus(ctx context.Context, reqeustId []byte) (*disperser.BlobStatusReply, error) { - blockStatusRequest := &disperser.BlobStatusRequest{ - RequestId: reqeustId, - } - return e.client.GetBlobStatus(ctx, blockStatusRequest) -} - -// Serialize implements EigenDAWriter. -func (e *EigenDA) Serialize(eigenDARef *EigenDARef) ([]byte, error) { - eigenDARefData, err := eigenDARef.Serialize() - if err != nil { - log.Warn("eigenDARef serialize error", "err", err) - return nil, err - } - buf := new(bytes.Buffer) - err = binary.Write(buf, binary.BigEndian, EigenDAMessageHeaderFlag) - if err != nil { - log.Warn("batch type byte serialization failed", "err", err) - return nil, err - } - err = binary.Write(buf, binary.BigEndian, eigenDARefData) - - if err != nil { - log.Warn("data pointer serialization failed", "err", err) - return nil, err - } - serializedBlobPointerData := buf.Bytes() - return serializedBlobPointerData, nil -} - -func RecoverPayloadFromEigenDABatch(ctx context.Context, - sequencerMsg []byte, - daReader EigenDAReader, - preimages map[arbutil.PreimageType]map[common.Hash][]byte, -) ([]byte, error) { - log.Info("Start recovering payload from eigenda: ", "data", hex.EncodeToString(sequencerMsg)) - var shaPreimages map[common.Hash][]byte - if preimages != nil { - if preimages[arbutil.Sha2_256PreimageType] == nil { - preimages[arbutil.Sha2_256PreimageType] = make(map[common.Hash][]byte) - } - shaPreimages = preimages[arbutil.Sha2_256PreimageType] - } - var daRef EigenDARef - daRef.BlobIndex = binary.BigEndian.Uint32(sequencerMsg[:4]) - daRef.BatchHeaderHash = sequencerMsg[4:] - log.Info("Data pointer: ", "info", hex.EncodeToString(daRef.BatchHeaderHash), "index", daRef.BlobIndex) - data, err := daReader.QueryBlob(ctx, &daRef) - if err != nil { - log.Error("Failed to query data from EigenDA", "err", err) - return nil, err - } - // record preimage data - log.Info("Recording preimage data for EigenDA") - shaDataHash := sha256.New() - shaDataHash.Write(sequencerMsg) - dataHash := shaDataHash.Sum([]byte{}) - if shaPreimages != nil { - shaPreimages[common.BytesToHash(dataHash)] = data - } - return data, nil -} diff --git a/deploy/deploy.go b/deploy/deploy.go index 33d64d161..f856740c5 100644 --- a/deploy/deploy.go +++ b/deploy/deploy.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" 
"github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/challengegen" @@ -41,23 +42,19 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) } - dummyManager, tx, _, err := bridgegen.DeployEigenDADummyManager(auth, client) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return common.Address{}, fmt.Errorf("dummy manager deploy error: %w", err) - } - reader4844, tx, _, err := yulgen.DeployReader4844(auth, client) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("blob basefee reader deploy error: %w", err) } - seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, reader4844, dummyManager, dummyManager, isUsingFeeToken) + seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, reader4844, isUsingFeeToken) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) } + println("Sequencer inbox deployed at ", seqInboxTemplate.String()) + inboxTemplate, tx, _, err := bridgegen.DeployInbox(auth, client, maxDataSize) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { @@ -240,11 +237,37 @@ func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return rollupCreator, rollupCreatorAddress, validatorUtils, validatorWalletCreator, nil } -func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPosters []common.Address, batchPosterManager common.Address, authorizeValidators uint64, config rollupgen.Config, nativeToken common.Address, maxDataSize *big.Int, isUsingFeeToken bool) (*chaininfo.RollupAddresses, error) { +func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPosters []common.Address, batchPosterManager common.Address, authorizeValidators uint64, config rollupgen.Config, nativeToken common.Address, maxDataSize *big.Int, isUsingFeeToken bool, eigenDASvcManager common.Address, eigenDARollupManager common.Address) (*chaininfo.RollupAddresses, error) { if config.WasmModuleRoot == (common.Hash{}) { return nil, errors.New("no machine specified") } + if eigenDARollupManager == (common.Address{0x0}) { + log.Warn("No EigenDA Rollup Manager contract address specified, deploying dummy rollup manager instead") + + dummyRollupManager, tx, _, err := bridgegen.DeployEigenDADummyManager(deployAuth, parentChainReader.Client()) + err = andTxSucceeded(ctx, parentChainReader, tx, err) + if err != nil { + return nil, fmt.Errorf("dummy manager deploy error: %w", err) + } + + log.Info("Dummy eigenda rollup manager deployed", "address", dummyRollupManager.String()) + eigenDARollupManager = dummyRollupManager + } + + if eigenDASvcManager == (common.Address{0x0}) { + log.Warn("No EigenDA Service Manager contract address specified, deploying dummy service manager instead") + + dummySvcManager, tx, _, err := bridgegen.DeployDummyServiceManager(deployAuth, parentChainReader.Client()) + err = andTxSucceeded(ctx, parentChainReader, tx, err) + if err != nil { + return nil, fmt.Errorf("dummy svc manager deploy error: %w", err) + } + + log.Info("Dummy eigenda service manager", "address", dummySvcManager.String()) + eigenDASvcManager = dummySvcManager + + } rollupCreator, _, 
validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth, maxDataSize, isUsingFeeToken) if err != nil { return nil, fmt.Errorf("error deploying rollup creator: %w", err) @@ -264,6 +287,8 @@ func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade MaxFeePerGasForRetryables: big.NewInt(0), // needed when utility factories are deployed BatchPosters: batchPosters, BatchPosterManager: batchPosterManager, + EigenDAServiceManager: eigenDASvcManager, + EigenDARollupManager: eigenDARollupManager, } tx, err := rollupCreator.CreateRollup( diff --git a/eigenda/decoding.go b/eigenda/decoding.go new file mode 100644 index 000000000..c509c4bfc --- /dev/null +++ b/eigenda/decoding.go @@ -0,0 +1,93 @@ +package eigenda + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + + "github.com/Layr-Labs/eigenda/encoding" + "github.com/Layr-Labs/eigenda/encoding/fft" + "github.com/Layr-Labs/eigenda/encoding/rs" + "github.com/Layr-Labs/eigenda/encoding/utils/codec" +) + +/* + These decodings are translated directly from core EigenDA client codec: + - https://github.com/Layr-Labs/eigenda/blob/44569ec461c9a1dd1191e7999a72e63bd1e7aba9/api/clients/codecs/ifft_codec.go#L27-L38 +*/ + +func FFT(data []byte) ([]byte, error) { + dataFr, err := rs.ToFrArray(data) + if err != nil { + return nil, fmt.Errorf("error converting data to fr.Element: %w", err) + } + dataFrLen := uint64(len(dataFr)) + dataFrLenPow2 := encoding.NextPowerOf2(dataFrLen) + + if dataFrLenPow2 != dataFrLen { + return nil, fmt.Errorf("data length %d is not a power of 2", dataFrLen) + } + + maxScale := uint8(math.Log2(float64(dataFrLenPow2))) + + fs := fft.NewFFTSettings(maxScale) + + dataFFTFr, err := fs.FFT(dataFr, false) + if err != nil { + return nil, fmt.Errorf("failed to perform FFT: %w", err) + } + + return rs.ToByteArray(dataFFTFr, dataFrLenPow2*encoding.BYTES_PER_SYMBOL), nil +} + +func DecodeiFFTBlob(data []byte) ([]byte, error) { + if len(data) == 0 { + return nil, fmt.Errorf("blob has length 0, meaning it is malformed") + } + var err error + data, err = FFT(data) + if err != nil { + return nil, fmt.Errorf("error FFTing data: %w", err) + } + + return GenericDecodeBlob(data) +} + +func GenericDecodeBlob(data []byte) ([]byte, error) { + if len(data) <= 32 { + return nil, fmt.Errorf("data is not of length greater than 32 bytes: %d", len(data)) + } + + data, err := DecodeBlob(data) + if err != nil { + return nil, err + } + + return data, nil +} + +func DecodeBlob(data []byte) ([]byte, error) { + if len(data) < 32 { + return nil, fmt.Errorf("blob does not contain 32 header bytes, meaning it is malformed") + } + + length := binary.BigEndian.Uint32(data[2:6]) + + // decode raw data modulo bn254 + decodedData := codec.RemoveEmptyByteFromPaddedBytes(data[32:]) + + // get non blob header data + reader := bytes.NewReader(decodedData) + rawData := make([]byte, length) + n, err := reader.Read(rawData) + if err != nil { + return nil, fmt.Errorf("failed to copy unpadded data into final buffer, length: %d, bytes read: %d", length, n) + } + if uint32(n) != length { + return nil, fmt.Errorf("data length does not match length prefix") + } + + return rawData, nil + +} diff --git a/eigenda/eigenda.go b/eigenda/eigenda.go new file mode 100644 index 000000000..534e9eb81 --- /dev/null +++ b/eigenda/eigenda.go @@ -0,0 +1,173 @@ +package eigenda + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "errors" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/offchainlabs/nitro/arbutil" +) + +const ( + // NOTE - this will need to be updated everytime there are changes to the Inbox interface + // TODO - consoldiate ABI to only include `addSequencerBatchFromEigenDA` method signature or add ingestion of ABI file upon initialization of an arbitrum node + sequencerInboxABI = `[{"type":"constructor","inputs":[{"name":"_maxDataSize","type":"uint256","internalType":"uint256"},{"name":"reader4844_","type":"address","internalType":"contract IReader4844"},{"name":"eigenDAServiceManager_","type":"address","internalType":"contract IEigenDAServiceManager"},{"name":"eigenDARollupManager_","type":"address","internalType":"contract IRollupManager"},{"name":"_isUsingFeeToken","type":"bool","internalType":"bool"}],"stateMutability":"nonpayable"},{"type":"function","name":"BROTLI_MESSAGE_HEADER_FLAG","inputs":[],"outputs":[{"name":"","type":"bytes1","internalType":"bytes1"}],"stateMutability":"view"},{"type":"function","name":"DAS_MESSAGE_HEADER_FLAG","inputs":[],"outputs":[{"name":"","type":"bytes1","internalType":"bytes1"}],"stateMutability":"view"},{"type":"function","name":"DATA_AUTHENTICATED_FLAG","inputs":[],"outputs":[{"name":"","type":"bytes1","internalType":"bytes1"}],"stateMutability":"view"},{"type":"function","name":"DATA_BLOB_HEADER_FLAG","inputs":[],"outputs":[{"name":"","type":"bytes1","internalType":"bytes1"}],"stateMutability":"view"},{"type":"function","name":"EIGENDA_MESSAGE_HEADER_FLAG","inputs":[],"outputs":[{"name":"","type":"bytes1","internalType":"bytes1"}],"stateMutability":"view"},{"type":"function","name":"HEADER_LENGTH","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"TREE_DAS_MESSAGE_HEADER_FLAG","inputs":[],"outputs":[{"name":"","type":"bytes1","internalType":"bytes1"}],"stateMutability":"view"},{"type":"function","name":"ZERO_HEAVY_MESSAGE_HEADER_FLAG","inputs":[],"outputs":[{"name":"","type":"bytes1","internalType":"bytes1"}],"stateMutability":"view"},{"type":"function","name":"addSequencerL2Batch","inputs":[{"name":"sequenceNumber","type":"uint256","internalType":"uint256"},{"name":"data","type":"bytes","internalType":"bytes"},{"name":"afterDelayedMessagesRead","type":"uint256","internalType":"uint256"},{"name":"gasRefunder","type":"address","internalType":"contract IGasRefunder"},{"name":"prevMessageCount","type":"uint256","internalType":"uint256"},{"name":"newMessageCount","type":"uint256","internalType":"uint256"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"addSequencerL2BatchFromBlobs","inputs":[{"name":"sequenceNumber","type":"uint256","internalType":"uint256"},{"name":"afterDelayedMessagesRead","type":"uint256","internalType":"uint256"},{"name":"gasRefunder","type":"address","internalType":"contract IGasRefunder"},{"name":"prevMessageCount","type":"uint256","internalType":"uint256"},{"name":"newMessageCount","type":"uint256","internalType":"uint256"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"addSequencerL2BatchFromEigenDA","inputs":[{"name":"sequenceNumber","type":"uint256","internalType":"uint256"},{"name":"blobVerificationProof","type":"tuple","internalType":"struct 
EigenDARollupUtils.BlobVerificationProof","components":[{"name":"batchId","type":"uint32","internalType":"uint32"},{"name":"blobIndex","type":"uint32","internalType":"uint32"},{"name":"batchMetadata","type":"tuple","internalType":"struct IEigenDAServiceManager.BatchMetadata","components":[{"name":"batchHeader","type":"tuple","internalType":"struct IEigenDAServiceManager.BatchHeader","components":[{"name":"blobHeadersRoot","type":"bytes32","internalType":"bytes32"},{"name":"quorumNumbers","type":"bytes","internalType":"bytes"},{"name":"signedStakeForQuorums","type":"bytes","internalType":"bytes"},{"name":"referenceBlockNumber","type":"uint32","internalType":"uint32"}]},{"name":"signatoryRecordHash","type":"bytes32","internalType":"bytes32"},{"name":"confirmationBlockNumber","type":"uint32","internalType":"uint32"}]},{"name":"inclusionProof","type":"bytes","internalType":"bytes"},{"name":"quorumIndices","type":"bytes","internalType":"bytes"}]},{"name":"blobHeader","type":"tuple","internalType":"struct IEigenDAServiceManager.BlobHeader","components":[{"name":"commitment","type":"tuple","internalType":"struct BN254.G1Point","components":[{"name":"X","type":"uint256","internalType":"uint256"},{"name":"Y","type":"uint256","internalType":"uint256"}]},{"name":"dataLength","type":"uint32","internalType":"uint32"},{"name":"quorumBlobParams","type":"tuple[]","internalType":"struct IEigenDAServiceManager.QuorumBlobParam[]","components":[{"name":"quorumNumber","type":"uint8","internalType":"uint8"},{"name":"adversaryThresholdPercentage","type":"uint8","internalType":"uint8"},{"name":"confirmationThresholdPercentage","type":"uint8","internalType":"uint8"},{"name":"chunkLength","type":"uint32","internalType":"uint32"}]}]},{"name":"afterDelayedMessagesRead","type":"uint256","internalType":"uint256"},{"name":"prevMessageCount","type":"uint256","internalType":"uint256"},{"name":"newMessageCount","type":"uint256","internalType":"uint256"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"addSequencerL2BatchFromOrigin","inputs":[{"name":"","type":"uint256","internalType":"uint256"},{"name":"","type":"bytes","internalType":"bytes"},{"name":"","type":"uint256","internalType":"uint256"},{"name":"","type":"address","internalType":"contract IGasRefunder"}],"outputs":[],"stateMutability":"pure"},{"type":"function","name":"addSequencerL2BatchFromOrigin","inputs":[{"name":"sequenceNumber","type":"uint256","internalType":"uint256"},{"name":"data","type":"bytes","internalType":"bytes"},{"name":"afterDelayedMessagesRead","type":"uint256","internalType":"uint256"},{"name":"gasRefunder","type":"address","internalType":"contract IGasRefunder"},{"name":"prevMessageCount","type":"uint256","internalType":"uint256"},{"name":"newMessageCount","type":"uint256","internalType":"uint256"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"batchCount","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"batchPosterManager","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"bridge","inputs":[],"outputs":[{"name":"","type":"address","internalType":"contract 
IBridge"}],"stateMutability":"view"},{"type":"function","name":"dasKeySetInfo","inputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"isValidKeyset","type":"bool","internalType":"bool"},{"name":"creationBlock","type":"uint64","internalType":"uint64"}],"stateMutability":"view"},{"type":"function","name":"eigenDARollupManager","inputs":[],"outputs":[{"name":"","type":"address","internalType":"contract IRollupManager"}],"stateMutability":"view"},{"type":"function","name":"eigenDAServiceManager","inputs":[],"outputs":[{"name":"","type":"address","internalType":"contract IEigenDAServiceManager"}],"stateMutability":"view"},{"type":"function","name":"forceInclusion","inputs":[{"name":"_totalDelayedMessagesRead","type":"uint256","internalType":"uint256"},{"name":"kind","type":"uint8","internalType":"uint8"},{"name":"l1BlockAndTime","type":"uint64[2]","internalType":"uint64[2]"},{"name":"baseFeeL1","type":"uint256","internalType":"uint256"},{"name":"sender","type":"address","internalType":"address"},{"name":"messageDataHash","type":"bytes32","internalType":"bytes32"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"getKeysetCreationBlock","inputs":[{"name":"ksHash","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"inboxAccs","inputs":[{"name":"index","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"initialize","inputs":[{"name":"bridge_","type":"address","internalType":"contract IBridge"},{"name":"maxTimeVariation_","type":"tuple","internalType":"struct ISequencerInbox.MaxTimeVariation","components":[{"name":"delayBlocks","type":"uint256","internalType":"uint256"},{"name":"futureBlocks","type":"uint256","internalType":"uint256"},{"name":"delaySeconds","type":"uint256","internalType":"uint256"},{"name":"futureSeconds","type":"uint256","internalType":"uint256"}]}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"invalidateKeysetHash","inputs":[{"name":"ksHash","type":"bytes32","internalType":"bytes32"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"isBatchPoster","inputs":[{"name":"","type":"address","internalType":"address"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"isSequencer","inputs":[{"name":"","type":"address","internalType":"address"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"isUsingFeeToken","inputs":[],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"isValidKeysetHash","inputs":[{"name":"ksHash","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"maxDataSize","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"maxTimeVariation","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"},{"name":"","type":"uint256","internalType":"uint256"},{"name":"","type":"uint256","internalType":"uint256"},{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"postUpgradeInit","inputs":[],"outputs":[],"stateMutability":"nonpayab
le"},{"type":"function","name":"reader4844","inputs":[],"outputs":[{"name":"","type":"address","internalType":"contract IReader4844"}],"stateMutability":"view"},{"type":"function","name":"removeDelayAfterFork","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"rollup","inputs":[],"outputs":[{"name":"","type":"address","internalType":"contract IOwnable"}],"stateMutability":"view"},{"type":"function","name":"setBatchPosterManager","inputs":[{"name":"newBatchPosterManager","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"setIsBatchPoster","inputs":[{"name":"addr","type":"address","internalType":"address"},{"name":"isBatchPoster_","type":"bool","internalType":"bool"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"setIsSequencer","inputs":[{"name":"addr","type":"address","internalType":"address"},{"name":"isSequencer_","type":"bool","internalType":"bool"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"setMaxTimeVariation","inputs":[{"name":"maxTimeVariation_","type":"tuple","internalType":"struct ISequencerInbox.MaxTimeVariation","components":[{"name":"delayBlocks","type":"uint256","internalType":"uint256"},{"name":"futureBlocks","type":"uint256","internalType":"uint256"},{"name":"delaySeconds","type":"uint256","internalType":"uint256"},{"name":"futureSeconds","type":"uint256","internalType":"uint256"}]}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"setValidKeyset","inputs":[{"name":"keysetBytes","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"totalDelayedMessagesRead","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"event","name":"InboxMessageDelivered","inputs":[{"name":"messageNum","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"data","type":"bytes","indexed":false,"internalType":"bytes"}],"anonymous":false},{"type":"event","name":"InboxMessageDeliveredFromOrigin","inputs":[{"name":"messageNum","type":"uint256","indexed":true,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"InvalidateKeyset","inputs":[{"name":"keysetHash","type":"bytes32","indexed":true,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"OwnerFunctionCalled","inputs":[{"name":"id","type":"uint256","indexed":true,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"SequencerBatchData","inputs":[{"name":"batchSequenceNumber","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"data","type":"bytes","indexed":false,"internalType":"bytes"}],"anonymous":false},{"type":"event","name":"SequencerBatchDelivered","inputs":[{"name":"batchSequenceNumber","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"beforeAcc","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"afterAcc","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"delayedAcc","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"afterDelayedMessagesRead","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"timeBounds","type":"tuple","indexed":false,"internalType":"struct 
IBridge.TimeBounds","components":[{"name":"minTimestamp","type":"uint64","internalType":"uint64"},{"name":"maxTimestamp","type":"uint64","internalType":"uint64"},{"name":"minBlockNumber","type":"uint64","internalType":"uint64"},{"name":"maxBlockNumber","type":"uint64","internalType":"uint64"}]},{"name":"dataLocation","type":"uint8","indexed":false,"internalType":"enum IBridge.BatchDataLocation"}],"anonymous":false},{"type":"event","name":"SetValidKeyset","inputs":[{"name":"keysetHash","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"keysetBytes","type":"bytes","indexed":false,"internalType":"bytes"}],"anonymous":false},{"type":"error","name":"AlreadyInit","inputs":[]},{"type":"error","name":"AlreadyValidDASKeyset","inputs":[{"name":"","type":"bytes32","internalType":"bytes32"}]},{"type":"error","name":"BadMaxTimeVariation","inputs":[]},{"type":"error","name":"BadPostUpgradeInit","inputs":[]},{"type":"error","name":"BadSequencerNumber","inputs":[{"name":"stored","type":"uint256","internalType":"uint256"},{"name":"received","type":"uint256","internalType":"uint256"}]},{"type":"error","name":"DataBlobsNotSupported","inputs":[]},{"type":"error","name":"DataTooLarge","inputs":[{"name":"dataLength","type":"uint256","internalType":"uint256"},{"name":"maxDataLength","type":"uint256","internalType":"uint256"}]},{"type":"error","name":"DelayedBackwards","inputs":[]},{"type":"error","name":"DelayedTooFar","inputs":[]},{"type":"error","name":"Deprecated","inputs":[]},{"type":"error","name":"ForceIncludeBlockTooSoon","inputs":[]},{"type":"error","name":"ForceIncludeTimeTooSoon","inputs":[]},{"type":"error","name":"HadZeroInit","inputs":[]},{"type":"error","name":"IncorrectMessagePreimage","inputs":[]},{"type":"error","name":"InitParamZero","inputs":[{"name":"name","type":"string","internalType":"string"}]},{"type":"error","name":"InvalidHeaderFlag","inputs":[{"name":"","type":"bytes1","internalType":"bytes1"}]},{"type":"error","name":"MissingDataHashes","inputs":[]},{"type":"error","name":"NativeTokenMismatch","inputs":[]},{"type":"error","name":"NoSuchKeyset","inputs":[{"name":"","type":"bytes32","internalType":"bytes32"}]},{"type":"error","name":"NotBatchPoster","inputs":[]},{"type":"error","name":"NotBatchPosterManager","inputs":[{"name":"","type":"address","internalType":"address"}]},{"type":"error","name":"NotForked","inputs":[]},{"type":"error","name":"NotOrigin","inputs":[]},{"type":"error","name":"NotOwner","inputs":[{"name":"sender","type":"address","internalType":"address"},{"name":"owner","type":"address","internalType":"address"}]}]` +) + +// EigenDAMessageHeaderFlag indicates the message is an EigenDA message +const EigenDAMessageHeaderFlag byte = 0xed + +func IsEigenDAMessageHeaderByte(header byte) bool { + return hasBits(EigenDAMessageHeaderFlag, header) +} + +// hasBits returns true if `checking` has all `bits` +func hasBits(checking byte, bits byte) bool { + return (checking & bits) == bits +} + +type EigenDAWriter interface { + Store(context.Context, []byte) (*EigenDABlobInfo, error) + Serialize(eigenDABlobInfo *EigenDABlobInfo) ([]byte, error) +} + +type EigenDAReader interface { + QueryBlob(ctx context.Context, cert *EigenDABlobInfo, domainFilter string) ([]byte, error) +} + +type EigenDAConfig struct { + Enable bool `koanf:"enable"` + Rpc string `koanf:"rpc"` +} + +type EigenDA struct { + client *EigenDAProxyClient +} + +func NewEigenDA(proxyServerRpc string) (*EigenDA, error) { + client := NewEigenDAProxyClient(proxyServerRpc) + + return &EigenDA{ + client: 
client, + }, nil +} + +func (e *EigenDA) QueryBlob(ctx context.Context, cert *EigenDABlobInfo, domainFilter string) ([]byte, error) { + log.Info("Querying blob from EigenDA") + + info, err := cert.ToDisperserBlobInfo() + if err != nil { + return nil, err + } + + data, err := e.client.Get(ctx, info, domainFilter) + if err != nil { + return nil, err + } + + return data, nil +} + +// Store disperses a blob to EigenDA and returns the appropriate EigenDABlobInfo or certificate values +func (e *EigenDA) Store(ctx context.Context, data []byte) (*EigenDABlobInfo, error) { + log.Info("Dispersing blob to EigenDA", "data", hex.EncodeToString(data)) + var blobInfo = &EigenDABlobInfo{} + cert, err := e.client.Put(ctx, data) + if err != nil { + return nil, err + } + + blobInfo.LoadBlobInfo(cert) + + return blobInfo, nil +} + +func (e *EigenDA) Serialize(blobInfo *EigenDABlobInfo) ([]byte, error) { + return rlp.EncodeToBytes(blobInfo) +} + +func RecoverPayloadFromEigenDABatch(ctx context.Context, + sequencerMsg []byte, + daReader EigenDAReader, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + domain string, +) ([]byte, error) { + log.Info("Start recovering payload from eigenda: ", "data", hex.EncodeToString(sequencerMsg)) + var eigenDAPreimages map[common.Hash][]byte + if preimages != nil { + if preimages[arbutil.EigenDaPreimageType] == nil { + preimages[arbutil.EigenDaPreimageType] = make(map[common.Hash][]byte) + } + eigenDAPreimages = preimages[arbutil.EigenDaPreimageType] + } + + blobInfo, err := ParseSequencerMsg(sequencerMsg) + if err != nil { + log.Error("Failed to parse sequencer message", "err", err) + return nil, err + } + + data, err := daReader.QueryBlob(ctx, blobInfo, domain) + if err != nil { + log.Error("Failed to query data from EigenDA", "err", err) + return nil, err + } + + // record preimage data for EigenDA using the hash of the commitment + // for lookups in the replay script + kzgCommit, err := blobInfo.SerializeCommitment() + if err != nil { + return nil, err + } + shaDataHash := sha256.New() + shaDataHash.Write(kzgCommit) + dataHash := shaDataHash.Sum([]byte{}) + dataHash[0] = 1 + if eigenDAPreimages != nil { + eigenDAPreimages[common.BytesToHash(dataHash)] = data + } + return data, nil +} + +// ParseSequencerMsg parses the inbox tx calldata into a structured EigenDABlobInfo +func ParseSequencerMsg(calldata []byte) (*EigenDABlobInfo, error) { + + if len(calldata) < 4 { + return nil, errors.New("calldata is shorter than expected method signature length") + } + + // TODO: Construct the ABI struct at node initialization + abi, err := abi.JSON(strings.NewReader(sequencerInboxABI)) + if err != nil { + return nil, err + } + + method, err := abi.MethodById(calldata[0:4]) + if err != nil { + return nil, err + } + + callDataValues, err := method.Inputs.Unpack(calldata[4:]) + if err != nil { + return nil, err + } + + inboxPayload := &InboxPayload{} + + err = inboxPayload.Load(callDataValues) + if err != nil { + return nil, err + } + + return &EigenDABlobInfo{ + BlobVerificationProof: inboxPayload.BlobVerificationProof, + BlobHeader: inboxPayload.BlobHeader, + }, nil + +} diff --git a/eigenda/eigenda_test.go b/eigenda/eigenda_test.go new file mode 100644 index 000000000..844e85c07 --- /dev/null +++ b/eigenda/eigenda_test.go @@ -0,0 +1,34 @@ +package eigenda + +import ( + "encoding/hex" + "testing" +) + +func TestParseSequencerMsg(t *testing.T) { + calldataString := 
"43a85289000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000004500000000000000000000000000000000000000000000000000000000000001a400000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000024000000000000000000000000000000000000000000000000000000000000000603110cdec4dda50a1465d571f980c07db659331ffc7b376d5eef7fe298ded2f3f00000000000000000000000000000000000000000000000000000000000000043110cdec4dda50a1465d571f980c07db659331ffc7b376d5eef7fe298ded2f3f000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a54baeff59fa897b3360d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020100000000000000000000000000000000000000000000000000000000000000007e2db2683cd5ec31b62b50b9a685140076b483f1f85b931f493480cbfd9eda10a964fcc86dbace6cedd749b878523e8bdc8ad1c04104cdbf1482d79e3367b90000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000001d000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000012c" + + calldata, err := hex.DecodeString(calldataString) + if err != nil { + t.Fatalf("Failed to decode calldata: %v", err) + } + + expected := &EigenDABlobInfo{ + // BatchHeader content for hashing + BlobVerificationProof: BlobVerificationProof{ + BatchID: 69, + }, + } + + // Call the function with the mock calldata + result, err := ParseSequencerMsg(calldata) + if err != nil { + t.Fatalf("ParseSequencerMsg returned an error: %v", err) + } + + // TODO: Extend the test to cover all fields + if result.BlobVerificationProof.BatchID != expected.BlobVerificationProof.BatchID { + t.Errorf("BlobIndex was incorrect, got: %v, want: %v", result.BlobVerificationProof.BatchID, expected.BlobVerificationProof.BatchID) + } + +} diff --git a/eigenda/proxy.go b/eigenda/proxy.go new file mode 100644 index 000000000..e5942974b --- /dev/null +++ b/eigenda/proxy.go @@ -0,0 +1,193 @@ +package eigenda + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + + "github.com/Layr-Labs/eigenda/api/grpc/disperser" + "github.com/ethereum/go-ethereum/rlp" +) + +type EigenDAProxyClient struct { + client ProxyClient +} + +func NewEigenDAProxyClient(RPCUrl string) *EigenDAProxyClient { + + c := New(&Config{ + URL: RPCUrl, + }) + return &EigenDAProxyClient{client: c} +} + +func (c *EigenDAProxyClient) Put(ctx 
context.Context, data []byte) (*disperser.BlobInfo, error) { + cert, err := c.client.SetData(ctx, data) + if err != nil { + return nil, fmt.Errorf("failed to set data: %w", err) + } + + var blobInfo disperser.BlobInfo + err = rlp.DecodeBytes(cert[1:], &blobInfo) + if err != nil { + return nil, fmt.Errorf("failed to decode blob info: %w", err) + } + + return &blobInfo, nil +} + +func (c *EigenDAProxyClient) Get(ctx context.Context, blobInfo *DisperserBlobInfo, domainFilter string) ([]byte, error) { + commitment, err := rlp.EncodeToBytes(blobInfo) + if err != nil { + return nil, fmt.Errorf("failed to encode blob info: %w", err) + } + + commitWithVersion := append([]byte{0x0}, commitment...) + + data, err := c.client.GetData(ctx, commitWithVersion, StrToDomainType(domainFilter)) + if err != nil { + return nil, fmt.Errorf("failed to get data: %w", err) + } + + return data, nil +} + +// DomainType is an enumeration of the data domains in which a +// blob can exist +type DomainType uint8 + +const ( + BinaryDomain DomainType = iota + PolyDomain + UnknownDomain +) + +func (d DomainType) String() string { + switch d { + case BinaryDomain: + return "binary" + case PolyDomain: + return "polynomial" + default: + return "unknown" + } +} + +func StrToDomainType(s string) DomainType { + switch s { + case "binary": + return BinaryDomain + case "polynomial": + return PolyDomain + default: + return UnknownDomain + } +} + +// TODO: Add support for custom http client option +type Config struct { + Actor string + URL string +} + +// ProxyClient is an interface for communicating with the EigenDA proxy server +type ProxyClient interface { + Health() error + GetData(ctx context.Context, cert []byte, domain DomainType) ([]byte, error) + SetData(ctx context.Context, b []byte) ([]byte, error) +} + +// client is the implementation of ProxyClient +type client struct { + cfg *Config + httpClient *http.Client +} + +var _ ProxyClient = (*client)(nil) + +func New(cfg *Config) ProxyClient { + return &client{ + cfg, + http.DefaultClient, + } +} + +// Health indicates if the server is operational; useful for event-based awaits +// when integration testing +func (c *client) Health() error { + url := c.cfg.URL + "/health" + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return err + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received bad status code: %d", resp.StatusCode) + } + + return nil +} + +// GetData fetches blob data associated with a DA certificate +func (c *client) GetData(ctx context.Context, comm []byte, domain DomainType) ([]byte, error) { + url := fmt.Sprintf("%s/get/0x%x?domain=%s&commitment_mode=simple", c.cfg.URL, comm, domain.String()) + + if c.cfg.Actor != "" { + url = fmt.Sprintf("%s&actor=%s", url, c.cfg.Actor) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("failed to construct http request: %w", err) + } + + req.Header.Set("Content-Type", "application/octet-stream") + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("received unexpected response code: %d", resp.StatusCode) + } + + return io.ReadAll(resp.Body) +} + +// SetData writes raw byte data to DA and returns the respective certificate +func (c *client) SetData(ctx context.Context, b []byte) ([]byte, error) { + url 
:= fmt.Sprintf("%s/put/?commitment_mode=simple", c.cfg.URL) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to create HTTP request: %w", err) + } + req.Header.Set("Content-Type", "application/octet-stream") + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to store data: %v", resp.StatusCode) + } + + b, err = io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if len(b) == 0 { + return nil, fmt.Errorf("read certificate is empty") + } + + return b, err +} diff --git a/eigenda/types.go b/eigenda/types.go new file mode 100644 index 000000000..daaa214cb --- /dev/null +++ b/eigenda/types.go @@ -0,0 +1,397 @@ +package eigenda + +import ( + "errors" + "math/big" + + "github.com/Layr-Labs/eigenda/api/grpc/disperser" + "github.com/ethereum/go-ethereum/accounts/abi" + "golang.org/x/crypto/sha3" +) + +/* + Two rather redundant implementations of the same data structure exist: + - EigenDABlobInfo: represents the EigenDABlobInfo struct which is encoded in the calldata of the sequencer message for on-chain cert verification + - DisperserBlobInfo: represents the disperser.BlobInfo struct generated by the grpc disperser protobuf +*/ + +type EigenDABlobInfo struct { + BlobHeader BlobHeader `json:"blobHeader"` + BlobVerificationProof BlobVerificationProof `json:"blobVerificationProof"` +} + +type BlobHeader struct { + Commitment G1Point `json:"commitment"` + DataLength uint32 `json:"dataLength"` + QuorumBlobParams []QuorumBlobParams `json:"quorumBlobParams"` +} + +type G1Point struct { + X *big.Int + Y *big.Int +} + +type QuorumBlobParams struct { + QuorumNumber uint8 + AdversaryThresholdPercentage uint8 + ConfirmationThresholdPercentage uint8 + ChunkLength uint32 +} + +type BlobVerificationProof struct { + BatchID uint32 `json:"batchId"` + BlobIndex uint32 `json:"blobIndex"` + BatchMetadata BatchMetadata `json:"batchMetadata"` + InclusionProof []byte `json:"inclusionProof"` + QuorumIndices []byte `json:"quorumIndices"` +} + +type BatchMetadata struct { + BatchHeader BatchHeader `json:"batchHeader"` + Fee []byte `json:"fee"` + SignatoryRecordHash [32]byte `json:"signatoryRecordHash"` + ConfirmationBlockNumber uint32 `json:"confirmationBlockNumber"` + BatchHeaderHash []byte `json:"batchHeaderHash"` +} + +type BatchHeader struct { + BlobHeadersRoot [32]byte `json:"blobHeadersRoot"` + QuorumNumbers []byte `json:"quorumNumbers"` + SignedStakeForQuorums []byte `json:"signedStakeForQuorums"` + ReferenceBlockNumber uint32 `json:"referenceBlockNumber"` +} + +func (h *DisperserBatchHeader) Encode() ([]byte, error) { + // The order here has to match the field ordering of ReducedBatchHeader defined in IEigenDAServiceManager.sol + // ref: https://github.com/Layr-Labs/eigenda/blob/master/contracts/src/interfaces/IEigenDAServiceManager.sol#L43 + batchHeaderType, err := abi.NewType("tuple", "", []abi.ArgumentMarshaling{ + { + Name: "blobHeadersRoot", + Type: "bytes32", + }, + { + Name: "referenceBlockNumber", + Type: "uint32", + }, + }) + if err != nil { + return nil, err + } + + arguments := abi.Arguments{ + { + Type: batchHeaderType, + }, + } + + bytes32BatchRoot := [32]byte(h.BatchRoot) + + // cast batch root to bytes32 + + s := struct { + BlobHeadersRoot [32]byte + ReferenceBlockNumber uint32 + }{ + BlobHeadersRoot: bytes32BatchRoot, + ReferenceBlockNumber: 
uint32(h.ReferenceBlockNumber), + } + + bytes, err := arguments.Pack(s) + if err != nil { + return nil, err + } + + return bytes, nil +} + +// GetBatchHeaderHash returns the hash of the reduced BatchHeader that is used to sign the Batch +// ref: https://github.com/Layr-Labs/eigenda/blob/master/contracts/src/libraries/EigenDAHasher.sol#L65 +func (h DisperserBatchHeader) GetBatchHeaderHash() ([32]byte, error) { + headerByte, err := h.Encode() + if err != nil { + return [32]byte{}, err + } + + var headerHash [32]byte + hasher := sha3.NewLegacyKeccak256() + hasher.Write(headerByte) + copy(headerHash[:], hasher.Sum(nil)[:32]) + + return headerHash, nil +} + +// SerializeCommitment serializes the kzg commitment points to a byte slice +func (e *EigenDABlobInfo) SerializeCommitment() ([]byte, error) { + return append(e.BlobHeader.Commitment.X.Bytes(), e.BlobHeader.Commitment.Y.Bytes()...), nil +} + +// loadBlobInfo loads the disperser.BlobInfo struct into the EigenDABlobInfo struct +func (b *EigenDABlobInfo) LoadBlobInfo(disperserBlobInfo *disperser.BlobInfo) { + + x := disperserBlobInfo.GetBlobHeader().GetCommitment().GetX() + y := disperserBlobInfo.GetBlobHeader().GetCommitment().GetY() + + b.BlobHeader = BlobHeader{} + + b.BlobHeader.Commitment = G1Point{ + X: new(big.Int).SetBytes(x), + Y: new(big.Int).SetBytes(y), + } + + b.BlobHeader.DataLength = disperserBlobInfo.GetBlobHeader().GetDataLength() + + for _, quorumBlobParam := range disperserBlobInfo.GetBlobHeader().GetBlobQuorumParams() { + b.BlobHeader.QuorumBlobParams = append(b.BlobHeader.QuorumBlobParams, QuorumBlobParams{ + QuorumNumber: uint8(quorumBlobParam.QuorumNumber), + AdversaryThresholdPercentage: uint8(quorumBlobParam.AdversaryThresholdPercentage), + ConfirmationThresholdPercentage: uint8(quorumBlobParam.ConfirmationThresholdPercentage), + ChunkLength: quorumBlobParam.ChunkLength, + }) + } + + var signatoryRecordHash [32]byte + copy(signatoryRecordHash[:], disperserBlobInfo.GetBlobVerificationProof().GetBatchMetadata().GetSignatoryRecordHash()) + + b.BlobVerificationProof.BatchID = disperserBlobInfo.GetBlobVerificationProof().GetBatchId() + b.BlobVerificationProof.BlobIndex = disperserBlobInfo.GetBlobVerificationProof().GetBlobIndex() + b.BlobVerificationProof.BatchMetadata = BatchMetadata{ + Fee: disperserBlobInfo.GetBlobVerificationProof().GetBatchMetadata().GetFee(), + BatchHeaderHash: disperserBlobInfo.GetBlobVerificationProof().GetBatchMetadata().GetBatchHeaderHash(), + BatchHeader: BatchHeader{}, + SignatoryRecordHash: signatoryRecordHash, + ConfirmationBlockNumber: disperserBlobInfo.GetBlobVerificationProof().GetBatchMetadata().GetConfirmationBlockNumber(), + } + + b.BlobVerificationProof.InclusionProof = disperserBlobInfo.GetBlobVerificationProof().GetInclusionProof() + b.BlobVerificationProof.QuorumIndices = disperserBlobInfo.GetBlobVerificationProof().GetQuorumIndexes() + + batchRootSlice := disperserBlobInfo.GetBlobVerificationProof().GetBatchMetadata().GetBatchHeader().GetBatchRoot() + var blobHeadersRoot [32]byte + copy(blobHeadersRoot[:], batchRootSlice) + b.BlobVerificationProof.BatchMetadata.BatchHeader.BlobHeadersRoot = blobHeadersRoot + + b.BlobVerificationProof.BatchMetadata.BatchHeader.QuorumNumbers = disperserBlobInfo.GetBlobVerificationProof().GetBatchMetadata().GetBatchHeader().GetQuorumNumbers() + b.BlobVerificationProof.BatchMetadata.BatchHeader.SignedStakeForQuorums = disperserBlobInfo.GetBlobVerificationProof().GetBatchMetadata().GetBatchHeader().GetQuorumSignedPercentages() + 
b.BlobVerificationProof.BatchMetadata.BatchHeader.ReferenceBlockNumber = disperserBlobInfo.GetBlobVerificationProof().GetBatchMetadata().GetBatchHeader().GetReferenceBlockNumber() +} + +/* +DisperserBlobInfo is a Go struct that represents the disperser.BlobInfo struct +without requiring the overhead of importing the disperser package from core eigenda: + - https://github.com/Layr-Labs/eigenda/blob/master/api/grpc/disperser/disperser.pb.go +*/ + +type DisperserBlobInfo struct { + BlobHeader DisperserBlobHeader `json:"blob_header,omitempty"` + BlobVerificationProof DisperserBlobVerificationProof `json:"blob_verification_proof,omitempty"` +} + +type DisperserBlobHeader struct { + Commitment G1Commitment `json:"commitment,omitempty"` + DataLength uint32 `json:"data_length,omitempty"` + BlobQuorumParams []BlobQuorumParam `json:"blob_quorum_params,omitempty"` +} + +type G1Commitment struct { + X []byte `json:"x,omitempty"` + Y []byte `json:"y,omitempty"` +} + +type BlobQuorumParam struct { + QuorumNumber uint32 `json:"quorum_number,omitempty"` + AdversaryThresholdPercentage uint32 `json:"adversary_threshold_percentage,omitempty"` + ConfirmationThresholdPercentage uint32 `json:"confirmation_threshold_percentage,omitempty"` + ChunkLength uint32 `json:"chunk_length,omitempty"` +} + +type DisperserBlobVerificationProof struct { + BatchId uint32 `json:"batch_id,omitempty"` + BlobIndex uint32 `json:"blob_index,omitempty"` + BatchMetadata DisperserBatchMetadata `json:"batch_metadata,omitempty"` + InclusionProof []byte `json:"inclusion_proof,omitempty"` + QuorumIndexes []byte `json:"quorum_indexes,omitempty"` +} + +type DisperserBatchMetadata struct { + BatchHeader DisperserBatchHeader `json:"batch_header,omitempty"` + SignatoryRecordHash []byte `json:"signatory_record_hash,omitempty"` + Fee []byte `json:"fee"` + ConfirmationBlockNumber uint32 `json:"confirmation_block_number,omitempty"` + BatchHeaderHash []byte `json:"batchHeaderHash"` +} + +type DisperserBatchHeader struct { + BatchRoot []byte `json:"batch_root,omitempty"` + QuorumNumbers []byte `json:"quorum_numbers,omitempty"` + QuorumSignedPercentages []byte `json:"quorum_signed_percentages,omitempty"` + ReferenceBlockNumber uint32 `json:"reference_block_number,omitempty"` +} + +/* +Convert EigenDABlobInfo to DisperserBlobInfo struct for compatibility with proxy server expected type +*/ +func (e *EigenDABlobInfo) ToDisperserBlobInfo() (*DisperserBlobInfo, error) { + // Convert BlobHeader + var disperserBlobHeader DisperserBlobHeader + commitment := G1Commitment{ + X: e.BlobHeader.Commitment.X.Bytes(), + Y: e.BlobHeader.Commitment.Y.Bytes(), + } + quorumParams := make([]BlobQuorumParam, len(e.BlobHeader.QuorumBlobParams)) + for i, qp := range e.BlobHeader.QuorumBlobParams { + quorumParams[i] = BlobQuorumParam{ + QuorumNumber: uint32(qp.QuorumNumber), + AdversaryThresholdPercentage: uint32(qp.AdversaryThresholdPercentage), + ConfirmationThresholdPercentage: uint32(qp.ConfirmationThresholdPercentage), + ChunkLength: qp.ChunkLength, + } + } + disperserBlobHeader = DisperserBlobHeader{ + Commitment: commitment, + DataLength: e.BlobHeader.DataLength, + BlobQuorumParams: quorumParams, + } + + // Convert BlobVerificationProof + var disperserBlobVerificationProof DisperserBlobVerificationProof + if &e.BlobVerificationProof != nil { + var disperserBatchMetadata DisperserBatchMetadata + if &e.BlobVerificationProof.BatchMetadata != nil { + metadata := e.BlobVerificationProof.BatchMetadata + quorumNumbers := metadata.BatchHeader.QuorumNumbers + 
quorumSignedPercentages := metadata.BatchHeader.SignedStakeForQuorums + + disperserBatchMetadata = DisperserBatchMetadata{ + BatchHeader: DisperserBatchHeader{ + BatchRoot: metadata.BatchHeader.BlobHeadersRoot[:], + QuorumNumbers: quorumNumbers, + QuorumSignedPercentages: quorumSignedPercentages, + ReferenceBlockNumber: metadata.BatchHeader.ReferenceBlockNumber, + }, + BatchHeaderHash: metadata.BatchHeaderHash, + Fee: metadata.Fee, + SignatoryRecordHash: metadata.SignatoryRecordHash[:], + ConfirmationBlockNumber: metadata.ConfirmationBlockNumber, + } + } + disperserBlobVerificationProof = DisperserBlobVerificationProof{ + BatchId: e.BlobVerificationProof.BatchID, + BlobIndex: e.BlobVerificationProof.BlobIndex, + BatchMetadata: disperserBatchMetadata, + InclusionProof: e.BlobVerificationProof.InclusionProof, + QuorumIndexes: e.BlobVerificationProof.QuorumIndices, + } + } + + // set batchHeaderHash if not set + + batchHeaderHash, err := disperserBlobVerificationProof.BatchMetadata.BatchHeader.GetBatchHeaderHash() + if err != nil { + return nil, err + } + + disperserBlobVerificationProof.BatchMetadata.BatchHeaderHash = batchHeaderHash[:] + + return &DisperserBlobInfo{ + BlobHeader: disperserBlobHeader, + BlobVerificationProof: disperserBlobVerificationProof, + }, nil +} + +// InboxPayload is a structured representation of the calldata used for the EigenDA `addSequencerL2BatchFromEigenDA` method call +// for persisting certificates into the inbox sequence +type InboxPayload struct { + BlobVerificationProof BlobVerificationProof + BlobHeader BlobHeader +} + +// Load ingest loads calldata to a payload struct which explicitly defines the parsed +// calldata fields +func (ip *InboxPayload) Load(callDataValues []interface{}) error { + if len(callDataValues) != 6 { + return errors.New("calldata does not have the expected number of parameters") + } + + blobVerificationProof, passed := callDataValues[1].(struct { + BatchId uint32 `json:"batchId"` + BlobIndex uint32 `json:"blobIndex"` + BatchMetadata struct { + BatchHeader struct { + BlobHeadersRoot [32]uint8 `json:"blobHeadersRoot"` + QuorumNumbers []uint8 `json:"quorumNumbers"` + SignedStakeForQuorums []uint8 `json:"signedStakeForQuorums"` + ReferenceBlockNumber uint32 `json:"referenceBlockNumber"` + } `json:"batchHeader"` + SignatoryRecordHash [32]uint8 `json:"signatoryRecordHash"` + ConfirmationBlockNumber uint32 `json:"confirmationBlockNumber"` + } `json:"batchMetadata"` + InclusionProof []uint8 `json:"inclusionProof"` + QuorumIndices []uint8 `json:"quorumIndices"` + }) + + if !passed { + return errors.New("failed to parse blob verification proof") + } + + blobHeader, passed := callDataValues[2].(struct { + Commitment struct { + X *big.Int `json:"X"` + Y *big.Int `json:"Y"` + } `json:"commitment"` + DataLength uint32 `json:"dataLength"` + QuorumBlobParams []struct { + QuorumNumber uint8 `json:"quorumNumber"` + AdversaryThresholdPercentage uint8 `json:"adversaryThresholdPercentage"` + ConfirmationThresholdPercentage uint8 `json:"confirmationThresholdPercentage"` + ChunkLength uint32 `json:"chunkLength"` + } `json:"quorumBlobParams"` + }) + + if !passed { + return errors.New("failed to parse blob header") + } + + payload := InboxPayload{ + BlobVerificationProof: BlobVerificationProof{ + BatchID: blobVerificationProof.BatchId, + BlobIndex: blobVerificationProof.BlobIndex, + BatchMetadata: BatchMetadata{ + BatchHeader: BatchHeader{ + BlobHeadersRoot: blobVerificationProof.BatchMetadata.BatchHeader.BlobHeadersRoot, + QuorumNumbers: 
blobVerificationProof.BatchMetadata.BatchHeader.QuorumNumbers, + SignedStakeForQuorums: blobVerificationProof.BatchMetadata.BatchHeader.SignedStakeForQuorums, + ReferenceBlockNumber: blobVerificationProof.BatchMetadata.BatchHeader.ReferenceBlockNumber, + }, + Fee: []byte{}, + BatchHeaderHash: []byte{}, + + SignatoryRecordHash: blobVerificationProof.BatchMetadata.SignatoryRecordHash, + ConfirmationBlockNumber: blobVerificationProof.BatchMetadata.ConfirmationBlockNumber, + }, + InclusionProof: blobVerificationProof.InclusionProof, + QuorumIndices: blobVerificationProof.QuorumIndices, + }, + BlobHeader: BlobHeader{ + Commitment: G1Point{ + X: blobHeader.Commitment.X, + Y: blobHeader.Commitment.Y, + }, + DataLength: blobHeader.DataLength, + QuorumBlobParams: func() []QuorumBlobParams { + params := make([]QuorumBlobParams, len(blobHeader.QuorumBlobParams)) + for i, p := range blobHeader.QuorumBlobParams { + params[i] = QuorumBlobParams{ + QuorumNumber: p.QuorumNumber, + AdversaryThresholdPercentage: p.AdversaryThresholdPercentage, + ConfirmationThresholdPercentage: p.ConfirmationThresholdPercentage, + ChunkLength: p.ChunkLength, + } + } + return params + }(), + }, + } + + *ip = payload + return nil +} diff --git a/go-ethereum b/go-ethereum index 22399a74e..a8c6813c8 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 22399a74e2b413e99a4f0d06c65862ced0d021c7 +Subproject commit a8c6813c85488a23d2c527b1e20e398323d349d0 diff --git a/go.mod b/go.mod index 4bca21dc6..649f147ac 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,6 @@ require ( golang.org/x/sys v0.16.0 golang.org/x/term v0.16.0 golang.org/x/tools v0.15.0 - google.golang.org/grpc v1.59.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -297,6 +296,7 @@ require ( google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/grpc v1.59.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/square/go-jose.v2 v2.5.1 // indirect lukechampine.com/blake3 v1.1.7 // indirect diff --git a/go.sum b/go.sum index 7bbb99787..05f581be0 100644 --- a/go.sum +++ b/go.sum @@ -2313,4 +2313,4 @@ rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= \ No newline at end of file +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/nitro-testnode b/nitro-testnode index 019e15bc2..8302b1148 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 019e15bc21d1b3d9205afedb4d193e7102836631 +Subproject commit 8302b1148d19d96300171ad2dfd0f7a686674abe diff --git a/solgen/gen.go b/solgen/gen.go index 770fa0857..9c08e167c 100644 --- a/solgen/gen.go +++ b/solgen/gen.go @@ -118,6 +118,11 @@ func main() { _, file := filepath.Split(path) name := file[:len(file)-5] + if name != "Reader4844" { + continue + } + + log.Printf("Processing %s", name) data, err := os.ReadFile(path) if err != nil { 
log.Fatal("could not read", path, "for contract", name, err) diff --git a/staker/l1_validator.go b/staker/l1_validator.go index 567cc15ca..87fd4a669 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -10,7 +10,7 @@ import ( "math/big" "time" - "github.com/offchainlabs/nitro/das/eigenda" + "github.com/offchainlabs/nitro/eigenda" "github.com/offchainlabs/nitro/staker/txbuilder" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/validator" diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 8d49d7f75..470d2b070 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -11,7 +11,7 @@ import ( "sync" "testing" - "github.com/offchainlabs/nitro/das/eigenda" + "github.com/offchainlabs/nitro/eigenda" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/validator/server_api" @@ -343,10 +343,14 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * if v.eigenDAService == nil { log.Warn("EigenDA not configured, but sequencer message found with EigenDA header") } else { - _, err := eigenda.RecoverPayloadFromEigenDABatch(ctx, batch.Data[41:], v.eigenDAService, e.Preimages) + // we fetch the polynomial representation of the blob since its in coefficient form and compatible for + // generating witness proofs and kzg commitments within the arbitrator when constructing machine state proofs + // for EigenDA preimage types + _, err := eigenda.RecoverPayloadFromEigenDABatch(ctx, batch.Data[41:], v.eigenDAService, e.Preimages, "polynomial") if err != nil { return err } + log.Info("Recovered blob coefficient from EigenDA batch", "batch", batch.Number) } } } diff --git a/system_tests/common_test.go b/system_tests/common_test.go index c37eb1db3..fb1a7f701 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -688,6 +688,8 @@ func DeployOnTestL1( nativeToken, maxDataSize, false, + common.HexToAddress("0x0"), + common.HexToAddress("0x0"), ) Require(t, err) l1info.SetContract("Bridge", addresses.Bridge) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 77114a16a..2a9b51fe6 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -32,7 +32,6 @@ import ( "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/execution/gethexec" - "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" @@ -214,10 +213,6 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha FutureSeconds: big.NewInt(10000), } - rollupMngr, tx, _, err := bridgegen.DeployEigenDADummyManager(&txOpts, l1Client) - Require(t, err) - _, err = EnsureTxSucceeded(ctx, l1Client, tx) - // Require(t, err) // _, err = EnsureTxSucceeded(ctx, l1Client, tx) @@ -232,8 +227,6 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha timeBounds, big.NewInt(117964), reader4844, - rollupMngr, - rollupMngr, false, ) Require(t, err) diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index 6e5a825f5..2c1143548 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -41,7 +41,7 @@ func BuildBlock( if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } 
- inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, nil, arbstate.KeysetValidate) + inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, arbstate.KeysetValidate) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx)
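Reviewer note: the following is a minimal, self-contained sketch (not part of this diff) showing how the new eigenda package is intended to be exercised end to end against an eigenda-proxy instance. The proxy URL/port is an assumed local endpoint, and the sketch assumes the "binary" domain round-trips the original payload bytes; only functions introduced in this PR (NewEigenDA, Store, Serialize, QueryBlob) are used.

// Illustrative sketch: round-trip a payload through the EigenDA proxy client.
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/offchainlabs/nitro/eigenda"
)

func main() {
	ctx := context.Background()

	// Assumed: an eigenda-proxy instance listening locally.
	da, err := eigenda.NewEigenDA("http://127.0.0.1:4242")
	if err != nil {
		log.Fatalf("failed to construct EigenDA client: %v", err)
	}

	payload := []byte("example batch payload")

	// Store disperses the payload and returns the certificate (EigenDABlobInfo)
	// that the batch poster embeds in addSequencerL2BatchFromEigenDA calldata.
	cert, err := da.Store(ctx, payload)
	if err != nil {
		log.Fatalf("store failed: %v", err)
	}

	// Serialize RLP-encodes the certificate, per the EigenDAWriter interface.
	certBytes, err := da.Serialize(cert)
	if err != nil {
		log.Fatalf("serialize failed: %v", err)
	}
	log.Printf("certificate is %d bytes (RLP)", len(certBytes))

	// QueryBlob fetches the blob back using the certificate; "binary" asks the
	// proxy for the decoded payload rather than the polynomial coefficients.
	got, err := da.QueryBlob(ctx, cert, "binary")
	if err != nil {
		log.Fatalf("query failed: %v", err)
	}

	if !bytes.Equal(got, payload) {
		log.Fatalf("round-trip mismatch: got %d bytes", len(got))
	}
	log.Printf("round-trip OK (%d bytes)", len(got))
}

On the write path the batch poster performs this same Store/Serialize sequence before posting the batch; on the read path the stateless block validator instead calls RecoverPayloadFromEigenDABatch with the "polynomial" domain, since the coefficient-form blob is what the arbitrator needs for KZG commitments and witness proofs over EigenDA preimages.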