diff --git a/.mockery.yaml b/.mockery.yaml index b1119926e1..106be368cb 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -61,6 +61,7 @@ packages: dir: ./block/internal/syncing pkgname: syncing filename: syncer_mock.go + github.com/evstack/ev-node/block/internal/common: interfaces: Broadcaster: diff --git a/apps/evm/go.mod b/apps/evm/go.mod index bd1b1a930b..151585228f 100644 --- a/apps/evm/go.mod +++ b/apps/evm/go.mod @@ -2,10 +2,10 @@ module github.com/evstack/ev-node/apps/evm go 1.25.0 -//replace ( -// github.com/evstack/ev-node => ../../ -// github.com/evstack/ev-node/execution/evm => ../../execution/evm -//) +replace ( + github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/execution/evm => ../../execution/evm +) require ( github.com/ethereum/go-ethereum v1.16.8 diff --git a/apps/evm/go.sum b/apps/evm/go.sum index 109afc76fa..1da058e8e2 100644 --- a/apps/evm/go.sum +++ b/apps/evm/go.sum @@ -409,12 +409,8 @@ github.com/ethereum/go-ethereum v1.16.8 h1:LLLfkZWijhR5m6yrAXbdlTeXoqontH+Ga2f9i github.com/ethereum/go-ethereum v1.16.8/go.mod h1:Fs6QebQbavneQTYcA39PEKv2+zIjX7rPUZ14DER46wk= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= -github.com/evstack/ev-node v1.0.0-rc.2 h1:gUQzLTkCj6D751exm/FIR/yw2aXWiW2aEREEwtxMvw0= -github.com/evstack/ev-node v1.0.0-rc.2/go.mod h1:Qa2nN1D6PJQRU2tiarv6X5Der5OZg/+2QGY/K2mA760= github.com/evstack/ev-node/core v1.0.0-rc.1 h1:Dic2PMUMAYUl5JW6DkDj6HXDEWYzorVJQuuUJOV0FjE= github.com/evstack/ev-node/core v1.0.0-rc.1/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= -github.com/evstack/ev-node/execution/evm v1.0.0-rc.2 h1:t7os7ksmPhf2rWY2psVBowyc+iuneMDPwBGQaxSckus= -github.com/evstack/ev-node/execution/evm v1.0.0-rc.2/go.mod h1:ahxKQfPlJ5C7g15Eq9Mjn2tQnn59T0kIm9B10zDhcTI= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0/go.mod 
h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= diff --git a/apps/grpc/go.mod b/apps/grpc/go.mod index 912f94c40c..c1b30d70e3 100644 --- a/apps/grpc/go.mod +++ b/apps/grpc/go.mod @@ -2,10 +2,10 @@ module github.com/evstack/ev-node/apps/grpc go 1.25.0 -//replace ( -// github.com/evstack/ev-node => ../../ -// github.com/evstack/ev-node/execution/grpc => ../../execution/grpc -//) +replace ( + github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/execution/grpc => ../../execution/grpc +) require ( github.com/evstack/ev-node v1.0.0-rc.2 diff --git a/apps/grpc/go.sum b/apps/grpc/go.sum index b99a59e18a..94c6bc63fa 100644 --- a/apps/grpc/go.sum +++ b/apps/grpc/go.sum @@ -365,12 +365,8 @@ github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6Ni github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/evstack/ev-node v1.0.0-rc.2 h1:gUQzLTkCj6D751exm/FIR/yw2aXWiW2aEREEwtxMvw0= -github.com/evstack/ev-node v1.0.0-rc.2/go.mod h1:Qa2nN1D6PJQRU2tiarv6X5Der5OZg/+2QGY/K2mA760= github.com/evstack/ev-node/core v1.0.0-rc.1 h1:Dic2PMUMAYUl5JW6DkDj6HXDEWYzorVJQuuUJOV0FjE= github.com/evstack/ev-node/core v1.0.0-rc.1/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= -github.com/evstack/ev-node/execution/grpc v1.0.0-rc.1 h1:OzrWLDDY6/9+LWx0XmUqPzxs/CHZRJICOwQ0Me/i6dY= -github.com/evstack/ev-node/execution/grpc v1.0.0-rc.1/go.mod h1:Pr/sF6Zx8am9ZeWFcoz1jYPs0kXmf+OmL8Tz2Gyq7E4= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 
h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= diff --git a/block/components.go b/block/components.go index edf0eb38bb..903402cb12 100644 --- a/block/components.go +++ b/block/components.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + "github.com/celestiaorg/go-header" "github.com/rs/zerolog" "github.com/evstack/ev-node/block/internal/cache" @@ -20,6 +21,7 @@ import ( "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/signer" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/pkg/sync" "github.com/evstack/ev-node/pkg/telemetry" "github.com/evstack/ev-node/types" ) @@ -127,8 +129,10 @@ func NewSyncComponents( store store.Store, exec coreexecutor.Executor, daClient da.Client, - headerStore common.Broadcaster[*types.SignedHeader], - dataStore common.Broadcaster[*types.Data], + headerStore header.Store[*types.P2PSignedHeader], + dataStore header.Store[*types.P2PData], + headerDAHintAppender submitting.DAHintAppender, + dataDAHintAppender submitting.DAHintAppender, logger zerolog.Logger, metrics *Metrics, blockOpts BlockOptions, @@ -163,7 +167,7 @@ func NewSyncComponents( } // Create submitter for sync nodes (no signer, only DA inclusion processing) - var daSubmitter submitting.DASubmitterAPI = submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) + var daSubmitter submitting.DASubmitterAPI = submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, headerDAHintAppender, dataDAHintAppender) if config.Instrumentation.IsTracingEnabled() { daSubmitter = submitting.WithTracingDASubmitter(daSubmitter) } @@ -200,8 +204,8 @@ func NewAggregatorComponents( sequencer coresequencer.Sequencer, daClient da.Client, signer signer.Signer, - headerBroadcaster common.Broadcaster[*types.SignedHeader], - dataBroadcaster common.Broadcaster[*types.Data], + headerSyncService *sync.HeaderSyncService, + dataSyncService *sync.DataSyncService, logger zerolog.Logger, metrics *Metrics, blockOpts BlockOptions, @@ 
-229,8 +233,8 @@ func NewAggregatorComponents( metrics, config, genesis, - headerBroadcaster, - dataBroadcaster, + headerSyncService, + dataSyncService, logger, blockOpts, errorCh, @@ -266,7 +270,7 @@ func NewAggregatorComponents( }, nil } - var daSubmitter submitting.DASubmitterAPI = submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) + var daSubmitter submitting.DASubmitterAPI = submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, headerSyncService, dataSyncService) if config.Instrumentation.IsTracingEnabled() { daSubmitter = submitting.WithTracingDASubmitter(daSubmitter) } diff --git a/block/components_test.go b/block/components_test.go index 87ec922563..f1fbac743b 100644 --- a/block/components_test.go +++ b/block/components_test.go @@ -22,8 +22,17 @@ import ( "github.com/evstack/ev-node/pkg/signer/noop" "github.com/evstack/ev-node/pkg/store" testmocks "github.com/evstack/ev-node/test/mocks" + extmocks "github.com/evstack/ev-node/test/mocks/external" + "github.com/evstack/ev-node/types" ) +// noopDAHintAppender is a no-op implementation of DAHintAppender for testing +type noopDAHintAppender struct{} + +func (n noopDAHintAppender) AppendDAHint(ctx context.Context, daHeight uint64, heights ...uint64) error { + return nil +} + func TestBlockComponents_ExecutionClientFailure_StopsNode(t *testing.T) { // Test the error channel mechanism works as intended @@ -86,6 +95,14 @@ func TestNewSyncComponents_Creation(t *testing.T) { daClient.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() daClient.On("HasForcedInclusionNamespace").Return(false).Maybe() + // Create mock P2P stores + mockHeaderStore := extmocks.NewMockStore[*types.P2PSignedHeader](t) + mockDataStore := extmocks.NewMockStore[*types.P2PData](t) + + // Create noop DAHintAppenders for testing + headerHintAppender := noopDAHintAppender{} + dataHintAppender := noopDAHintAppender{} + // Just test that the constructor doesn't panic - don't start the 
components // to avoid P2P store dependencies components, err := NewSyncComponents( @@ -94,8 +111,10 @@ func TestNewSyncComponents_Creation(t *testing.T) { memStore, mockExec, daClient, - nil, - nil, + mockHeaderStore, + mockDataStore, + headerHintAppender, + dataHintAppender, zerolog.Nop(), NopMetrics(), DefaultBlockOptions(), diff --git a/block/internal/common/event.go b/block/internal/common/event.go index 69d0300f9f..f02a181de8 100644 --- a/block/internal/common/event.go +++ b/block/internal/common/event.go @@ -20,4 +20,7 @@ type DAHeightEvent struct { DaHeight uint64 // Source indicates where this event originated from (DA or P2P) Source EventSource + + // Optional DA height hints from P2P. first is the DA height hint for the header, second is the DA height hint for the data + DaHeightHints [2]uint64 } diff --git a/block/internal/common/expected_interfaces.go b/block/internal/common/expected_interfaces.go index e4bc7e472b..9f3a7fa1d1 100644 --- a/block/internal/common/expected_interfaces.go +++ b/block/internal/common/expected_interfaces.go @@ -3,11 +3,17 @@ package common import ( "context" + "github.com/evstack/ev-node/types" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/celestiaorg/go-header" ) +type ( + HeaderP2PBroadcaster = Broadcaster[*types.P2PSignedHeader] + DataP2PBroadcaster = Broadcaster[*types.P2PData] +) + // Broadcaster interface for P2P broadcasting type Broadcaster[H header.Header[H]] interface { WriteToStoreAndBroadcast(ctx context.Context, payload H, opts ...pubsub.PubOpt) error diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 43aea63070..bf1b44b6cb 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -42,8 +42,8 @@ type Executor struct { metrics *common.Metrics // Broadcasting - headerBroadcaster common.Broadcaster[*types.SignedHeader] - dataBroadcaster common.Broadcaster[*types.Data] + headerBroadcaster common.HeaderP2PBroadcaster + 
dataBroadcaster common.DataP2PBroadcaster // Configuration config config.Config @@ -90,8 +90,8 @@ func NewExecutor( metrics *common.Metrics, config config.Config, genesis genesis.Genesis, - headerBroadcaster common.Broadcaster[*types.SignedHeader], - dataBroadcaster common.Broadcaster[*types.Data], + headerBroadcaster common.HeaderP2PBroadcaster, + dataBroadcaster common.DataP2PBroadcaster, logger zerolog.Logger, options common.BlockOptions, errorCh chan<- error, @@ -547,9 +547,13 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { e.setLastState(newState) // broadcast header and data to P2P network - g, broadcastCtx := errgroup.WithContext(ctx) - g.Go(func() error { return e.headerBroadcaster.WriteToStoreAndBroadcast(broadcastCtx, header) }) - g.Go(func() error { return e.dataBroadcaster.WriteToStoreAndBroadcast(broadcastCtx, data) }) + g, broadcastCtx := errgroup.WithContext(e.ctx) + g.Go(func() error { + return e.headerBroadcaster.WriteToStoreAndBroadcast(broadcastCtx, &types.P2PSignedHeader{SignedHeader: header}) + }) + g.Go(func() error { + return e.dataBroadcaster.WriteToStoreAndBroadcast(broadcastCtx, &types.P2PData{Data: data}) + }) if err := g.Wait(); err != nil { e.logger.Error().Err(err).Msg("failed to broadcast header and/data") // don't fail block production on broadcast error diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index 10875e0bc6..8c2cb8cef9 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -47,9 +47,9 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db := common.NewMockBroadcaster[*types.Data](t) + db := 
common.NewMockBroadcaster[*types.P2PData](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( @@ -163,9 +163,9 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db := common.NewMockBroadcaster[*types.Data](t) + db := common.NewMockBroadcaster[*types.P2PData](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 8e05d7b7de..0389bbf643 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -69,9 +69,9 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { mockSeq := testmocks.NewMockSequencer(t) // Broadcasters are required by produceBlock; use generated mocks - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db := common.NewMockBroadcaster[*types.Data](t) + db := common.NewMockBroadcaster[*types.P2PData](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( @@ -160,9 +160,9 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db := 
common.NewMockBroadcaster[*types.Data](t) + db := common.NewMockBroadcaster[*types.P2PData](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( diff --git a/block/internal/executing/executor_restart_test.go b/block/internal/executing/executor_restart_test.go index bd8ad8e98c..571bc75214 100644 --- a/block/internal/executing/executor_restart_test.go +++ b/block/internal/executing/executor_restart_test.go @@ -48,9 +48,9 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { // Create first executor instance mockExec1 := testmocks.NewMockExecutor(t) mockSeq1 := testmocks.NewMockSequencer(t) - hb1 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb1 := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db1 := common.NewMockBroadcaster[*types.Data](t) + db1 := common.NewMockBroadcaster[*types.P2PData](t) db1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec1, err := NewExecutor( @@ -171,9 +171,9 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { // Create second executor instance (restart scenario) mockExec2 := testmocks.NewMockExecutor(t) mockSeq2 := testmocks.NewMockSequencer(t) - hb2 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb2 := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db2 := common.NewMockBroadcaster[*types.Data](t) + db2 := common.NewMockBroadcaster[*types.P2PData](t) db2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec2, err := NewExecutor( @@ -275,9 +275,9 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { // Create first executor and produce one block mockExec1 := testmocks.NewMockExecutor(t) mockSeq1 := testmocks.NewMockSequencer(t) - hb1 := 
common.NewMockBroadcaster[*types.SignedHeader](t) + hb1 := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db1 := common.NewMockBroadcaster[*types.Data](t) + db1 := common.NewMockBroadcaster[*types.P2PData](t) db1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec1, err := NewExecutor( @@ -336,9 +336,9 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { // Create second executor (restart) mockExec2 := testmocks.NewMockExecutor(t) mockSeq2 := testmocks.NewMockSequencer(t) - hb2 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb2 := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db2 := common.NewMockBroadcaster[*types.Data](t) + db2 := common.NewMockBroadcaster[*types.P2PData](t) db2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec2, err := NewExecutor( diff --git a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index 26cf249854..1099cdb87d 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -1,7 +1,6 @@ package executing import ( - "context" "testing" "time" @@ -40,8 +39,8 @@ func TestExecutor_BroadcasterIntegration(t *testing.T) { } // Create mock broadcasters - headerBroadcaster := common.NewMockBroadcaster[*types.SignedHeader](t) - dataBroadcaster := common.NewMockBroadcaster[*types.Data](t) + headerBroadcaster := common.NewMockBroadcaster[*types.P2PSignedHeader](t) + dataBroadcaster := common.NewMockBroadcaster[*types.P2PData](t) // Create executor with broadcasters executor, err := NewExecutor( @@ -122,48 +121,3 @@ func TestExecutor_NilBroadcasters(t *testing.T) { assert.Equal(t, cacheManager, executor.cache) assert.Equal(t, gen, executor.genesis) } - -func TestExecutor_BroadcastFlow(t 
*testing.T) { - // This test demonstrates how the broadcast flow works - // when an Executor produces a block - - // Create mock broadcasters - headerBroadcaster := common.NewMockBroadcaster[*types.SignedHeader](t) - dataBroadcaster := common.NewMockBroadcaster[*types.Data](t) - - // Create sample data that would be broadcast - sampleHeader := &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - ChainID: "test-chain", - Height: 1, - Time: uint64(time.Now().UnixNano()), - }, - }, - } - - sampleData := &types.Data{ - Metadata: &types.Metadata{ - ChainID: "test-chain", - Height: 1, - Time: uint64(time.Now().UnixNano()), - }, - Txs: []types.Tx{}, - } - - // Test broadcast calls - ctx := context.Background() - - // Set up expectations - headerBroadcaster.EXPECT().WriteToStoreAndBroadcast(ctx, sampleHeader).Return(nil).Once() - dataBroadcaster.EXPECT().WriteToStoreAndBroadcast(ctx, sampleData).Return(nil).Once() - - // Simulate what happens in produceBlock() after block creation - err := headerBroadcaster.WriteToStoreAndBroadcast(ctx, sampleHeader) - require.NoError(t, err) - - err = dataBroadcaster.WriteToStoreAndBroadcast(ctx, sampleData) - require.NoError(t, err) - - // Verify expectations were met (automatically checked by testify mock on cleanup) -} diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index dde6140c5b..a2e0adcf75 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -103,14 +103,20 @@ func clamp(v, min, max time.Duration) time.Duration { return v } +type DAHintAppender interface { + AppendDAHint(ctx context.Context, daHeight uint64, heights ...uint64) error +} + // DASubmitter handles DA submission operations type DASubmitter struct { - client da.Client - config config.Config - genesis genesis.Genesis - options common.BlockOptions - logger zerolog.Logger - metrics *common.Metrics + client da.Client + config config.Config + 
genesis genesis.Genesis + options common.BlockOptions + logger zerolog.Logger + metrics *common.Metrics + headerDAHintAppender DAHintAppender + dataDAHintAppender DAHintAppender // address selector for multi-account support addressSelector pkgda.AddressSelector @@ -135,6 +141,8 @@ func NewDASubmitter( options common.BlockOptions, metrics *common.Metrics, logger zerolog.Logger, + headerDAHintAppender DAHintAppender, + dataDAHintAppender DAHintAppender, ) *DASubmitter { daSubmitterLogger := logger.With().Str("component", "da_submitter").Logger() @@ -172,15 +180,17 @@ } return &DASubmitter{ - client: client, - config: config, - genesis: genesis, - options: options, - metrics: metrics, - logger: daSubmitterLogger, - addressSelector: addressSelector, - envelopeCache: envelopeCache, - signingWorkers: workers, + client: client, + config: config, + genesis: genesis, + options: options, + metrics: metrics, + logger: daSubmitterLogger, + addressSelector: addressSelector, + envelopeCache: envelopeCache, + signingWorkers: workers, + headerDAHintAppender: headerDAHintAppender, + dataDAHintAppender: dataDAHintAppender, } } @@ -222,8 +232,15 @@ func (s *DASubmitter) SubmitHeaders(ctx context.Context, headers []*types.Signed return submitToDA(s, ctx, headers, envelopes, func(submitted []*types.SignedHeader, res *datypes.ResultSubmit) { - for _, header := range submitted { - cache.SetHeaderDAIncluded(header.Hash().String(), res.Height, header.Height()) + heights := make([]uint64, len(submitted)) + for i, header := range submitted { + headerHash := header.Hash() + cache.SetHeaderDAIncluded(headerHash.String(), res.Height, header.Height()) + heights[i] = header.Height() + } + if err := s.headerDAHintAppender.AppendDAHint(ctx, res.Height, heights...); err != nil { + s.logger.Error().Err(err).Msg("failed to append da height hint in header p2p store") + // ignoring error here, since we don't want to block the block submission } if l := len(submitted); l > 0 {
lastHeight := submitted[l-1].Height() @@ -423,8 +440,14 @@ func (s *DASubmitter) SubmitData(ctx context.Context, unsignedDataList []*types. return submitToDA(s, ctx, signedDataList, signedDataListBz, func(submitted []*types.SignedData, res *datypes.ResultSubmit) { - for _, sd := range submitted { + heights := make([]uint64, len(submitted)) + for i, sd := range submitted { cache.SetDataDAIncluded(sd.Data.DACommitment().String(), res.Height, sd.Height()) + heights[i] = sd.Height() + } + if err := s.dataDAHintAppender.AppendDAHint(ctx, res.Height, heights...); err != nil { + s.logger.Error().Err(err).Msg("failed to append da height hint in data p2p store") + // ignoring error here, since we don't want to block the block submission } if l := len(submitted); l > 0 { lastHeight := submitted[l-1].Height() diff --git a/block/internal/submitting/da_submitter_integration_test.go b/block/internal/submitting/da_submitter_integration_test.go index 0c23f7d08a..8672cd1ae2 100644 --- a/block/internal/submitting/da_submitter_integration_test.go +++ b/block/internal/submitting/da_submitter_integration_test.go @@ -96,7 +96,7 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( Return(func(_ context.Context, blobs [][]byte, _ float64, _ []byte, _ []byte) datypes.ResultSubmit { return datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: uint64(len(blobs)), Height: 1}} }).Twice() - daSubmitter := NewDASubmitter(client, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop()) + daSubmitter := NewDASubmitter(client, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop(), noopDAHintAppender{}, noopDAHintAppender{}) // Submit headers and data - cache returns both items and marshalled bytes headers, marshalledHeaders, err := cm.GetPendingHeaders(context.Background()) @@ -118,3 +118,9 @@ 
assert.True(t, ok) } + +type noopDAHintAppender struct{} + +func (n noopDAHintAppender) AppendDAHint(ctx context.Context, daHeight uint64, heights ...uint64) error { + return nil +} diff --git a/block/internal/submitting/da_submitter_mocks_test.go b/block/internal/submitting/da_submitter_mocks_test.go index 2421b5aab3..2d79208e92 100644 --- a/block/internal/submitting/da_submitter_mocks_test.go +++ b/block/internal/submitting/da_submitter_mocks_test.go @@ -35,7 +35,7 @@ func newTestSubmitter(t *testing.T, mockClient *mocks.MockClient, override func( mockClient.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() mockClient.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() mockClient.On("HasForcedInclusionNamespace").Return(false).Maybe() - return NewDASubmitter(mockClient, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop()) + return NewDASubmitter(mockClient, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop(), nil, nil) } func TestSubmitToDA_MempoolRetry_IncreasesGasAndSucceeds(t *testing.T) { diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index 476011fe81..cb00b36da8 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -71,6 +71,8 @@ func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manage common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop(), + noopDAHintAppender{}, + noopDAHintAppender{}, ) return daSubmitter, st, cm, mockDA, gen @@ -118,6 +120,8 @@ func TestNewDASubmitterSetsVisualizerWhenEnabled(t *testing.T) { common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop(), + noopDAHintAppender{}, + noopDAHintAppender{}, ) require.NotNil(t, server.GetDAVisualizationServer()) diff --git a/block/internal/submitting/submitter.go b/block/internal/submitting/submitter.go index 
68fadbef00..cac8ebd1cc 100644 --- a/block/internal/submitting/submitter.go +++ b/block/internal/submitting/submitter.go @@ -129,19 +129,11 @@ func (s *Submitter) Start(ctx context.Context) error { // Start DA submission loop if signer is available (aggregator nodes only) if s.signer != nil { s.logger.Info().Msg("starting DA submission loop") - s.wg.Add(1) - go func() { - defer s.wg.Done() - s.daSubmissionLoop() - }() + s.wg.Go(s.daSubmissionLoop) } // Start DA inclusion processing loop (both sync and aggregator nodes) - s.wg.Add(1) - go func() { - defer s.wg.Done() - s.processDAInclusionLoop() - }() + s.wg.Go(s.processDAInclusionLoop) return nil } @@ -345,9 +337,9 @@ func (s *Submitter) processDAInclusionLoop() { s.logger.Debug().Uint64("height", nextHeight).Msg("advancing DA included height") - // Set sequencer height to DA height mapping using already retrieved data - if err := s.setSequencerHeightToDAHeight(s.ctx, nextHeight, header, data, currentDAIncluded == 0); err != nil { - s.logger.Error().Err(err).Uint64("height", nextHeight).Msg("failed to set sequencer height to DA height mapping") + // Set node height to DA height mapping using already retrieved data + if err := s.setNodeHeightToDAHeight(s.ctx, nextHeight, header, data, currentDAIncluded == 0); err != nil { + s.logger.Error().Err(err).Uint64("height", nextHeight).Msg("failed to set node height to DA height mapping") break } @@ -435,14 +427,14 @@ func (s *Submitter) sendCriticalError(err error) { } } -// setSequencerHeightToDAHeight stores the mapping from a ev-node block height to the corresponding +// setNodeHeightToDAHeight stores the mapping from a ev-node block height to the corresponding // DA (Data Availability) layer heights where the block's header and data were included. // This mapping is persisted in the store metadata and is used to track which DA heights // contain the block components for a given ev-node height. 
// // For blocks with empty transactions, both header and data use the same DA height since // empty transaction data is not actually published to the DA layer. -func (s *Submitter) setSequencerHeightToDAHeight(ctx context.Context, height uint64, header *types.SignedHeader, data *types.Data, genesisInclusion bool) error { +func (s *Submitter) setNodeHeightToDAHeight(ctx context.Context, height uint64, header *types.SignedHeader, data *types.Data, genesisInclusion bool) error { headerHash, dataHash := header.Hash(), data.DACommitment() headerDaHeightBytes := make([]byte, 8) diff --git a/block/internal/submitting/submitter_test.go b/block/internal/submitting/submitter_test.go index 07703be94a..3e0e0b343d 100644 --- a/block/internal/submitting/submitter_test.go +++ b/block/internal/submitting/submitter_test.go @@ -167,7 +167,7 @@ func TestSubmitter_setSequencerHeightToDAHeight(t *testing.T) { daClient.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() daClient.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() daClient.On("HasForcedInclusionNamespace").Return(false).Maybe() - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil, nil) s := NewSubmitter(mockStore, nil, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, nil, zerolog.Nop(), nil) s.ctx = ctx @@ -191,10 +191,10 @@ func TestSubmitter_setSequencerHeightToDAHeight(t *testing.T) { mockStore.On("SetMetadata", mock.Anything, dataKey, dBz).Return(nil).Once() mockStore.On("SetMetadata", mock.Anything, store.GenesisDAHeightKey, gBz).Return(nil).Once() - require.NoError(t, s.setSequencerHeightToDAHeight(ctx, 1, h, d, true)) + require.NoError(t, s.setNodeHeightToDAHeight(ctx, 1, h, d, true)) } -func TestSubmitter_setSequencerHeightToDAHeight_Errors(t *testing.T) { +func TestSubmitter_setNodeHeightToDAHeight_Errors(t *testing.T) { 
ctx := t.Context() cm, st := newTestCacheAndStore(t) @@ -205,11 +205,11 @@ func TestSubmitter_setSequencerHeightToDAHeight_Errors(t *testing.T) { // No cache entries -> expect error on missing header _, ok := cm.GetHeaderDAIncluded(h.Hash().String()) assert.False(t, ok) - assert.Error(t, s.setSequencerHeightToDAHeight(ctx, 1, h, d, false)) + assert.Error(t, s.setNodeHeightToDAHeight(ctx, 1, h, d, false)) // Add header, missing data cm.SetHeaderDAIncluded(h.Hash().String(), 10, 1) - assert.Error(t, s.setSequencerHeightToDAHeight(ctx, 1, h, d, false)) + assert.Error(t, s.setNodeHeightToDAHeight(ctx, 1, h, d, false)) } func TestSubmitter_initializeDAIncludedHeight(t *testing.T) { @@ -251,7 +251,7 @@ func TestSubmitter_processDAInclusionLoop_advances(t *testing.T) { daClient.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() daClient.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() daClient.On("HasForcedInclusionNamespace").Return(false).Maybe() - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil, nil) s := NewSubmitter(st, exec, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, nil, zerolog.Nop(), nil) // prepare two consecutive blocks in store with DA included in cache @@ -457,7 +457,7 @@ func TestSubmitter_CacheClearedOnHeightInclusion(t *testing.T) { daClient.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() daClient.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() daClient.On("HasForcedInclusionNamespace").Return(false).Maybe() - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil, nil) s := NewSubmitter(st, exec, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, nil, zerolog.Nop(), 
nil) // Create test blocks diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 506840f5c3..a6c9d43c7c 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -5,6 +5,8 @@ import ( "context" "errors" "fmt" + "slices" + "sync" "github.com/rs/zerolog" "google.golang.org/protobuf/proto" @@ -20,7 +22,13 @@ import ( // DARetriever defines the interface for retrieving events from the DA layer type DARetriever interface { + // RetrieveFromDA retrieves blocks from the specified DA height and returns height events RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) + // QueuePriorityHeight queues a DA height for priority retrieval (from P2P hints). + // These heights take precedence over sequential fetching. + QueuePriorityHeight(daHeight uint64) + // PopPriorityHeight returns the next priority height to fetch, or 0 if none. + PopPriorityHeight() uint64 } // daRetriever handles DA retrieval operations for syncing @@ -38,6 +46,12 @@ type daRetriever struct { // strictMode indicates if the node has seen a valid DAHeaderEnvelope // and should now reject all legacy/unsigned headers. strictMode bool + + // priorityMu protects priorityHeights from concurrent access + priorityMu sync.Mutex + // priorityHeights holds DA heights from P2P hints that should be fetched + // before continuing sequential retrieval. Sorted in ascending order. 
+ priorityHeights []uint64 } // NewDARetriever creates a new DA retriever @@ -48,14 +62,43 @@ func NewDARetriever( logger zerolog.Logger, ) *daRetriever { return &daRetriever{ - client: client, - cache: cache, - genesis: genesis, - logger: logger.With().Str("component", "da_retriever").Logger(), - pendingHeaders: make(map[uint64]*types.SignedHeader), - pendingData: make(map[uint64]*types.Data), - strictMode: false, + client: client, + cache: cache, + genesis: genesis, + logger: logger.With().Str("component", "da_retriever").Logger(), + pendingHeaders: make(map[uint64]*types.SignedHeader), + pendingData: make(map[uint64]*types.Data), + strictMode: false, + priorityHeights: make([]uint64, 0), + } +} + +// QueuePriorityHeight queues a DA height for priority retrieval. +// Heights from P2P hints take precedence over sequential fetching. +func (r *daRetriever) QueuePriorityHeight(daHeight uint64) { + r.priorityMu.Lock() + defer r.priorityMu.Unlock() + + idx, found := slices.BinarySearch(r.priorityHeights, daHeight) + if found { + return // Already queued } + r.priorityHeights = slices.Insert(r.priorityHeights, idx, daHeight) +} + +// PopPriorityHeight returns the next priority height to fetch, or 0 if none. 
+func (r *daRetriever) PopPriorityHeight() uint64 { + r.priorityMu.Lock() + defer r.priorityMu.Unlock() + + if len(r.priorityHeights) == 0 { + return 0 + } + + height := r.priorityHeights[0] + r.priorityHeights = r.priorityHeights[1:] + + return height } // RetrieveFromDA retrieves blocks from the specified DA height and returns height events @@ -360,7 +403,7 @@ func isEmptyDataExpected(header *types.SignedHeader) bool { } // createEmptyDataForHeader creates empty data for a header -func createEmptyDataForHeader(ctx context.Context, header *types.SignedHeader) *types.Data { +func createEmptyDataForHeader(_ context.Context, header *types.SignedHeader) *types.Data { return &types.Data{ Txs: make(types.Txs, 0), Metadata: &types.Metadata{ diff --git a/block/internal/syncing/da_retriever_mock.go b/block/internal/syncing/da_retriever_mock.go index d94dff4d62..2e191c8851 100644 --- a/block/internal/syncing/da_retriever_mock.go +++ b/block/internal/syncing/da_retriever_mock.go @@ -38,6 +38,90 @@ func (_m *MockDARetriever) EXPECT() *MockDARetriever_Expecter { return &MockDARetriever_Expecter{mock: &_m.Mock} } +// PopPriorityHeight provides a mock function for the type MockDARetriever +func (_mock *MockDARetriever) PopPriorityHeight() uint64 { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for PopPriorityHeight") + } + + var r0 uint64 + if returnFunc, ok := ret.Get(0).(func() uint64); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(uint64) + } + return r0 +} + +// MockDARetriever_PopPriorityHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PopPriorityHeight' +type MockDARetriever_PopPriorityHeight_Call struct { + *mock.Call +} + +// PopPriorityHeight is a helper method to define mock.On call +func (_e *MockDARetriever_Expecter) PopPriorityHeight() *MockDARetriever_PopPriorityHeight_Call { + return &MockDARetriever_PopPriorityHeight_Call{Call: _e.mock.On("PopPriorityHeight")} +} + +func 
(_c *MockDARetriever_PopPriorityHeight_Call) Run(run func()) *MockDARetriever_PopPriorityHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockDARetriever_PopPriorityHeight_Call) Return(v uint64) *MockDARetriever_PopPriorityHeight_Call { + _c.Call.Return(v) + return _c +} + +func (_c *MockDARetriever_PopPriorityHeight_Call) RunAndReturn(run func() uint64) *MockDARetriever_PopPriorityHeight_Call { + _c.Call.Return(run) + return _c +} + +// QueuePriorityHeight provides a mock function for the type MockDARetriever +func (_mock *MockDARetriever) QueuePriorityHeight(daHeight uint64) { + _mock.Called(daHeight) + return +} + +// MockDARetriever_QueuePriorityHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueuePriorityHeight' +type MockDARetriever_QueuePriorityHeight_Call struct { + *mock.Call +} + +// QueuePriorityHeight is a helper method to define mock.On call +// - daHeight uint64 +func (_e *MockDARetriever_Expecter) QueuePriorityHeight(daHeight interface{}) *MockDARetriever_QueuePriorityHeight_Call { + return &MockDARetriever_QueuePriorityHeight_Call{Call: _e.mock.On("QueuePriorityHeight", daHeight)} +} + +func (_c *MockDARetriever_QueuePriorityHeight_Call) Run(run func(daHeight uint64)) *MockDARetriever_QueuePriorityHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 uint64 + if args[0] != nil { + arg0 = args[0].(uint64) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockDARetriever_QueuePriorityHeight_Call) Return() *MockDARetriever_QueuePriorityHeight_Call { + _c.Call.Return() + return _c +} + +func (_c *MockDARetriever_QueuePriorityHeight_Call) RunAndReturn(run func(daHeight uint64)) *MockDARetriever_QueuePriorityHeight_Call { + _c.Run(run) + return _c +} + // RetrieveFromDA provides a mock function for the type MockDARetriever func (_mock *MockDARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { 
ret := _mock.Called(ctx, daHeight) diff --git a/block/internal/syncing/da_retriever_tracing.go b/block/internal/syncing/da_retriever_tracing.go index 894fc67ba1..2bc7a4094d 100644 --- a/block/internal/syncing/da_retriever_tracing.go +++ b/block/internal/syncing/da_retriever_tracing.go @@ -55,3 +55,11 @@ func (t *tracedDARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) return events, nil } + +func (t *tracedDARetriever) QueuePriorityHeight(daHeight uint64) { + t.inner.QueuePriorityHeight(daHeight) +} + +func (t *tracedDARetriever) PopPriorityHeight() uint64 { + return t.inner.PopPriorityHeight() +} diff --git a/block/internal/syncing/da_retriever_tracing_test.go b/block/internal/syncing/da_retriever_tracing_test.go index d83ed99d23..99ce1eb639 100644 --- a/block/internal/syncing/da_retriever_tracing_test.go +++ b/block/internal/syncing/da_retriever_tracing_test.go @@ -27,6 +27,10 @@ func (m *mockDARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ( return nil, nil } +func (m *mockDARetriever) QueuePriorityHeight(daHeight uint64) {} + +func (m *mockDARetriever) PopPriorityHeight() uint64 { return 0 } + func setupDARetrieverTrace(t *testing.T, inner DARetriever) (DARetriever, *tracetest.SpanRecorder) { t.Helper() sr := tracetest.NewSpanRecorder() diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index d8c10bc4c3..49bbaa3f12 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -6,7 +6,7 @@ import ( "fmt" "sync/atomic" - goheader "github.com/celestiaorg/go-header" + "github.com/celestiaorg/go-header" "github.com/rs/zerolog" "github.com/evstack/ev-node/block/internal/cache" @@ -27,8 +27,8 @@ type p2pHandler interface { // The handler maintains a processedHeight to track the highest block that has been // successfully validated and sent to the syncer, preventing duplicate processing. 
type P2PHandler struct { - headerStore goheader.Store[*types.SignedHeader] - dataStore goheader.Store[*types.Data] + headerStore header.Store[*types.P2PSignedHeader] + dataStore header.Store[*types.P2PData] cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger @@ -38,8 +38,8 @@ type P2PHandler struct { // NewP2PHandler creates a new P2P handler. func NewP2PHandler( - headerStore goheader.Store[*types.SignedHeader], - dataStore goheader.Store[*types.Data], + headerStore header.Store[*types.P2PSignedHeader], + dataStore header.Store[*types.P2PData], cache cache.CacheManager, genesis genesis.Genesis, logger zerolog.Logger, @@ -74,29 +74,28 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC return nil } - header, err := h.headerStore.GetByHeight(ctx, height) + p2pHeader, err := h.headerStore.GetByHeight(ctx, height) if err != nil { if ctx.Err() == nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("header unavailable in store") } return err } - if err := h.assertExpectedProposer(header.ProposerAddress); err != nil { + if err := h.assertExpectedProposer(p2pHeader.ProposerAddress); err != nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("invalid header from P2P") return err } - data, err := h.dataStore.GetByHeight(ctx, height) + p2pData, err := h.dataStore.GetByHeight(ctx, height) if err != nil { if ctx.Err() == nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("data unavailable in store") } return err } - - dataCommitment := data.DACommitment() - if !bytes.Equal(header.DataHash[:], dataCommitment[:]) { - err := fmt.Errorf("data hash mismatch: header %x, data %x", header.DataHash, dataCommitment) + dataCommitment := p2pData.DACommitment() + if !bytes.Equal(p2pHeader.DataHash[:], dataCommitment[:]) { + err := fmt.Errorf("data hash mismatch: header %x, data %x", p2pHeader.DataHash, dataCommitment) h.logger.Warn().Uint64("height", height).Err(err).Msg("discarding inconsistent block from P2P") 
return err } @@ -104,10 +103,10 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC // further header validation (signature) is done in validateBlock. // we need to be sure that the previous block n-1 was executed before validating block n event := common.DAHeightEvent{ - Header: header, - Data: data, - DaHeight: 0, - Source: common.SourceP2P, + Header: p2pHeader.SignedHeader, + Data: p2pData.Data, + Source: common.SourceP2P, + DaHeightHints: [2]uint64{p2pHeader.DAHint(), p2pData.DAHint()}, } select { diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index dfab41faae..40c6876d84 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -37,7 +37,7 @@ func buildTestSigner(t *testing.T) ([]byte, crypto.PubKey, signerpkg.Signer) { } // p2pMakeSignedHeader creates a minimally valid SignedHeader for P2P tests. -func p2pMakeSignedHeader(t *testing.T, chainID string, height uint64, proposer []byte, pub crypto.PubKey, signer signerpkg.Signer) *types.SignedHeader { +func p2pMakeSignedHeader(t *testing.T, chainID string, height uint64, proposer []byte, pub crypto.PubKey, signer signerpkg.Signer) *types.P2PSignedHeader { t.Helper() hdr := &types.SignedHeader{ Header: types.Header{ @@ -51,14 +51,14 @@ func p2pMakeSignedHeader(t *testing.T, chainID string, height uint64, proposer [ sig, err := signer.Sign(bz) require.NoError(t, err, "failed to sign header bytes") hdr.Signature = sig - return hdr + return &types.P2PSignedHeader{SignedHeader: hdr} } // P2PTestData aggregates dependencies used by P2P handler tests. 
type P2PTestData struct { Handler *P2PHandler - HeaderStore *extmocks.MockStore[*types.SignedHeader] - DataStore *extmocks.MockStore[*types.Data] + HeaderStore *extmocks.MockStore[*types.P2PSignedHeader] + DataStore *extmocks.MockStore[*types.P2PData] Cache cache.CacheManager Genesis genesis.Genesis ProposerAddr []byte @@ -73,8 +73,8 @@ func setupP2P(t *testing.T) *P2PTestData { gen := genesis.Genesis{ChainID: "p2p-test", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: proposerAddr} - headerStoreMock := extmocks.NewMockStore[*types.SignedHeader](t) - dataStoreMock := extmocks.NewMockStore[*types.Data](t) + headerStoreMock := extmocks.NewMockStore[*types.P2PSignedHeader](t) + dataStoreMock := extmocks.NewMockStore[*types.P2PData](t) cfg := config.Config{ RootDir: t.TempDir(), @@ -129,7 +129,7 @@ func TestP2PHandler_ProcessHeight_EmitsEventWhenHeaderAndDataPresent(t *testing. require.Equal(t, string(p.Genesis.ProposerAddress), string(p.ProposerAddr)) header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 5, p.ProposerAddr, p.ProposerPub, p.Signer) - data := makeData(p.Genesis.ChainID, 5, 1) + data := &types.P2PData{Data: makeData(p.Genesis.ChainID, 5, 1)} header.DataHash = data.DACommitment() bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) require.NoError(t, err) @@ -155,7 +155,7 @@ func TestP2PHandler_ProcessHeight_SkipsWhenDataMissing(t *testing.T) { ctx := context.Background() header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 7, p.ProposerAddr, p.ProposerPub, p.Signer) - data := makeData(p.Genesis.ChainID, 7, 1) + data := &types.P2PData{Data: makeData(p.Genesis.ChainID, 7, 1)} header.DataHash = data.DACommitment() bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) require.NoError(t, err) @@ -225,7 +225,7 @@ func TestP2PHandler_ProcessedHeightSkipsPreviouslyHandledBlocks(t *testing.T) { // Height 6 should be fetched normally. 
header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 6, p.ProposerAddr, p.ProposerPub, p.Signer) - data := makeData(p.Genesis.ChainID, 6, 1) + data := &types.P2PData{Data: makeData(p.Genesis.ChainID, 6, 1)} header.DataHash = data.DACommitment() bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) require.NoError(t, err) @@ -248,7 +248,7 @@ func TestP2PHandler_SetProcessedHeightPreventsDuplicates(t *testing.T) { ctx := context.Background() header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 8, p.ProposerAddr, p.ProposerPub, p.Signer) - data := makeData(p.Genesis.ChainID, 8, 0) + data := &types.P2PData{Data: makeData(p.Genesis.ChainID, 8, 0)} header.DataHash = data.DACommitment() bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) require.NoError(t, err) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index a3f999a056..88f46aafeb 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -14,16 +14,17 @@ import ( "sync/atomic" "time" + "github.com/celestiaorg/go-header" coreexecutor "github.com/evstack/ev-node/core/execution" - datypes "github.com/evstack/ev-node/pkg/da/types" - "github.com/evstack/ev-node/pkg/raft" "github.com/rs/zerolog" "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" "github.com/evstack/ev-node/block/internal/da" "github.com/evstack/ev-node/pkg/config" + datypes "github.com/evstack/ev-node/pkg/da/types" "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/pkg/raft" "github.com/evstack/ev-node/pkg/store" "github.com/evstack/ev-node/types" ) @@ -95,8 +96,8 @@ type Syncer struct { daRetrieverHeight *atomic.Uint64 // P2P stores - headerStore common.Broadcaster[*types.SignedHeader] - dataStore common.Broadcaster[*types.Data] + headerStore header.Store[*types.P2PSignedHeader] + dataStore header.Store[*types.P2PData] // Channels for coordination heightInCh chan 
common.DAHeightEvent @@ -144,8 +145,8 @@ func NewSyncer( metrics *common.Metrics, config config.Config, genesis genesis.Genesis, - headerStore common.Broadcaster[*types.SignedHeader], - dataStore common.Broadcaster[*types.Data], + headerStore header.Store[*types.P2PSignedHeader], + dataStore header.Store[*types.P2PData], logger zerolog.Logger, options common.BlockOptions, errorCh chan<- error, @@ -215,8 +216,9 @@ func (s *Syncer) Start(ctx context.Context) error { if s.config.Instrumentation.IsTracingEnabled() { s.daRetriever = WithTracingDARetriever(s.daRetriever) } + s.fiRetriever = da.NewForcedInclusionRetriever(s.daClient, s.logger, s.config, s.genesis.DAStartHeight, s.genesis.DAEpochForcedInclusion) - s.p2pHandler = NewP2PHandler(s.headerStore.Store(), s.dataStore.Store(), s.cache, s.genesis, s.logger) + s.p2pHandler = NewP2PHandler(s.headerStore, s.dataStore, s.cache, s.genesis, s.logger) if currentHeight, err := s.store.Height(s.ctx); err != nil { s.logger.Error().Err(err).Msg("failed to set initial processed height for p2p handler") } else { @@ -234,11 +236,7 @@ func (s *Syncer) Start(ctx context.Context) error { } // Start main processing loop - s.wg.Add(1) - go func() { - defer s.wg.Done() - s.processLoop() - }() + s.wg.Go(s.processLoop) // Start dedicated workers for DA, and pending processing s.startSyncWorkers() @@ -428,7 +426,19 @@ func (s *Syncer) fetchDAUntilCaughtUp() error { default: } - daHeight := max(s.daRetrieverHeight.Load(), s.cache.DaHeight()) + // Check for priority heights from P2P hints first + var daHeight uint64 + if priorityHeight := s.daRetriever.PopPriorityHeight(); priorityHeight > 0 { + // Skip if we've already fetched past this height + currentHeight := s.daRetrieverHeight.Load() + if priorityHeight < currentHeight { + continue + } + daHeight = priorityHeight + s.logger.Debug().Uint64("da_height", daHeight).Msg("fetching priority DA height from P2P hint") + } else { + daHeight = max(s.daRetrieverHeight.Load(), s.cache.DaHeight()) 
+ } events, err := s.daRetriever.RetrieveFromDA(s.ctx, daHeight) if err != nil { @@ -457,8 +467,19 @@ func (s *Syncer) fetchDAUntilCaughtUp() error { } } - // increment DA retrieval height on successful retrieval - s.daRetrieverHeight.Store(daHeight + 1) + // Update DA retrieval height on successful retrieval + // For priority fetches, only update if the priority height is ahead of current + // For sequential fetches, always increment + newHeight := daHeight + 1 + for { + current := s.daRetrieverHeight.Load() + if newHeight <= current { + break // Already at or past this height + } + if s.daRetrieverHeight.CompareAndSwap(current, newHeight) { + break + } + } } } @@ -591,6 +612,53 @@ func (s *Syncer) processHeightEvent(ctx context.Context, event *common.DAHeightE return } + // If this is a P2P event with a DA height hint, trigger targeted DA retrieval + // This allows us to fetch the block directly from the specified DA height instead of sequential scanning + if event.Source == common.SourceP2P { + var daHeightHints []uint64 + switch { + case event.DaHeightHints == [2]uint64{0, 0}: + // empty, nothing to do + case event.DaHeightHints[0] == 0: + // check only data + if _, exists := s.cache.GetDataDAIncluded(event.Data.Hash().String()); !exists { + daHeightHints = []uint64{event.DaHeightHints[1]} + } + case event.DaHeightHints[1] == 0: + // check only header + if _, exists := s.cache.GetHeaderDAIncluded(event.Header.Hash().String()); !exists { + daHeightHints = []uint64{event.DaHeightHints[0]} + } + default: + // check both + if _, exists := s.cache.GetHeaderDAIncluded(event.Header.Hash().String()); !exists { + daHeightHints = []uint64{event.DaHeightHints[0]} + } + if _, exists := s.cache.GetDataDAIncluded(event.Data.Hash().String()); !exists { + daHeightHints = append(daHeightHints, event.DaHeightHints[1]) + } + if len(daHeightHints) == 2 && daHeightHints[0] == daHeightHints[1] { + daHeightHints = daHeightHints[0:1] + } + } + if len(daHeightHints) > 0 { + for _, 
daHeightHint := range daHeightHints { + // Skip if we've already fetched past this height + if daHeightHint < s.daRetrieverHeight.Load() { + continue + } + + s.logger.Debug(). + Uint64("height", height). + Uint64("da_height_hint", daHeightHint). + Msg("P2P event with DA height hint, queuing priority DA retrieval") + + // Queue priority DA retrieval - will be processed in fetchDAUntilCaughtUp + s.daRetriever.QueuePriorityHeight(daHeightHint) + } + } + } + // Last data must be got from store if the event comes from DA and the data hash is empty. // When if the event comes from P2P, the sequencer and then all the full nodes contains the data. if event.Source == common.SourceDA && bytes.Equal(event.Header.DataHash, common.DataHashForEmptyTxs) && currentHeight > 0 { diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index de99be6dac..7bc6b4f5c6 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -76,6 +76,9 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { syncer.p2pHandler = p2pHandler p2pHandler.On("SetProcessedHeight", mock.Anything).Return().Maybe() + // Mock PopPriorityHeight to always return 0 (no priority heights) + daRetriever.On("PopPriorityHeight").Return(uint64(0)).Maybe() + // Create mock stores for P2P mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() @@ -83,14 +86,6 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - headerStore := common.NewMockBroadcaster[*types.SignedHeader](t) - headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() - syncer.headerStore = headerStore - - dataStore := common.NewMockBroadcaster[*types.Data](t) - dataStore.EXPECT().Store().Return(mockDataStore).Maybe() - syncer.dataStore = dataStore - var callTimes []time.Time 
callCount := 0 @@ -172,6 +167,9 @@ func TestSyncer_BackoffResetOnSuccess(t *testing.T) { syncer.p2pHandler = p2pHandler p2pHandler.On("SetProcessedHeight", mock.Anything).Return().Maybe() + // Mock PopPriorityHeight to always return 0 (no priority heights) + daRetriever.On("PopPriorityHeight").Return(uint64(0)).Maybe() + // Create mock stores for P2P mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() @@ -179,14 +177,6 @@ func TestSyncer_BackoffResetOnSuccess(t *testing.T) { mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - headerStore := common.NewMockBroadcaster[*types.SignedHeader](t) - headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() - syncer.headerStore = headerStore - - dataStore := common.NewMockBroadcaster[*types.Data](t) - dataStore.EXPECT().Store().Return(mockDataStore).Maybe() - syncer.dataStore = dataStore - var callTimes []time.Time // First call - error (should trigger backoff) @@ -262,6 +252,9 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { syncer.daRetriever = daRetriever syncer.p2pHandler = p2pHandler + // Mock PopPriorityHeight to always return 0 (no priority heights) + daRetriever.On("PopPriorityHeight").Return(uint64(0)).Maybe() + // Create mock stores for P2P mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() @@ -269,14 +262,6 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - headerStore := common.NewMockBroadcaster[*types.SignedHeader](t) - headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() - syncer.headerStore = headerStore - - dataStore := common.NewMockBroadcaster[*types.Data](t) - dataStore.EXPECT().Store().Return(mockDataStore).Maybe() - syncer.dataStore = dataStore - 
var callTimes []time.Time p2pHandler.On("SetProcessedHeight", mock.Anything).Return().Maybe() @@ -350,8 +335,8 @@ func setupTestSyncer(t *testing.T, daBlockTime time.Duration) *Syncer { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index 3086551c52..a6529d5562 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -12,6 +12,7 @@ import ( "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" testmocks "github.com/evstack/ev-node/test/mocks" + extmocks "github.com/evstack/ev-node/test/mocks/external" "github.com/evstack/ev-node/types" "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" @@ -135,6 +136,7 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay // Mock DA retriever to emit exactly totalHeights events, then HFF and cancel daR := NewMockDARetriever(b) + daR.On("PopPriorityHeight").Return(uint64(0)).Maybe() for i := uint64(0); i < totalHeights; i++ { daHeight := i + daHeightOffset daR.On("RetrieveFromDA", mock.Anything, daHeight). 
@@ -154,9 +156,9 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay mockP2P := newMockp2pHandler(b) // not used directly in this benchmark path mockP2P.On("SetProcessedHeight", mock.Anything).Return().Maybe() s.p2pHandler = mockP2P - headerP2PStore := common.NewMockBroadcaster[*types.SignedHeader](b) + headerP2PStore := extmocks.NewMockStore[*types.P2PSignedHeader](b) s.headerStore = headerP2PStore - dataP2PStore := common.NewMockBroadcaster[*types.Data](b) + dataP2PStore := extmocks.NewMockStore[*types.P2PData](b) s.dataStore = dataP2PStore return &benchFixture{s: s, st: st, cm: cm, cancel: cancel} } diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index ae0fadf9c9..075b6f1694 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -21,6 +21,7 @@ import ( "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" testmocks "github.com/evstack/ev-node/test/mocks" + extmocks "github.com/evstack/ev-node/test/mocks/external" "github.com/evstack/ev-node/types" ) @@ -399,8 +400,8 @@ func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -475,8 +476,8 @@ func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -581,8 +582,8 @@ func 
TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -691,8 +692,8 @@ func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -761,8 +762,8 @@ func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -829,8 +830,8 @@ func TestVerifyForcedInclusionTxs_DeferralWithinEpoch(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -955,8 +956,8 @@ func TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -1047,8 +1048,8 @@ func TestVerifyForcedInclusionTxs_SmoothingExceedsEpoch(t *testing.T) { common.NopMetrics(), 
cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 6a164c1f72..5edec1cce5 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -123,8 +123,8 @@ func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -175,8 +175,8 @@ func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), errChan, @@ -230,8 +230,8 @@ func TestSequentialBlockSync(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), errChan, @@ -349,11 +349,8 @@ func TestSyncLoopPersistState(t *testing.T) { mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - mockP2PHeaderStore := common.NewMockBroadcaster[*types.SignedHeader](t) - mockP2PHeaderStore.EXPECT().Store().Return(mockHeaderStore).Maybe() - - mockP2PDataStore := common.NewMockBroadcaster[*types.Data](t) - 
mockP2PDataStore.EXPECT().Store().Return(mockDataStore).Maybe() + mockP2PHeaderStore := extmocks.NewMockStore[*types.P2PSignedHeader](t) + mockP2PDataStore := extmocks.NewMockStore[*types.P2PData](t) errorCh := make(chan error, 1) syncerInst1 := NewSyncer( @@ -378,6 +375,7 @@ func TestSyncLoopPersistState(t *testing.T) { daRtrMock, p2pHndlMock := NewMockDARetriever(t), newMockp2pHandler(t) p2pHndlMock.On("ProcessHeight", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() p2pHndlMock.On("SetProcessedHeight", mock.Anything).Return().Maybe() + daRtrMock.On("PopPriorityHeight").Return(uint64(0)).Maybe() syncerInst1.daRetriever, syncerInst1.p2pHandler = daRtrMock, p2pHndlMock // with n da blobs fetched @@ -469,6 +467,7 @@ func TestSyncLoopPersistState(t *testing.T) { daRtrMock, p2pHndlMock = NewMockDARetriever(t), newMockp2pHandler(t) p2pHndlMock.On("ProcessHeight", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() p2pHndlMock.On("SetProcessedHeight", mock.Anything).Return().Maybe() + daRtrMock.On("PopPriorityHeight").Return(uint64(0)).Maybe() syncerInst2.daRetriever, syncerInst2.p2pHandler = daRtrMock, p2pHndlMock daRtrMock.On("RetrieveFromDA", mock.Anything, mock.Anything). 
@@ -712,3 +711,148 @@ func TestSyncer_getHighestStoredDAHeight(t *testing.T) { highestDA = syncer.getHighestStoredDAHeight() assert.Equal(t, uint64(200), highestDA, "should return highest DA height from most recent included height") } + +func TestProcessHeightEvent_TriggersAsyncDARetrieval(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain").Return([]byte("app0"), nil).Once() + + s := NewSyncer( + st, + mockExec, + nil, + cm, + common.NopMetrics(), + cfg, + gen, + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + nil, + ) + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Create a real daRetriever to test priority queue + s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop()) + + // Create event with DA height hint + evt := common.DAHeightEvent{ + Header: &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{ChainID: "c", Height: 2}}}, + Data: &types.Data{Metadata: &types.Metadata{ChainID: "c", Height: 2}}, + Source: common.SourceP2P, + DaHeightHints: [2]uint64{100, 100}, + } + + // Current height is 0 (from init), event height is 2. + // processHeightEvent checks: + // 1. height <= currentHeight (2 <= 0 -> false) + // 2. 
height != currentHeight+1 (2 != 1 -> true) -> stores as pending event + + // We need to simulate height 1 being processed first so height 2 is "next" + // OR we can just test that it DOES NOT trigger DA retrieval if it's pending. + // Wait, the logic for DA retrieval is BEFORE the "next block" check? + // Let's check syncer.go... + // Yes, "If this is a P2P event with a DA height hint, trigger targeted DA retrieval" block is AFTER "If this is not the next block in sequence... return" + + // So we need to be at height 1 to process height 2. + // Let's set the store height to 1. + batch, err := st.NewBatch(context.Background()) + require.NoError(t, err) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + s.processHeightEvent(t.Context(), &evt) + + // Verify that the priority height was queued in the daRetriever + priorityHeight := s.daRetriever.PopPriorityHeight() + assert.Equal(t, uint64(100), priorityHeight) +} + +func TestProcessHeightEvent_SkipsDAHintWhenAlreadyFetched(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain").Return([]byte("app0"), nil).Once() + + s := NewSyncer( + st, + mockExec, + nil, + cm, + common.NopMetrics(), + cfg, + gen, + extmocks.NewMockStore[*types.P2PSignedHeader](t), + extmocks.NewMockStore[*types.P2PData](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + nil, + ) + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Create a real daRetriever to test priority queue + s.daRetriever = 
NewDARetriever(nil, cm, gen, zerolog.Nop()) + + // Set DA retriever height to 150 - simulating we've already fetched past height 100 + s.daRetrieverHeight.Store(150) + + // Set the store height to 1 so the event can be processed + batch, err := st.NewBatch(context.Background()) + require.NoError(t, err) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + // Create event with DA height hint that is BELOW the current daRetrieverHeight + evt := common.DAHeightEvent{ + Header: &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{ChainID: "c", Height: 2}}}, + Data: &types.Data{Metadata: &types.Metadata{ChainID: "c", Height: 2}}, + Source: common.SourceP2P, + DaHeightHints: [2]uint64{100, 100}, // Both hints are below 150 + } + + s.processHeightEvent(t.Context(), &evt) + + // Verify that no priority height was queued since we've already fetched past it + priorityHeight := s.daRetriever.PopPriorityHeight() + assert.Equal(t, uint64(0), priorityHeight, "should not queue DA hint that is below current daRetrieverHeight") + + // Now test with a hint that is ABOVE the current daRetrieverHeight + evt2 := common.DAHeightEvent{ + Header: &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{ChainID: "c", Height: 3}}}, + Data: &types.Data{Metadata: &types.Metadata{ChainID: "c", Height: 3}}, + Source: common.SourceP2P, + DaHeightHints: [2]uint64{200, 200}, // Both hints are above 150 + } + + // Set the store height to 2 so the event can be processed + batch, err = st.NewBatch(context.Background()) + require.NoError(t, err) + require.NoError(t, batch.SetHeight(2)) + require.NoError(t, batch.Commit()) + + s.processHeightEvent(t.Context(), &evt2) + + // Verify that the priority height WAS queued since it's above daRetrieverHeight + priorityHeight = s.daRetriever.PopPriorityHeight() + assert.Equal(t, uint64(200), priorityHeight, "should queue DA hint that is above current daRetrieverHeight") +} diff --git 
a/docs/learn/specs/header-sync.md b/docs/learn/specs/header-sync.md index 750f325933..ae237f9233 100644 --- a/docs/learn/specs/header-sync.md +++ b/docs/learn/specs/header-sync.md @@ -4,13 +4,13 @@ The nodes in the P2P network sync headers and data using separate sync services that implement the [go-header][go-header] interface. Evolve uses a header/data separation architecture where headers and transaction data are synchronized independently through parallel services. Each sync service consists of several components as listed below. -|Component|Description| -|---|---| -|store| a prefixed [datastore][datastore] where synced items are stored (`headerSync` prefix for headers, `dataSync` prefix for data)| -|subscriber| a [libp2p][libp2p] node pubsub subscriber for the specific data type| -|P2P server| a server for handling requests between peers in the P2P network| -|exchange| a client that enables sending in/out-bound requests from/to the P2P network| -|syncer| a service for efficient synchronization. When a P2P node falls behind and wants to catch up to the latest network head via P2P network, it can use the syncer.| +| Component | Description | +| ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| store | a prefixed [datastore][datastore] where synced items are stored (`headerSync` prefix for headers, `dataSync` prefix for data) | +| subscriber | a [libp2p][libp2p] node pubsub subscriber for the specific data type | +| P2P server | a server for handling requests between peers in the P2P network | +| exchange | a client that enables sending in/out-bound requests from/to the P2P network | +| syncer | a service for efficient synchronization. When a P2P node falls behind and wants to catch up to the latest network head via P2P network, it can use the syncer. 
| ## Details @@ -22,7 +22,7 @@ Evolve implements two separate sync services: - Used by all node types (sequencer, full, and light) - Essential for maintaining the canonical view of the chain -### Data Sync Service +### Data Sync Service - Synchronizes `Data` structures containing transaction data - Used only by full nodes and sequencers @@ -90,6 +90,70 @@ The block components integrate with both services through: - The Executor component publishes headers and data through broadcast channels - Separate stores and channels manage header and data synchronization +## DA Height Hints + +DA Height Hints (DAHint) provide an optimization for P2P synchronization by indicating which DA layer height contains a block's header or data. This allows syncing nodes to fetch missing DA data directly instead of performing sequential DA scanning. + +### Naming Considerations + +The naming convention follows this pattern: + +| Name | Usage | +| ----------------- | ---------------------------------------------------------- | +| `DAHeightHint` | Internal struct field storing the hint value | +| `DAHint()` | Getter method returning the DA height hint | +| `SetDAHint()` | Setter method for the DA height hint | +| `P2PSignedHeader` | Wrapper around `SignedHeader` that includes `DAHeightHint` | +| `P2PData` | Wrapper around `Data` that includes `DAHeightHint` | + +The term "hint" is used deliberately because: + +1. **It's advisory, not authoritative**: The hint suggests where to find data on the DA layer, but the authoritative source is always the DA layer itself +2. **It may be absent**: Hints are only populated during certain sync scenarios (see below) +3. **It optimizes but doesn't replace**: Nodes can still function without hints by scanning the DA layer sequentially + +### When DAHints Are Populated + +DAHints are **only populated when a node catches up from P2P** and is not yet synced to the head. 
When a node is already synced to the head: + +- The executor broadcasts headers/data immediately after block creation +- At this point, DA submission has not occurred yet (it happens later in the flow) +- Therefore, the broadcasted P2P messages do not contain DA hints + +This means: + +- **Syncing nodes** (catching up): Receive headers/data with DA hints populated +- **Synced nodes** (at head): Receive headers/data without DA hints + +The DA hints are set by the DA submitter after successful inclusion on the DA layer and stored for later P2P propagation to syncing peers. + +### Implementation Details + +The P2P wrapper types (`P2PSignedHeader` and `P2PData`) extend the base types with an optional `DAHeightHint` field: + +- Uses protobuf optional fields (`optional uint64 da_height_hint`) for backward compatibility +- Old nodes can still unmarshal new messages (the hint field is simply ignored) +- New nodes can unmarshal old messages (the hint field defaults to zero/absent) + +The hint flow: + +1. **Set by the DA Submitter** when headers/data are successfully included on the DA layer +2. **Stored in the P2P store** alongside the header/data +3. **Propagated via P2P** when syncing nodes request blocks +4. **Queued as priority** by the Syncer's DA retriever when received via P2P +5. **Fetched before sequential heights** - priority heights take precedence over normal DA scanning + +### Priority Queue Mechanism + +When a P2P event arrives with a DA height hint, the hint is queued as a priority height in the DA retriever. The `fetchDAUntilCaughtUp` loop checks for priority heights first: + +1. If priority heights are queued, pop and fetch the lowest one first +2. If no priority heights, continue sequential DA fetching (from the last known DA height) +3. Priority heights are sorted ascending to process lower heights first +4.
Already-processed priority heights are tracked to avoid duplicate fetches + +This ensures that when syncing from P2P, the node can immediately fetch the DA data for blocks it receives, rather than waiting for sequential scanning to reach that height. + ## References [1] [Header Sync][sync-service] diff --git a/node/failover.go b/node/failover.go index 27f4ddf685..787f627ce6 100644 --- a/node/failover.go +++ b/node/failover.go @@ -57,6 +57,8 @@ func newSyncMode( rktStore, exec, da, + headerSyncService.Store(), + dataSyncService.Store(), headerSyncService, dataSyncService, logger, diff --git a/pkg/rpc/client/client_test.go b/pkg/rpc/client/client_test.go index 4b2b82e1b3..31aa938b04 100644 --- a/pkg/rpc/client/client_test.go +++ b/pkg/rpc/client/client_test.go @@ -28,10 +28,11 @@ import ( func setupTestServer( t *testing.T, mockStore *mocks.MockStore, - headerStore goheader.Store[*types.SignedHeader], - dataStore goheader.Store[*types.Data], + headerStore goheader.Store[*types.P2PSignedHeader], + dataStore goheader.Store[*types.P2PData], mockP2P *mocks.MockP2PRPC, ) (*httptest.Server, *Client) { + t.Helper() mux := http.NewServeMux() logger := zerolog.Nop() @@ -105,8 +106,8 @@ func TestClientGetMetadata(t *testing.T) { func TestClientGetP2PStoreInfo(t *testing.T) { mockStore := mocks.NewMockStore(t) mockP2P := mocks.NewMockP2PRPC(t) - headerStore := headerstoremocks.NewMockStore[*types.SignedHeader](t) - dataStore := headerstoremocks.NewMockStore[*types.Data](t) + headerStore := headerstoremocks.NewMockStore[*types.P2PSignedHeader](t) + dataStore := headerstoremocks.NewMockStore[*types.P2PData](t) now := time.Now().UTC() @@ -250,27 +251,31 @@ func TestClientGetNamespace(t *testing.T) { require.NotEmpty(t, namespaceResp.DataNamespace) } -func testSignedHeader(height uint64, ts time.Time) *types.SignedHeader { - return &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - Height: height, - Time: uint64(ts.UnixNano()), - ChainID: "test-chain", 
+func testSignedHeader(height uint64, ts time.Time) *types.P2PSignedHeader { + return &types.P2PSignedHeader{ + SignedHeader: &types.SignedHeader{ + Header: types.Header{ + BaseHeader: types.BaseHeader{ + Height: height, + Time: uint64(ts.UnixNano()), + ChainID: "test-chain", + }, + ProposerAddress: []byte{0x01}, + DataHash: []byte{0x02}, + AppHash: []byte{0x03}, }, - ProposerAddress: []byte{0x01}, - DataHash: []byte{0x02}, - AppHash: []byte{0x03}, }, } } -func testData(height uint64, ts time.Time) *types.Data { - return &types.Data{ - Metadata: &types.Metadata{ - ChainID: "test-chain", - Height: height, - Time: uint64(ts.UnixNano()), +func testData(height uint64, ts time.Time) *types.P2PData { + return &types.P2PData{ + Data: &types.Data{ + Metadata: &types.Metadata{ + ChainID: "test-chain", + Height: height, + Time: uint64(ts.UnixNano()), + }, }, } } diff --git a/pkg/rpc/server/server.go b/pkg/rpc/server/server.go index 0ce4b69464..28b8182d9a 100644 --- a/pkg/rpc/server/server.go +++ b/pkg/rpc/server/server.go @@ -35,16 +35,16 @@ var _ rpc.StoreServiceHandler = (*StoreServer)(nil) // StoreServer implements the StoreService defined in the proto file type StoreServer struct { store store.Store - headerStore goheader.Store[*types.SignedHeader] - dataStore goheader.Store[*types.Data] + headerStore goheader.Store[*types.P2PSignedHeader] + dataStore goheader.Store[*types.P2PData] logger zerolog.Logger } // NewStoreServer creates a new StoreServer instance func NewStoreServer( store store.Store, - headerStore goheader.Store[*types.SignedHeader], - dataStore goheader.Store[*types.Data], + headerStore goheader.Store[*types.P2PSignedHeader], + dataStore goheader.Store[*types.P2PData], logger zerolog.Logger, ) *StoreServer { return &StoreServer{ @@ -376,8 +376,8 @@ type RaftNodeSource interface { // NewServiceHandler creates a new HTTP handler for Store, P2P and Config services func NewServiceHandler( store store.Store, - headerStore goheader.Store[*types.SignedHeader], - 
dataStore goheader.Store[*types.Data], + headerStore goheader.Store[*types.P2PSignedHeader], + dataStore goheader.Store[*types.P2PData], peerManager p2p.P2PRPC, proposerAddress []byte, logger zerolog.Logger, diff --git a/pkg/rpc/server/server_test.go b/pkg/rpc/server/server_test.go index c9e2d6483d..ddd50b550e 100644 --- a/pkg/rpc/server/server_test.go +++ b/pkg/rpc/server/server_test.go @@ -325,8 +325,8 @@ func TestGetGenesisDaHeight_InvalidLength(t *testing.T) { func TestGetP2PStoreInfo(t *testing.T) { t.Run("returns snapshots for configured stores", func(t *testing.T) { mockStore := mocks.NewMockStore(t) - headerStore := headerstoremocks.NewMockStore[*types.SignedHeader](t) - dataStore := headerstoremocks.NewMockStore[*types.Data](t) + headerStore := headerstoremocks.NewMockStore[*types.P2PSignedHeader](t) + dataStore := headerstoremocks.NewMockStore[*types.P2PData](t) logger := zerolog.Nop() server := NewStoreServer(mockStore, headerStore, dataStore, logger) @@ -354,10 +354,10 @@ func TestGetP2PStoreInfo(t *testing.T) { t.Run("returns error when a store edge fails", func(t *testing.T) { mockStore := mocks.NewMockStore(t) - headerStore := headerstoremocks.NewMockStore[*types.SignedHeader](t) + headerStore := headerstoremocks.NewMockStore[*types.P2PSignedHeader](t) logger := zerolog.Nop() headerStore.On("Height").Return(uint64(0)) - headerStore.On("Head", mock.Anything).Return((*types.SignedHeader)(nil), fmt.Errorf("boom")) + headerStore.On("Head", mock.Anything).Return((*types.P2PSignedHeader)(nil), fmt.Errorf("boom")) server := NewStoreServer(mockStore, headerStore, nil, logger) resp, err := server.GetP2PStoreInfo(context.Background(), connect.NewRequest(&emptypb.Empty{})) @@ -627,27 +627,31 @@ func TestHealthReadyEndpoint(t *testing.T) { }) } -func makeTestSignedHeader(height uint64, ts time.Time) *types.SignedHeader { - return &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - Height: height, - Time: uint64(ts.UnixNano()), - 
ChainID: "test-chain", +func makeTestSignedHeader(height uint64, ts time.Time) *types.P2PSignedHeader { + return &types.P2PSignedHeader{ + SignedHeader: &types.SignedHeader{ + Header: types.Header{ + BaseHeader: types.BaseHeader{ + Height: height, + Time: uint64(ts.UnixNano()), + ChainID: "test-chain", + }, + ProposerAddress: []byte{0x01}, + DataHash: []byte{0x02}, + AppHash: []byte{0x03}, }, - ProposerAddress: []byte{0x01}, - DataHash: []byte{0x02}, - AppHash: []byte{0x03}, }, } } -func makeTestData(height uint64, ts time.Time) *types.Data { - return &types.Data{ - Metadata: &types.Metadata{ - ChainID: "test-chain", - Height: height, - Time: uint64(ts.UnixNano()), +func makeTestData(height uint64, ts time.Time) *types.P2PData { + return &types.P2PData{ + Data: &types.Data{ + Metadata: &types.Metadata{ + ChainID: "test-chain", + Height: height, + Time: uint64(ts.UnixNano()), + }, }, } } diff --git a/pkg/store/data_store_adapter_test.go b/pkg/store/data_store_adapter_test.go index a43e7838e9..c05db183c4 100644 --- a/pkg/store/data_store_adapter_test.go +++ b/pkg/store/data_store_adapter_test.go @@ -33,6 +33,17 @@ func computeDataIndexHash(h *types.SignedHeader) []byte { return hash[:] } +// wrapData wraps a *types.Data in a *types.P2PData for use with the DataStoreAdapter. 
+func wrapData(d *types.Data) *types.P2PData { + if d == nil { + return nil + } + return &types.P2PData{ + Data: d, + DAHeightHint: 0, + } +} + func TestDataStoreAdapter_NewDataStoreAdapter(t *testing.T) { t.Parallel() ctx := context.Background() @@ -66,7 +77,7 @@ func TestDataStoreAdapter_AppendAndRetrieve(t *testing.T) { _, d2 := types.GetRandomBlock(2, 2, "test-chain") // Append data - these go to pending cache - err = adapter.Append(ctx, d1, d2) + err = adapter.Append(ctx, wrapData(d1), wrapData(d2)) require.NoError(t, err) // Check height is updated (from pending) @@ -156,7 +167,7 @@ func TestDataStoreAdapter_HasAt(t *testing.T) { adapter := NewDataStoreAdapter(store, testGenesisData()) _, d1 := types.GetRandomBlock(1, 2, "test-chain") - require.NoError(t, adapter.Append(ctx, d1)) + require.NoError(t, adapter.Append(ctx, wrapData(d1))) // HasAt should return true for pending height assert.True(t, adapter.HasAt(ctx, 1)) @@ -203,7 +214,7 @@ func TestDataStoreAdapter_GetRange(t *testing.T) { _, d1 := types.GetRandomBlock(1, 1, "test-chain") _, d2 := types.GetRandomBlock(2, 1, "test-chain") _, d3 := types.GetRandomBlock(3, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, d1, d2, d3)) + require.NoError(t, adapter.Append(ctx, wrapData(d1), wrapData(d2), wrapData(d3))) // GetRange [1, 3) should return data 1 and 2 dataList, err := adapter.GetRange(ctx, 1, 3) @@ -230,10 +241,10 @@ func TestDataStoreAdapter_GetRangeByHeight(t *testing.T) { _, d1 := types.GetRandomBlock(1, 1, "test-chain") _, d2 := types.GetRandomBlock(2, 1, "test-chain") _, d3 := types.GetRandomBlock(3, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, d1, d2, d3)) + require.NoError(t, adapter.Append(ctx, wrapData(d1), wrapData(d2), wrapData(d3))) // GetRangeByHeight from d1 to 4 should return data 2 and 3 - dataList, err := adapter.GetRangeByHeight(ctx, d1, 4) + dataList, err := adapter.GetRangeByHeight(ctx, wrapData(d1), 4) require.NoError(t, err) require.Len(t, dataList, 2) 
assert.Equal(t, uint64(2), dataList[0].Height()) @@ -252,7 +263,7 @@ func TestDataStoreAdapter_Init(t *testing.T) { _, d1 := types.GetRandomBlock(1, 1, "test-chain") // Init should add data to pending - err = adapter.Init(ctx, d1) + err = adapter.Init(ctx, wrapData(d1)) require.NoError(t, err) // Verify it's retrievable from pending @@ -262,7 +273,7 @@ func TestDataStoreAdapter_Init(t *testing.T) { // Init again should be a no-op (already initialized) _, d2 := types.GetRandomBlock(2, 1, "test-chain") - err = adapter.Init(ctx, d2) + err = adapter.Init(ctx, wrapData(d2)) require.NoError(t, err) // Height 2 should not be in pending since Init was already done @@ -284,7 +295,7 @@ func TestDataStoreAdapter_Tail(t *testing.T) { _, d1 := types.GetRandomBlock(1, 1, "test-chain") _, d2 := types.GetRandomBlock(2, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, d1, d2)) + require.NoError(t, adapter.Append(ctx, wrapData(d1), wrapData(d2))) // Tail should return the first data from pending tail, err := adapter.Tail(ctx) @@ -346,7 +357,7 @@ func TestDataStoreAdapter_DeleteRange(t *testing.T) { _, d1 := types.GetRandomBlock(1, 1, "test-chain") _, d2 := types.GetRandomBlock(2, 1, "test-chain") _, d3 := types.GetRandomBlock(3, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, d1, d2, d3)) + require.NoError(t, adapter.Append(ctx, wrapData(d1), wrapData(d2), wrapData(d3))) assert.Equal(t, uint64(3), adapter.Height()) @@ -376,7 +387,7 @@ func TestDataStoreAdapter_OnDelete(t *testing.T) { _, d1 := types.GetRandomBlock(1, 1, "test-chain") _, d2 := types.GetRandomBlock(2, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, d1, d2)) + require.NoError(t, adapter.Append(ctx, wrapData(d1), wrapData(d2))) // Track deleted heights var deletedHeights []uint64 @@ -410,7 +421,7 @@ func TestDataStoreAdapter_AppendSkipsExisting(t *testing.T) { adapter := NewDataStoreAdapter(store, testGenesisData()) // Append the same data again should not error (skips existing in store) - 
err = adapter.Append(ctx, d1) + err = adapter.Append(ctx, wrapData(d1)) require.NoError(t, err) // Height should still be 1 @@ -430,7 +441,7 @@ func TestDataStoreAdapter_AppendNilData(t *testing.T) { err = adapter.Append(ctx) require.NoError(t, err) - var nilData *types.Data + var nilData *types.P2PData err = adapter.Append(ctx, nilData) require.NoError(t, err) @@ -524,7 +535,7 @@ func TestDataStoreAdapter_ContextTimeout(t *testing.T) { _, d1 := types.GetRandomBlock(1, 1, "test-chain") // Note: In-memory store doesn't actually check context, but this verifies // the adapter passes the context through - _ = adapter.Append(ctx, d1) + _ = adapter.Append(ctx, wrapData(d1)) } func TestDataStoreAdapter_GetRangePartial(t *testing.T) { @@ -541,7 +552,7 @@ func TestDataStoreAdapter_GetRangePartial(t *testing.T) { // Only append data for heights 1 and 2, not 3 _, d1 := types.GetRandomBlock(1, 1, "test-chain") _, d2 := types.GetRandomBlock(2, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, d1, d2)) + require.NoError(t, adapter.Append(ctx, wrapData(d1), wrapData(d2))) // GetRange [1, 5) should return data 1 and 2 (partial result) dataList, err := adapter.GetRange(ctx, 1, 5) @@ -580,15 +591,15 @@ func TestDataStoreAdapter_MultipleAppends(t *testing.T) { // Append data in multiple batches _, d1 := types.GetRandomBlock(1, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, d1)) + require.NoError(t, adapter.Append(ctx, wrapData(d1))) assert.Equal(t, uint64(1), adapter.Height()) _, d2 := types.GetRandomBlock(2, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, d2)) + require.NoError(t, adapter.Append(ctx, wrapData(d2))) assert.Equal(t, uint64(2), adapter.Height()) _, d3 := types.GetRandomBlock(3, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, d3)) + require.NoError(t, adapter.Append(ctx, wrapData(d3))) assert.Equal(t, uint64(3), adapter.Height()) // Verify all data is retrievable @@ -608,7 +619,7 @@ func 
TestDataStoreAdapter_PendingAndStoreInteraction(t *testing.T) { // Add data to pending _, d1 := types.GetRandomBlock(1, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, d1)) + require.NoError(t, adapter.Append(ctx, wrapData(d1))) // Verify it's in pending retrieved, err := adapter.GetByHeight(ctx, 1) @@ -650,7 +661,7 @@ func TestDataStoreAdapter_HeadPrefersPending(t *testing.T) { // Add height 2 to pending _, d2 := types.GetRandomBlock(2, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, d2)) + require.NoError(t, adapter.Append(ctx, wrapData(d2))) // Head should return the pending data (higher height) head, err := adapter.Head(ctx) @@ -669,7 +680,7 @@ func TestDataStoreAdapter_GetFromPendingByHash(t *testing.T) { // Add data to pending _, d1 := types.GetRandomBlock(1, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, d1)) + require.NoError(t, adapter.Append(ctx, wrapData(d1))) // Get by hash from pending (uses data's Hash() method) retrieved, err := adapter.Get(ctx, d1.Hash()) diff --git a/pkg/store/header_store_adapter_test.go b/pkg/store/header_store_adapter_test.go index bb1a281936..12635f097d 100644 --- a/pkg/store/header_store_adapter_test.go +++ b/pkg/store/header_store_adapter_test.go @@ -31,6 +31,17 @@ func computeHeaderIndexHash(h *types.SignedHeader) []byte { return hash[:] } +// wrapHeader wraps a *types.SignedHeader in a *types.P2PSignedHeader for use with the HeaderStoreAdapter. 
+func wrapHeader(h *types.SignedHeader) *types.P2PSignedHeader { + if h == nil { + return nil + } + return &types.P2PSignedHeader{ + SignedHeader: h, + DAHeightHint: 0, + } +} + func TestHeaderStoreAdapter_NewHeaderStoreAdapter(t *testing.T) { t.Parallel() ctx := context.Background() @@ -64,7 +75,7 @@ func TestHeaderStoreAdapter_AppendAndRetrieve(t *testing.T) { h2, _ := types.GetRandomBlock(2, 2, "test-chain") // Append headers - these go to pending cache - err = adapter.Append(ctx, h1, h2) + err = adapter.Append(ctx, wrapHeader(h1), wrapHeader(h2)) require.NoError(t, err) // Check height is updated (from pending) @@ -154,7 +165,7 @@ func TestHeaderStoreAdapter_HasAt(t *testing.T) { adapter := NewHeaderStoreAdapter(store, testGenesis()) h1, _ := types.GetRandomBlock(1, 2, "test-chain") - require.NoError(t, adapter.Append(ctx, h1)) + require.NoError(t, adapter.Append(ctx, wrapHeader(h1))) // HasAt should return true for pending height assert.True(t, adapter.HasAt(ctx, 1)) @@ -201,7 +212,7 @@ func TestHeaderStoreAdapter_GetRange(t *testing.T) { h1, _ := types.GetRandomBlock(1, 1, "test-chain") h2, _ := types.GetRandomBlock(2, 1, "test-chain") h3, _ := types.GetRandomBlock(3, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, h1, h2, h3)) + require.NoError(t, adapter.Append(ctx, wrapHeader(h1), wrapHeader(h2), wrapHeader(h3))) // GetRange [1, 3) should return headers 1 and 2 headers, err := adapter.GetRange(ctx, 1, 3) @@ -228,10 +239,10 @@ func TestHeaderStoreAdapter_GetRangeByHeight(t *testing.T) { h1, _ := types.GetRandomBlock(1, 1, "test-chain") h2, _ := types.GetRandomBlock(2, 1, "test-chain") h3, _ := types.GetRandomBlock(3, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, h1, h2, h3)) + require.NoError(t, adapter.Append(ctx, wrapHeader(h1), wrapHeader(h2), wrapHeader(h3))) // GetRangeByHeight from h1 to 4 should return headers 2 and 3 - headers, err := adapter.GetRangeByHeight(ctx, h1, 4) + headers, err := adapter.GetRangeByHeight(ctx, 
wrapHeader(h1), 4) require.NoError(t, err) require.Len(t, headers, 2) assert.Equal(t, uint64(2), headers[0].Height()) @@ -250,7 +261,7 @@ func TestHeaderStoreAdapter_Init(t *testing.T) { h1, _ := types.GetRandomBlock(1, 1, "test-chain") // Init should add header to pending - err = adapter.Init(ctx, h1) + err = adapter.Init(ctx, wrapHeader(h1)) require.NoError(t, err) // Verify it's retrievable from pending @@ -260,7 +271,7 @@ func TestHeaderStoreAdapter_Init(t *testing.T) { // Init again should be a no-op (already initialized) h2, _ := types.GetRandomBlock(2, 1, "test-chain") - err = adapter.Init(ctx, h2) + err = adapter.Init(ctx, wrapHeader(h2)) require.NoError(t, err) // Height 2 should not be in pending since Init was already done @@ -282,7 +293,7 @@ func TestHeaderStoreAdapter_Tail(t *testing.T) { h1, _ := types.GetRandomBlock(1, 1, "test-chain") h2, _ := types.GetRandomBlock(2, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, h1, h2)) + require.NoError(t, adapter.Append(ctx, wrapHeader(h1), wrapHeader(h2))) // Tail should return the first header tail, err := adapter.Tail(ctx) @@ -344,7 +355,7 @@ func TestHeaderStoreAdapter_DeleteRange(t *testing.T) { h1, _ := types.GetRandomBlock(1, 1, "test-chain") h2, _ := types.GetRandomBlock(2, 1, "test-chain") h3, _ := types.GetRandomBlock(3, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, h1, h2, h3)) + require.NoError(t, adapter.Append(ctx, wrapHeader(h1), wrapHeader(h2), wrapHeader(h3))) assert.Equal(t, uint64(3), adapter.Height()) @@ -374,7 +385,7 @@ func TestHeaderStoreAdapter_OnDelete(t *testing.T) { h1, _ := types.GetRandomBlock(1, 1, "test-chain") h2, _ := types.GetRandomBlock(2, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, h1, h2)) + require.NoError(t, adapter.Append(ctx, wrapHeader(h1), wrapHeader(h2))) // Track deleted heights var deletedHeights []uint64 @@ -408,7 +419,7 @@ func TestHeaderStoreAdapter_AppendSkipsExisting(t *testing.T) { adapter := NewHeaderStoreAdapter(store, 
testGenesis()) // Append the same header again should not error (skips existing in store) - err = adapter.Append(ctx, h1) + err = adapter.Append(ctx, wrapHeader(h1)) require.NoError(t, err) // Height should still be 1 @@ -428,7 +439,7 @@ func TestHeaderStoreAdapter_AppendNilHeaders(t *testing.T) { err = adapter.Append(ctx) require.NoError(t, err) - var nilHeader *types.SignedHeader + var nilHeader *types.P2PSignedHeader err = adapter.Append(ctx, nilHeader) require.NoError(t, err) @@ -522,7 +533,7 @@ func TestHeaderStoreAdapter_ContextTimeout(t *testing.T) { h1, _ := types.GetRandomBlock(1, 1, "test-chain") // Note: In-memory store doesn't actually check context, but this verifies // the adapter passes the context through - _ = adapter.Append(ctx, h1) + _ = adapter.Append(ctx, wrapHeader(h1)) } func TestHeaderStoreAdapter_PendingAndStoreInteraction(t *testing.T) { @@ -536,7 +547,7 @@ func TestHeaderStoreAdapter_PendingAndStoreInteraction(t *testing.T) { // Add header to pending h1, _ := types.GetRandomBlock(1, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, h1)) + require.NoError(t, adapter.Append(ctx, wrapHeader(h1))) // Verify it's in pending retrieved, err := adapter.GetByHeight(ctx, 1) @@ -578,7 +589,7 @@ func TestHeaderStoreAdapter_HeadPrefersPending(t *testing.T) { // Add height 2 to pending h2, _ := types.GetRandomBlock(2, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, h2)) + require.NoError(t, adapter.Append(ctx, wrapHeader(h2))) // Head should return the pending header (higher height) head, err := adapter.Head(ctx) @@ -597,7 +608,7 @@ func TestHeaderStoreAdapter_GetFromPendingByHash(t *testing.T) { // Add header to pending h1, _ := types.GetRandomBlock(1, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, h1)) + require.NoError(t, adapter.Append(ctx, wrapHeader(h1))) // Get by hash from pending (uses header's Hash() method) retrieved, err := adapter.Get(ctx, h1.Hash()) diff --git a/pkg/store/store.go b/pkg/store/store.go index 
fcff45034e..40d00547b5 100644 --- a/pkg/store/store.go +++ b/pkg/store/store.go @@ -152,7 +152,7 @@ func (s *DefaultStore) GetStateAtHeight(ctx context.Context, height uint64) (typ blob, err := s.db.Get(ctx, ds.NewKey(getStateAtHeightKey(height))) if err != nil { if errors.Is(err, ds.ErrNotFound) { - return types.State{}, fmt.Errorf("no state found at height %d", height) + return types.State{}, fmt.Errorf("get state at height %d: %w", height, ds.ErrNotFound) } return types.State{}, fmt.Errorf("failed to retrieve state at height %d: %w", height, err) } diff --git a/pkg/store/store_adapter.go b/pkg/store/store_adapter.go index 79571c20f7..ecd4da7abc 100644 --- a/pkg/store/store_adapter.go +++ b/pkg/store/store_adapter.go @@ -3,7 +3,9 @@ package store import ( "bytes" "context" + "encoding/binary" "errors" + "fmt" "sync" "sync/atomic" @@ -30,6 +32,19 @@ type StoreGetter[H header.Header[H]] interface { Height(ctx context.Context) (uint64, error) // HasAt checks if an item exists at the given height. HasAt(ctx context.Context, height uint64) bool + // GetDAHint retrieves the DA hint for a given height. + GetDAHint(ctx context.Context, height uint64) (uint64, error) + // SetDAHint stores the DA hint for a given height. + SetDAHint(ctx context.Context, height uint64, daHint uint64) error +} + +// EntityWithDAHint extends header.Header with DA hint methods. +// This interface is used by sync services and store adapters to track +// which DA height contains the data for a given block. +type EntityWithDAHint[H any] interface { + header.Header[H] + SetDAHint(daHeight uint64) + DAHint() uint64 } // heightSub provides a mechanism for waiting on a specific height to be stored. @@ -111,7 +126,7 @@ func (hs *heightSub) notifyUpTo(h uint64) { // This cache allows the go-header syncer and P2P handler to access items before they // are validated and persisted by the ev-node syncer. 
Once the ev-node syncer processes // a block, it writes to the underlying store, and subsequent reads will come from the store. -type StoreAdapter[H header.Header[H]] struct { +type StoreAdapter[H EntityWithDAHint[H]] struct { getter StoreGetter[H] genesisInitialHeight uint64 @@ -127,15 +142,20 @@ type StoreAdapter[H header.Header[H]] struct { // written to the store yet. Keyed by height. Using LRU prevents unbounded growth. pending *lru.Cache[uint64, H] + // daHints caches DA height hints by block height for fast access. + // Hints are also persisted to disk via the getter. + daHints *lru.Cache[uint64, uint64] + // onDeleteFn is called when items are deleted (for rollback scenarios) onDeleteFn func(context.Context, uint64) error } // NewStoreAdapter creates a new StoreAdapter wrapping the given store getter. // The genesis is used to determine the initial height for efficient Tail lookups. -func NewStoreAdapter[H header.Header[H]](getter StoreGetter[H], gen genesis.Genesis) *StoreAdapter[H] { +func NewStoreAdapter[H EntityWithDAHint[H]](getter StoreGetter[H], gen genesis.Genesis) *StoreAdapter[H] { // Create LRU cache for pending items - ignore error as size is constant and valid pendingCache, _ := lru.New[uint64, H](defaultPendingCacheSize) + daHintsCache, _ := lru.New[uint64, uint64](defaultPendingCacheSize) // Get actual current height from store (0 if empty) var storeHeight uint64 @@ -147,6 +167,7 @@ func NewStoreAdapter[H header.Header[H]](getter StoreGetter[H], gen genesis.Gene getter: getter, genesisInitialHeight: max(gen.InitialHeight, 1), pending: pendingCache, + daHints: daHintsCache, heightSub: newHeightSub(storeHeight), } @@ -276,12 +297,14 @@ func (a *StoreAdapter[H]) Get(ctx context.Context, hash header.Hash) (H, error) // First try the store item, err := a.getter.GetByHash(ctx, hash) if err == nil { + a.applyDAHint(item) return item, nil } // Check pending items for _, h := range a.pending.Keys() { if pendingItem, ok := a.pending.Peek(h); ok && 
!pendingItem.IsZero() && bytes.Equal(pendingItem.Hash(), hash) { + a.applyDAHint(pendingItem) return pendingItem, nil } } @@ -316,17 +339,40 @@ func (a *StoreAdapter[H]) getByHeightNoWait(ctx context.Context, height uint64) // First try the store item, err := a.getter.GetByHeight(ctx, height) if err == nil { + a.applyDAHint(item) return item, nil } // Check pending items if pendingItem, ok := a.pending.Peek(height); ok { + a.applyDAHint(pendingItem) return pendingItem, nil } return zero, header.ErrNotFound } +// applyDAHint sets the DA hint on the item from cache or disk. +func (a *StoreAdapter[H]) applyDAHint(item H) { + if item.IsZero() { + return + } + + height := item.Height() + + // Check cache first + if hint, found := a.daHints.Get(height); found { + item.SetDAHint(hint) + return + } + + // Try to load from disk + if hint, err := a.getter.GetDAHint(context.Background(), height); err == nil && hint > 0 { + a.daHints.Add(height, hint) + item.SetDAHint(hint) + } +} + // GetRangeByHeight returns items in the range [from.Height()+1, to). // This follows go-header's convention where 'from' is the trusted item // and we return items starting from the next height. @@ -431,6 +477,7 @@ func (a *StoreAdapter[H]) Height() uint64 { // Append stores items in the pending cache. // These items are received via P2P and will be available for retrieval // until the ev-node syncer processes and persists them to the store. +// If items have a DA hint set, it will be cached for later retrieval. 
func (a *StoreAdapter[H]) Append(ctx context.Context, items ...H) error { if len(items) == 0 { return nil @@ -443,9 +490,16 @@ func (a *StoreAdapter[H]) Append(ctx context.Context, items ...H) error { height := item.Height() + // Cache and persist DA hint if present + if hint := item.DAHint(); hint > 0 { + a.daHints.Add(height, hint) + // Persist to disk + _ = a.getter.SetDAHint(ctx, height, hint) + } + // Check if already in store if a.getter.HasAt(ctx, height) { - // Already persisted, skip + // Already persisted, skip adding to pending continue } @@ -492,9 +546,10 @@ func (a *StoreAdapter[H]) Sync(ctx context.Context) error { // DeleteRange deletes items in the range [from, to). // This is used for rollback operations. func (a *StoreAdapter[H]) DeleteRange(ctx context.Context, from, to uint64) error { - // Remove from pending cache + // Remove from pending cache and DA hints cache for height := from; height < to; height++ { a.pending.Remove(height) + a.daHints.Remove(height) if a.onDeleteFn != nil { if err := a.onDeleteFn(ctx, height); err != nil { @@ -527,14 +582,52 @@ func NewHeaderStoreGetter(store Store) *HeaderStoreGetter { } // GetByHeight implements StoreGetter. -func (g *HeaderStoreGetter) GetByHeight(ctx context.Context, height uint64) (*types.SignedHeader, error) { - return g.store.GetHeader(ctx, height) +func (g *HeaderStoreGetter) GetByHeight(ctx context.Context, height uint64) (*types.P2PSignedHeader, error) { + header, err := g.store.GetHeader(ctx, height) + if err != nil { + return nil, err + } + + daHint, _ := g.GetDAHint(ctx, height) + + return &types.P2PSignedHeader{ + SignedHeader: header, + DAHeightHint: daHint, + }, nil +} + +// GetDAHint implements StoreGetter. 
+func (g *HeaderStoreGetter) GetDAHint(ctx context.Context, height uint64) (uint64, error) { + data, err := g.store.GetMetadata(ctx, GetHeightToDAHeightHeaderKey(height)) + if err != nil { + return 0, err + } + if len(data) != 8 { + return 0, fmt.Errorf("invalid da hint data length: %d", len(data)) + } + return binary.LittleEndian.Uint64(data), nil +} + +// SetDAHint implements StoreGetter. +func (g *HeaderStoreGetter) SetDAHint(ctx context.Context, height uint64, daHint uint64) error { + data := make([]byte, 8) + binary.LittleEndian.PutUint64(data, daHint) + return g.store.SetMetadata(ctx, GetHeightToDAHeightHeaderKey(height), data) } // GetByHash implements StoreGetter. -func (g *HeaderStoreGetter) GetByHash(ctx context.Context, hash []byte) (*types.SignedHeader, error) { +func (g *HeaderStoreGetter) GetByHash(ctx context.Context, hash []byte) (*types.P2PSignedHeader, error) { hdr, _, err := g.store.GetBlockByHash(ctx, hash) - return hdr, err + if err != nil { + return nil, err + } + + daHint, _ := g.GetDAHint(ctx, hdr.Height()) + + return &types.P2PSignedHeader{ + SignedHeader: hdr, + DAHeightHint: daHint, + }, nil } // Height implements StoreGetter. @@ -559,15 +652,52 @@ func NewDataStoreGetter(store Store) *DataStoreGetter { } // GetByHeight implements StoreGetter. -func (g *DataStoreGetter) GetByHeight(ctx context.Context, height uint64) (*types.Data, error) { +func (g *DataStoreGetter) GetByHeight(ctx context.Context, height uint64) (*types.P2PData, error) { _, data, err := g.store.GetBlockData(ctx, height) - return data, err + if err != nil { + return nil, err + } + + daHint, _ := g.GetDAHint(ctx, height) + + return &types.P2PData{ + Data: data, + DAHeightHint: daHint, + }, nil +} + +// GetDAHint implements StoreGetter. 
+func (g *DataStoreGetter) GetDAHint(ctx context.Context, height uint64) (uint64, error) { + data, err := g.store.GetMetadata(ctx, GetHeightToDAHeightDataKey(height)) + if err != nil { + return 0, err + } + if len(data) != 8 { + return 0, fmt.Errorf("invalid da hint data length: %d", len(data)) + } + return binary.LittleEndian.Uint64(data), nil +} + +// SetDAHint implements StoreGetter. +func (g *DataStoreGetter) SetDAHint(ctx context.Context, height uint64, daHint uint64) error { + data := make([]byte, 8) + binary.LittleEndian.PutUint64(data, daHint) + return g.store.SetMetadata(ctx, GetHeightToDAHeightDataKey(height), data) } // GetByHash implements StoreGetter. -func (g *DataStoreGetter) GetByHash(ctx context.Context, hash []byte) (*types.Data, error) { +func (g *DataStoreGetter) GetByHash(ctx context.Context, hash []byte) (*types.P2PData, error) { _, data, err := g.store.GetBlockByHash(ctx, hash) - return data, err + if err != nil { + return nil, err + } + + daHint, _ := g.GetDAHint(ctx, data.Height()) + + return &types.P2PData{ + Data: data, + DAHeightHint: daHint, + }, nil } // Height implements StoreGetter. @@ -582,17 +712,17 @@ func (g *DataStoreGetter) HasAt(ctx context.Context, height uint64) bool { } // Type aliases for convenience -type HeaderStoreAdapter = StoreAdapter[*types.SignedHeader] -type DataStoreAdapter = StoreAdapter[*types.Data] +type HeaderStoreAdapter = StoreAdapter[*types.P2PSignedHeader] +type DataStoreAdapter = StoreAdapter[*types.P2PData] // NewHeaderStoreAdapter creates a new StoreAdapter for headers. // The genesis is used to determine the initial height for efficient Tail lookups. func NewHeaderStoreAdapter(store Store, gen genesis.Genesis) *HeaderStoreAdapter { - return NewStoreAdapter[*types.SignedHeader](NewHeaderStoreGetter(store), gen) + return NewStoreAdapter(NewHeaderStoreGetter(store), gen) } // NewDataStoreAdapter creates a new StoreAdapter for data. 
// The genesis is used to determine the initial height for efficient Tail lookups. func NewDataStoreAdapter(store Store, gen genesis.Genesis) *DataStoreAdapter { - return NewStoreAdapter[*types.Data](NewDataStoreGetter(store), gen) + return NewStoreAdapter(NewDataStoreGetter(store), gen) } diff --git a/pkg/sync/sync_service.go b/pkg/sync/sync_service.go index 6d75bd73e2..8567e79764 100644 --- a/pkg/sync/sync_service.go +++ b/pkg/sync/sync_service.go @@ -32,10 +32,16 @@ const ( dataSync syncType = "dataSync" ) +// HeaderSyncService is the P2P Sync Service for headers. +type HeaderSyncService = SyncService[*types.P2PSignedHeader] + +// DataSyncService is the P2P Sync Service for blocks. +type DataSyncService = SyncService[*types.P2PData] + // SyncService is the P2P Sync Service for blocks and headers. // // Uses the go-header library for handling all P2P logic. -type SyncService[H header.Header[H]] struct { +type SyncService[H store.EntityWithDAHint[H]] struct { conf config.Config logger zerolog.Logger syncType syncType @@ -55,12 +61,6 @@ type SyncService[H header.Header[H]] struct { storeInitialized atomic.Bool } -// DataSyncService is the P2P Sync Service for blocks. -type DataSyncService = SyncService[*types.Data] - -// HeaderSyncService is the P2P Sync Service for headers. -type HeaderSyncService = SyncService[*types.SignedHeader] - // NewDataSyncService returns a new DataSyncService. 
func NewDataSyncService( evStore store.Store, @@ -85,7 +85,7 @@ func NewHeaderSyncService( return newSyncService(storeAdapter, headerSync, conf, genesis, p2p, logger) } -func newSyncService[H header.Header[H]]( +func newSyncService[H store.EntityWithDAHint[H]]( storeAdapter header.Store[H], syncType syncType, conf config.Config, @@ -163,6 +163,23 @@ func (syncService *SyncService[H]) WriteToStoreAndBroadcast(ctx context.Context, return nil } +func (s *SyncService[H]) AppendDAHint(ctx context.Context, daHeight uint64, heights ...uint64) error { + entries := make([]H, 0, len(heights)) + for _, height := range heights { + v, err := s.store.GetByHeight(ctx, height) + if err != nil { + if errors.Is(err, header.ErrNotFound) { + s.logger.Debug().Uint64("height", height).Msg("cannot append DA height hint; header/data not found in store") + continue + } + return err + } + v.SetDAHint(daHeight) + entries = append(entries, v) + } + return s.store.Append(ctx, entries...) +} + // Start is a part of Service interface. func (syncService *SyncService[H]) Start(ctx context.Context) error { // setup P2P infrastructure, but don't start Subscriber yet. 
diff --git a/pkg/sync/sync_service_test.go b/pkg/sync/sync_service_test.go index b525f17ffe..ea600f3bdd 100644 --- a/pkg/sync/sync_service_test.go +++ b/pkg/sync/sync_service_test.go @@ -14,6 +14,7 @@ import ( "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/signer" "github.com/evstack/ev-node/pkg/signer/noop" + "github.com/evstack/ev-node/pkg/store" "github.com/evstack/ev-node/types" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/sync" @@ -21,8 +22,6 @@ import ( mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/rs/zerolog" "github.com/stretchr/testify/require" - - "github.com/evstack/ev-node/pkg/store" ) func TestHeaderSyncServiceRestart(t *testing.T) { @@ -60,8 +59,8 @@ func TestHeaderSyncServiceRestart(t *testing.T) { defer cancel() require.NoError(t, p2pClient.Start(ctx)) - rktStore := store.New(mainKV) - svc, err := NewHeaderSyncService(rktStore, conf, genesisDoc, p2pClient, logger) + evStore := store.New(mainKV) + svc, err := NewHeaderSyncService(evStore, conf, genesisDoc, p2pClient, logger) require.NoError(t, err) err = svc.Start(ctx) require.NoError(t, err) @@ -76,12 +75,12 @@ func TestHeaderSyncServiceRestart(t *testing.T) { signedHeader, err := types.GetRandomSignedHeaderCustom(&headerConfig, genesisDoc.ChainID) require.NoError(t, err) require.NoError(t, signedHeader.Validate()) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: signedHeader})) for i := genesisDoc.InitialHeight + 1; i < 2; i++ { signedHeader = nextHeader(t, signedHeader, genesisDoc.ChainID, noopSigner) t.Logf("signed header: %d", i) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: signedHeader})) } // then stop and restart service @@ -101,7 +100,7 @@ func TestHeaderSyncServiceRestart(t *testing.T) { require.NoError(t, 
err) t.Cleanup(func() { _ = p2pClient.Close() }) - svc, err = NewHeaderSyncService(rktStore, conf, genesisDoc, p2pClient, logger) + svc, err = NewHeaderSyncService(evStore, conf, genesisDoc, p2pClient, logger) require.NoError(t, err) err = svc.Start(ctx) require.NoError(t, err) @@ -112,7 +111,7 @@ func TestHeaderSyncServiceRestart(t *testing.T) { for i := signedHeader.Height() + 1; i < 2; i++ { signedHeader = nextHeader(t, signedHeader, genesisDoc.ChainID, noopSigner) t.Logf("signed header: %d", i) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: signedHeader})) } cancel() } @@ -152,8 +151,8 @@ func TestHeaderSyncServiceInitFromHigherHeight(t *testing.T) { require.NoError(t, p2pClient.Start(ctx)) t.Cleanup(func() { _ = p2pClient.Close() }) - rktStore := store.New(mainKV) - svc, err := NewHeaderSyncService(rktStore, conf, genesisDoc, p2pClient, logger) + evStore := store.New(mainKV) + svc, err := NewHeaderSyncService(evStore, conf, genesisDoc, p2pClient, logger) require.NoError(t, err) require.NoError(t, svc.Start(ctx)) t.Cleanup(func() { _ = svc.Stop(context.Background()) }) @@ -168,7 +167,201 @@ func TestHeaderSyncServiceInitFromHigherHeight(t *testing.T) { require.NoError(t, err) require.NoError(t, signedHeader.Validate()) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: signedHeader})) +} + +func TestDAHintStorageHeader(t *testing.T) { + mainKV := sync.MutexWrap(datastore.NewMapDatastore()) + pk, _, err := crypto.GenerateEd25519Key(cryptoRand.Reader) + require.NoError(t, err) + noopSigner, err := noop.NewNoopSigner(pk) + require.NoError(t, err) + rnd := rand.New(rand.NewSource(1)) // nolint:gosec // test code only + mn := mocknet.New() + + chainId := "test-chain-id" + + proposerAddr := []byte("test") + genesisDoc := genesispkg.Genesis{ + 
ChainID: chainId, + StartTime: time.Now(), + InitialHeight: 1, + ProposerAddress: proposerAddr, + } + conf := config.DefaultConfig() + conf.RootDir = t.TempDir() + nodeKey, err := key.LoadOrGenNodeKey(filepath.Dir(conf.ConfigPath())) + require.NoError(t, err) + logger := zerolog.Nop() + priv := nodeKey.PrivKey + p2pHost, err := mn.AddPeer(priv, nil) + require.NoError(t, err) + + p2pClient, err := p2p.NewClientWithHost(conf.P2P, nodeKey.PrivKey, mainKV, chainId, logger, p2p.NopMetrics(), p2pHost) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + require.NoError(t, p2pClient.Start(ctx)) + + evStore := store.New(mainKV) + headerSvc, err := NewHeaderSyncService(evStore, conf, genesisDoc, p2pClient, logger) + require.NoError(t, err) + require.NoError(t, headerSvc.Start(ctx)) + + headerConfig := types.HeaderConfig{ + Height: genesisDoc.InitialHeight, + DataHash: bytesN(rnd, 32), + AppHash: bytesN(rnd, 32), + Signer: noopSigner, + } + signedHeader, err := types.GetRandomSignedHeaderCustom(&headerConfig, genesisDoc.ChainID) + require.NoError(t, err) + require.NoError(t, signedHeader.Validate()) + + require.NoError(t, headerSvc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: signedHeader})) + + daHeight := uint64(100) + require.NoError(t, headerSvc.AppendDAHint(ctx, daHeight, signedHeader.Height())) + + h, err := headerSvc.Store().GetByHeight(ctx, signedHeader.Height()) + require.NoError(t, err) + require.Equal(t, signedHeader.Hash(), h.Hash()) + require.Equal(t, daHeight, h.DAHint()) + + // Persist header to underlying store so it survives restart + // (WriteToStoreAndBroadcast only writes to P2P pending cache) + data := &types.Data{Metadata: &types.Metadata{Height: signedHeader.Height()}} + batch, err := evStore.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(signedHeader, data, &signedHeader.Signature)) + require.NoError(t, batch.SetHeight(signedHeader.Height())) + 
require.NoError(t, batch.Commit()) + + _ = p2pClient.Close() + _ = headerSvc.Stop(ctx) + cancel() + + // Restart + h2, err := mn.AddPeer(priv, nil) + require.NoError(t, err) + p2pClient, err = p2p.NewClientWithHost(conf.P2P, nodeKey.PrivKey, mainKV, chainId, logger, p2p.NopMetrics(), h2) + require.NoError(t, err) + + ctx, cancel = context.WithCancel(t.Context()) + defer cancel() + require.NoError(t, p2pClient.Start(ctx)) + t.Cleanup(func() { _ = p2pClient.Close() }) + + headerSvc, err = NewHeaderSyncService(evStore, conf, genesisDoc, p2pClient, logger) + require.NoError(t, err) + require.NoError(t, headerSvc.Start(ctx)) + t.Cleanup(func() { _ = headerSvc.Stop(context.Background()) }) + + h, err = headerSvc.Store().GetByHeight(ctx, signedHeader.Height()) + require.NoError(t, err) + require.Equal(t, signedHeader.Hash(), h.Hash()) + require.Equal(t, daHeight, h.DAHint()) +} + +func TestDAHintStorageData(t *testing.T) { + mainKV := sync.MutexWrap(datastore.NewMapDatastore()) + pk, _, err := crypto.GenerateEd25519Key(cryptoRand.Reader) + require.NoError(t, err) + noopSigner, err := noop.NewNoopSigner(pk) + require.NoError(t, err) + rnd := rand.New(rand.NewSource(1)) // nolint:gosec // test code only + mn := mocknet.New() + + chainId := "test-chain-id" + + proposerAddr := []byte("test") + genesisDoc := genesispkg.Genesis{ + ChainID: chainId, + StartTime: time.Now(), + InitialHeight: 1, + ProposerAddress: proposerAddr, + } + conf := config.DefaultConfig() + conf.RootDir = t.TempDir() + nodeKey, err := key.LoadOrGenNodeKey(filepath.Dir(conf.ConfigPath())) + require.NoError(t, err) + logger := zerolog.Nop() + priv := nodeKey.PrivKey + p2pHost, err := mn.AddPeer(priv, nil) + require.NoError(t, err) + + p2pClient, err := p2p.NewClientWithHost(conf.P2P, nodeKey.PrivKey, mainKV, chainId, logger, p2p.NopMetrics(), p2pHost) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + require.NoError(t, p2pClient.Start(ctx)) + + evStore := 
store.New(mainKV) + dataSvc, err := NewDataSyncService(evStore, conf, genesisDoc, p2pClient, logger) + require.NoError(t, err) + require.NoError(t, dataSvc.Start(ctx)) + + // Need a valid header height for data metadata + headerConfig := types.HeaderConfig{ + Height: genesisDoc.InitialHeight, + DataHash: bytesN(rnd, 32), + AppHash: bytesN(rnd, 32), + Signer: noopSigner, + } + signedHeader, err := types.GetRandomSignedHeaderCustom(&headerConfig, genesisDoc.ChainID) + require.NoError(t, err) + + data := types.Data{ + Txs: types.Txs{[]byte("tx1")}, + Metadata: &types.Metadata{ + Height: signedHeader.Height(), + }, + } + + require.NoError(t, dataSvc.WriteToStoreAndBroadcast(ctx, &types.P2PData{Data: &data})) + + daHeight := uint64(100) + require.NoError(t, dataSvc.AppendDAHint(ctx, daHeight, data.Height())) + + d, err := dataSvc.Store().GetByHeight(ctx, signedHeader.Height()) + require.NoError(t, err) + require.Equal(t, data.Hash(), d.Hash()) + require.Equal(t, daHeight, d.DAHint()) + + // Persist data to underlying store so it survives restart + // (WriteToStoreAndBroadcast only writes to P2P pending cache) + batch, err := evStore.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(signedHeader, &data, &signedHeader.Signature)) + require.NoError(t, batch.SetHeight(signedHeader.Height())) + require.NoError(t, batch.Commit()) + + _ = p2pClient.Close() + _ = dataSvc.Stop(ctx) + cancel() + + // Restart + h2, err := mn.AddPeer(priv, nil) + require.NoError(t, err) + p2pClient, err = p2p.NewClientWithHost(conf.P2P, nodeKey.PrivKey, mainKV, chainId, logger, p2p.NopMetrics(), h2) + require.NoError(t, err) + + ctx, cancel = context.WithCancel(t.Context()) + defer cancel() + require.NoError(t, p2pClient.Start(ctx)) + t.Cleanup(func() { _ = p2pClient.Close() }) + + dataSvc, err = NewDataSyncService(evStore, conf, genesisDoc, p2pClient, logger) + require.NoError(t, err) + require.NoError(t, dataSvc.Start(ctx)) + t.Cleanup(func() { _ = 
dataSvc.Stop(context.Background()) }) + + d, err = dataSvc.Store().GetByHeight(ctx, signedHeader.Height()) + require.NoError(t, err) + require.Equal(t, data.Hash(), d.Hash()) + require.Equal(t, daHeight, d.DAHint()) } func nextHeader(t *testing.T, previousHeader *types.SignedHeader, chainID string, noopSigner signer.Signer) *types.SignedHeader { @@ -182,8 +375,7 @@ func nextHeader(t *testing.T, previousHeader *types.SignedHeader, chainID string require.NoError(t, err) newSignedHeader.Signature = signature require.NoError(t, newSignedHeader.Validate()) - previousHeader = newSignedHeader - return previousHeader + return newSignedHeader } func bytesN(r *rand.Rand, n int) []byte { diff --git a/proto/evnode/v1/evnode.proto b/proto/evnode/v1/evnode.proto index bd23d920eb..e60bd56e0d 100644 --- a/proto/evnode/v1/evnode.proto +++ b/proto/evnode/v1/evnode.proto @@ -106,3 +106,18 @@ message Vote { // Validator address bytes validator_address = 5; } + +// P2PSignedHeader +message P2PSignedHeader { + Header header = 1; + bytes signature = 2; + Signer signer = 3; + optional uint64 da_height_hint = 4; +} + +// P2PData +message P2PData { + Metadata metadata = 1; + repeated bytes txs = 2; + optional uint64 da_height_hint = 3; +} diff --git a/test/e2e/go.mod b/test/e2e/go.mod index a0541d39e1..bb87eb6828 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -58,6 +58,7 @@ require ( github.com/bytedance/sonic v1.14.2 // indirect github.com/bytedance/sonic/loader v0.4.0 // indirect github.com/celestiaorg/go-header v0.8.1 // indirect + github.com/celestiaorg/go-libp2p-messenger v0.2.2 // indirect github.com/celestiaorg/go-square/merkle v0.0.0-20240627094109-7d01436067a3 // indirect github.com/celestiaorg/nmt v0.24.2 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect diff --git a/test/e2e/go.sum b/test/e2e/go.sum index bdfbbd6f53..401e415a1c 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -137,6 +137,8 @@ github.com/bytedance/sonic/loader v0.4.0/go.mod 
h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCc github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/celestiaorg/go-header v0.8.1 h1:+DKM6y2zkY8rHMyyl1huUDi/5dy9KTUCnP+GKvdL5Jg= github.com/celestiaorg/go-header v0.8.1/go.mod h1:X00prITrMa2kxgEX15WQnbLf0uV6tlvTesDKC5KsDVQ= +github.com/celestiaorg/go-libp2p-messenger v0.2.2 h1:osoUfqjss7vWTIZrrDSy953RjQz+ps/vBFE7bychLEc= +github.com/celestiaorg/go-libp2p-messenger v0.2.2/go.mod h1:oTCRV5TfdO7V/k6nkx7QjQzGrWuJbupv+0o1cgnY2i4= github.com/celestiaorg/go-square/merkle v0.0.0-20240627094109-7d01436067a3 h1:wP84mtwOCVNOTfS3zErICjxKLnh74Z1uf+tdrlSFjVM= github.com/celestiaorg/go-square/merkle v0.0.0-20240627094109-7d01436067a3/go.mod h1:86qIYnEhmn/hfW+xvw98NOI3zGaDEB3x8JGjYo2FqLs= github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= @@ -458,8 +460,8 @@ github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20250202011525-fc3143867406 h1:wlQI2cYY0BsWmmPPAnxfQ8SDW0S3Jasn+4B8kXFxprg= -github.com/google/pprof v0.0.0-20250202011525-fc3143867406/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= @@ -821,8 +823,8 @@ github.com/onsi/ginkgo 
v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= -github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= +github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= diff --git a/types/p2p_envelope.go b/types/p2p_envelope.go new file mode 100644 index 0000000000..d5c5957eef --- /dev/null +++ b/types/p2p_envelope.go @@ -0,0 +1,178 @@ +package types + +import ( + "time" + + "github.com/celestiaorg/go-header" + "google.golang.org/protobuf/proto" + + pb "github.com/evstack/ev-node/types/pb/evnode/v1" +) + +var ( + _ header.Header[*P2PData] = &P2PData{} + _ header.Header[*P2PSignedHeader] = &P2PSignedHeader{} +) + +// P2PSignedHeader wraps SignedHeader with an optional DA height hint for P2P sync optimization. +type P2PSignedHeader struct { + *SignedHeader + DAHeightHint uint64 +} + +// New creates a new P2PSignedHeader. +func (p *P2PSignedHeader) New() *P2PSignedHeader { + return &P2PSignedHeader{SignedHeader: new(SignedHeader)} +} + +// IsZero checks if the header is nil or zero. +func (p *P2PSignedHeader) IsZero() bool { + return p == nil || p.SignedHeader == nil || p.SignedHeader.IsZero() +} + +// SetDAHint sets the DA height hint. 
+func (p *P2PSignedHeader) SetDAHint(daHeight uint64) { + p.DAHeightHint = daHeight +} + +// DAHint returns the DA height hint. +func (p *P2PSignedHeader) DAHint() uint64 { + return p.DAHeightHint +} + +// Verify verifies against an untrusted header. +func (p *P2PSignedHeader) Verify(untrusted *P2PSignedHeader) error { + return p.SignedHeader.Verify(untrusted.SignedHeader) +} + +// MarshalBinary marshals the header to binary using P2P protobuf format. +func (p *P2PSignedHeader) MarshalBinary() ([]byte, error) { + psh, err := p.ToProto() + if err != nil { + return nil, err + } + msg := &pb.P2PSignedHeader{ + Header: psh.Header, + Signature: psh.Signature, + Signer: psh.Signer, + DaHeightHint: &p.DAHeightHint, + } + return proto.Marshal(msg) +} + +// UnmarshalBinary unmarshals the header from binary using P2P protobuf format. +func (p *P2PSignedHeader) UnmarshalBinary(data []byte) error { + var msg pb.P2PSignedHeader + if err := proto.Unmarshal(data, &msg); err != nil { + return err + } + psh := &pb.SignedHeader{ + Header: msg.Header, + Signature: msg.Signature, + Signer: msg.Signer, + } + if p.SignedHeader == nil { + p.SignedHeader = new(SignedHeader) + } + if err := p.FromProto(psh); err != nil { + return err + } + if msg.DaHeightHint != nil { + p.DAHeightHint = *msg.DaHeightHint + } + return nil +} + +// P2PData wraps Data with an optional DA height hint for P2P sync optimization. +type P2PData struct { + *Data + DAHeightHint uint64 +} + +// New creates a new P2PData. +func (p *P2PData) New() *P2PData { + return &P2PData{Data: new(Data)} +} + +// IsZero checks if the data is nil or zero. +func (p *P2PData) IsZero() bool { + return p == nil || p.Data == nil || p.Data.IsZero() +} + +// SetDAHint sets the DA height hint. +func (p *P2PData) SetDAHint(daHeight uint64) { + p.DAHeightHint = daHeight +} + +// DAHint returns the DA height hint. +func (p *P2PData) DAHint() uint64 { + return p.DAHeightHint +} + +// Verify verifies against untrusted data. 
+func (p *P2PData) Verify(untrusted *P2PData) error { + return p.Data.Verify(untrusted.Data) +} + +// ChainID returns chain ID of the data. +func (p *P2PData) ChainID() string { + return p.Data.ChainID() +} + +// Height returns height of the data. +func (p *P2PData) Height() uint64 { + return p.Data.Height() +} + +// LastHeader returns last header hash of the data. +func (p *P2PData) LastHeader() Hash { + return p.Data.LastHeader() +} + +// Time returns time of the data. +func (p *P2PData) Time() time.Time { + return p.Data.Time() +} + +// Hash returns the hash of the data. +func (p *P2PData) Hash() Hash { + return p.Data.Hash() +} + +// Validate performs basic validation on the data. +func (p *P2PData) Validate() error { + return p.Data.Validate() +} + +// MarshalBinary marshals the data to binary using P2P protobuf format. +func (p *P2PData) MarshalBinary() ([]byte, error) { + pData := p.ToProto() + msg := &pb.P2PData{ + Metadata: pData.Metadata, + Txs: pData.Txs, + DaHeightHint: &p.DAHeightHint, + } + return proto.Marshal(msg) +} + +// UnmarshalBinary unmarshals the data from binary using P2P protobuf format. 
+func (p *P2PData) UnmarshalBinary(data []byte) error { + var msg pb.P2PData + if err := proto.Unmarshal(data, &msg); err != nil { + return err + } + pData := &pb.Data{ + Metadata: msg.Metadata, + Txs: msg.Txs, + } + if p.Data == nil { + p.Data = new(Data) + } + if err := p.FromProto(pData); err != nil { + return err + } + if msg.DaHeightHint != nil { + p.DAHeightHint = *msg.DaHeightHint + } + return nil +} diff --git a/types/p2p_envelope_test.go b/types/p2p_envelope_test.go new file mode 100644 index 0000000000..01ffd61b74 --- /dev/null +++ b/types/p2p_envelope_test.go @@ -0,0 +1,153 @@ +package types + +import ( + "bytes" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestP2PEnvelope_MarshalUnmarshal(t *testing.T) { + // Create a P2PData envelope + data := &Data{ + Metadata: &Metadata{ + ChainID: "test-chain", + Height: 10, + Time: uint64(time.Now().UnixNano()), + LastDataHash: bytes.Repeat([]byte{0x1}, 32), + }, + Txs: Txs{[]byte{0x1}, []byte{0x2}}, + } + envelope := &P2PData{ + Data: data, + DAHeightHint: 100, + } + + // Marshaling + bz, err := envelope.MarshalBinary() + require.NoError(t, err) + assert.NotEmpty(t, bz) + + // Unmarshaling + newEnvelope := (&P2PData{}).New() + err = newEnvelope.UnmarshalBinary(bz) + require.NoError(t, err) + assert.Equal(t, envelope.DAHeightHint, newEnvelope.DAHeightHint) + assert.Equal(t, envelope.Data.Height(), newEnvelope.Data.Height()) + assert.Equal(t, envelope.Data.ChainID(), newEnvelope.Data.ChainID()) + assert.Equal(t, envelope.LastDataHash, newEnvelope.LastDataHash) + assert.Equal(t, envelope.Txs, newEnvelope.Txs) +} + +func TestP2PSignedHeader_MarshalUnmarshal(t *testing.T) { + _, pubKey, err := crypto.GenerateEd25519Key(nil) + require.NoError(t, err) + + header := &SignedHeader{ + Header: Header{ + BaseHeader: BaseHeader{ + ChainID: "test-chain", + Height: 5, + Time: uint64(time.Now().UnixNano()), + }, + 
Version: Version{ + Block: 1, + App: 2, + }, + LastHeaderHash: GetRandomBytes(32), + DataHash: GetRandomBytes(32), + AppHash: GetRandomBytes(32), + ProposerAddress: GetRandomBytes(32), + ValidatorHash: GetRandomBytes(32), + }, + Signature: GetRandomBytes(64), + Signer: Signer{ + PubKey: pubKey, + Address: GetRandomBytes(20), + }, + } + + envelope := &P2PSignedHeader{ + SignedHeader: header, + DAHeightHint: 200, + } + + // Marshaling + bz, err := envelope.MarshalBinary() + require.NoError(t, err) + assert.NotEmpty(t, bz) + + // Unmarshaling + newEnvelope := (&P2PSignedHeader{}).New() + err = newEnvelope.UnmarshalBinary(bz) + require.NoError(t, err) + assert.Equal(t, envelope.DAHeightHint, newEnvelope.DAHeightHint) + assert.Equal(t, envelope.Signer, newEnvelope.Signer) + assert.Equal(t, envelope, newEnvelope) +} + +func TestSignedHeaderBinaryCompatibility(t *testing.T) { + signedHeader, _, err := GetRandomSignedHeader("chain-id") + require.NoError(t, err) + bz, err := signedHeader.MarshalBinary() + require.NoError(t, err) + + p2pHeader := (&P2PSignedHeader{}).New() + err = p2pHeader.UnmarshalBinary(bz) + require.NoError(t, err) + + assert.Equal(t, signedHeader.Header, p2pHeader.Header) + assert.Equal(t, signedHeader.Signature, p2pHeader.Signature) + assert.Equal(t, signedHeader.Signer, p2pHeader.Signer) + assert.Zero(t, p2pHeader.DAHeightHint) + + p2pHeader.DAHeightHint = 100 + p2pBytes, err := p2pHeader.MarshalBinary() + require.NoError(t, err) + + var decodedSignedHeader SignedHeader + err = decodedSignedHeader.UnmarshalBinary(p2pBytes) + require.NoError(t, err) + assert.Equal(t, signedHeader.Header, decodedSignedHeader.Header) + assert.Equal(t, signedHeader.Signature, decodedSignedHeader.Signature) + assert.Equal(t, signedHeader.Signer, decodedSignedHeader.Signer) +} + +func TestDataBinaryCompatibility(t *testing.T) { + data := &Data{ + Metadata: &Metadata{ + ChainID: "chain-id", + Height: 10, + Time: uint64(time.Now().UnixNano()), + LastDataHash: 
[]byte("last-hash"), + }, + Txs: Txs{ + []byte("tx1"), + []byte("tx2"), + }, + } + bz, err := data.MarshalBinary() + require.NoError(t, err) + + p2pData := (&P2PData{}).New() + err = p2pData.UnmarshalBinary(bz) + require.NoError(t, err) + + assert.Equal(t, data.Metadata, p2pData.Metadata) + assert.Equal(t, data.Txs, p2pData.Txs) + assert.Zero(t, p2pData.DAHeightHint) + + p2pData.DAHeightHint = 200 + + p2pBytes, err := p2pData.MarshalBinary() + require.NoError(t, err) + + var decodedData Data + err = decodedData.UnmarshalBinary(p2pBytes) + require.NoError(t, err) + assert.Equal(t, data.Metadata, decodedData.Metadata) + assert.Equal(t, data.Txs, decodedData.Txs) +} diff --git a/types/pb/evnode/v1/evnode.pb.go b/types/pb/evnode/v1/evnode.pb.go index 072af753d4..b0a866e76e 100644 --- a/types/pb/evnode/v1/evnode.pb.go +++ b/types/pb/evnode/v1/evnode.pb.go @@ -655,6 +655,136 @@ func (x *Vote) GetValidatorAddress() []byte { return nil } +// P2PSignedHeader +type P2PSignedHeader struct { + state protoimpl.MessageState `protogen:"open.v1"` + Header *Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + Signer *Signer `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` + DaHeightHint *uint64 `protobuf:"varint,4,opt,name=da_height_hint,json=daHeightHint,proto3,oneof" json:"da_height_hint,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *P2PSignedHeader) Reset() { + *x = P2PSignedHeader{} + mi := &file_evnode_v1_evnode_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *P2PSignedHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*P2PSignedHeader) ProtoMessage() {} + +func (x *P2PSignedHeader) ProtoReflect() protoreflect.Message { + mi := &file_evnode_v1_evnode_proto_msgTypes[9] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use P2PSignedHeader.ProtoReflect.Descriptor instead. +func (*P2PSignedHeader) Descriptor() ([]byte, []int) { + return file_evnode_v1_evnode_proto_rawDescGZIP(), []int{9} +} + +func (x *P2PSignedHeader) GetHeader() *Header { + if x != nil { + return x.Header + } + return nil +} + +func (x *P2PSignedHeader) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +func (x *P2PSignedHeader) GetSigner() *Signer { + if x != nil { + return x.Signer + } + return nil +} + +func (x *P2PSignedHeader) GetDaHeightHint() uint64 { + if x != nil && x.DaHeightHint != nil { + return *x.DaHeightHint + } + return 0 +} + +// P2PData +type P2PData struct { + state protoimpl.MessageState `protogen:"open.v1"` + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` + DaHeightHint *uint64 `protobuf:"varint,3,opt,name=da_height_hint,json=daHeightHint,proto3,oneof" json:"da_height_hint,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *P2PData) Reset() { + *x = P2PData{} + mi := &file_evnode_v1_evnode_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *P2PData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*P2PData) ProtoMessage() {} + +func (x *P2PData) ProtoReflect() protoreflect.Message { + mi := &file_evnode_v1_evnode_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use P2PData.ProtoReflect.Descriptor instead. 
+func (*P2PData) Descriptor() ([]byte, []int) { + return file_evnode_v1_evnode_proto_rawDescGZIP(), []int{10} +} + +func (x *P2PData) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *P2PData) GetTxs() [][]byte { + if x != nil { + return x.Txs + } + return nil +} + +func (x *P2PData) GetDaHeightHint() uint64 { + if x != nil && x.DaHeightHint != nil { + return *x.DaHeightHint + } + return 0 +} + var File_evnode_v1_evnode_proto protoreflect.FileDescriptor const file_evnode_v1_evnode_proto_rawDesc = "" + @@ -705,7 +835,18 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + "\x06height\x18\x02 \x01(\x04R\x06height\x128\n" + "\ttimestamp\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\"\n" + "\rblock_id_hash\x18\x04 \x01(\fR\vblockIdHash\x12+\n" + - "\x11validator_address\x18\x05 \x01(\fR\x10validatorAddressB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3" + "\x11validator_address\x18\x05 \x01(\fR\x10validatorAddress\"\xc3\x01\n" + + "\x0fP2PSignedHeader\x12)\n" + + "\x06header\x18\x01 \x01(\v2\x11.evnode.v1.HeaderR\x06header\x12\x1c\n" + + "\tsignature\x18\x02 \x01(\fR\tsignature\x12)\n" + + "\x06signer\x18\x03 \x01(\v2\x11.evnode.v1.SignerR\x06signer\x12)\n" + + "\x0eda_height_hint\x18\x04 \x01(\x04H\x00R\fdaHeightHint\x88\x01\x01B\x11\n" + + "\x0f_da_height_hint\"\x8a\x01\n" + + "\aP2PData\x12/\n" + + "\bmetadata\x18\x01 \x01(\v2\x13.evnode.v1.MetadataR\bmetadata\x12\x10\n" + + "\x03txs\x18\x02 \x03(\fR\x03txs\x12)\n" + + "\x0eda_height_hint\x18\x03 \x01(\x04H\x00R\fdaHeightHint\x88\x01\x01B\x11\n" + + "\x0f_da_height_hintB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3" var ( file_evnode_v1_evnode_proto_rawDescOnce sync.Once @@ -719,7 +860,7 @@ func file_evnode_v1_evnode_proto_rawDescGZIP() []byte { return file_evnode_v1_evnode_proto_rawDescData } -var file_evnode_v1_evnode_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_evnode_v1_evnode_proto_msgTypes = 
make([]protoimpl.MessageInfo, 11) var file_evnode_v1_evnode_proto_goTypes = []any{ (*Version)(nil), // 0: evnode.v1.Version (*Header)(nil), // 1: evnode.v1.Header @@ -730,23 +871,28 @@ var file_evnode_v1_evnode_proto_goTypes = []any{ (*Data)(nil), // 6: evnode.v1.Data (*SignedData)(nil), // 7: evnode.v1.SignedData (*Vote)(nil), // 8: evnode.v1.Vote - (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp + (*P2PSignedHeader)(nil), // 9: evnode.v1.P2PSignedHeader + (*P2PData)(nil), // 10: evnode.v1.P2PData + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp } var file_evnode_v1_evnode_proto_depIdxs = []int32{ - 0, // 0: evnode.v1.Header.version:type_name -> evnode.v1.Version - 1, // 1: evnode.v1.SignedHeader.header:type_name -> evnode.v1.Header - 4, // 2: evnode.v1.SignedHeader.signer:type_name -> evnode.v1.Signer - 1, // 3: evnode.v1.DAHeaderEnvelope.header:type_name -> evnode.v1.Header - 4, // 4: evnode.v1.DAHeaderEnvelope.signer:type_name -> evnode.v1.Signer - 5, // 5: evnode.v1.Data.metadata:type_name -> evnode.v1.Metadata - 6, // 6: evnode.v1.SignedData.data:type_name -> evnode.v1.Data - 4, // 7: evnode.v1.SignedData.signer:type_name -> evnode.v1.Signer - 9, // 8: evnode.v1.Vote.timestamp:type_name -> google.protobuf.Timestamp - 9, // [9:9] is the sub-list for method output_type - 9, // [9:9] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name + 0, // 0: evnode.v1.Header.version:type_name -> evnode.v1.Version + 1, // 1: evnode.v1.SignedHeader.header:type_name -> evnode.v1.Header + 4, // 2: evnode.v1.SignedHeader.signer:type_name -> evnode.v1.Signer + 1, // 3: evnode.v1.DAHeaderEnvelope.header:type_name -> evnode.v1.Header + 4, // 4: evnode.v1.DAHeaderEnvelope.signer:type_name -> evnode.v1.Signer + 5, // 5: evnode.v1.Data.metadata:type_name -> evnode.v1.Metadata + 6, // 6: 
evnode.v1.SignedData.data:type_name -> evnode.v1.Data + 4, // 7: evnode.v1.SignedData.signer:type_name -> evnode.v1.Signer + 11, // 8: evnode.v1.Vote.timestamp:type_name -> google.protobuf.Timestamp + 1, // 9: evnode.v1.P2PSignedHeader.header:type_name -> evnode.v1.Header + 4, // 10: evnode.v1.P2PSignedHeader.signer:type_name -> evnode.v1.Signer + 5, // 11: evnode.v1.P2PData.metadata:type_name -> evnode.v1.Metadata + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name } func init() { file_evnode_v1_evnode_proto_init() } @@ -754,13 +900,15 @@ func file_evnode_v1_evnode_proto_init() { if File_evnode_v1_evnode_proto != nil { return } + file_evnode_v1_evnode_proto_msgTypes[9].OneofWrappers = []any{} + file_evnode_v1_evnode_proto_msgTypes[10].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_evnode_v1_evnode_proto_rawDesc), len(file_evnode_v1_evnode_proto_rawDesc)), NumEnums: 0, - NumMessages: 9, + NumMessages: 11, NumExtensions: 0, NumServices: 0, },