diff --git a/apps/evm/cmd/run.go b/apps/evm/cmd/run.go index a88e548d21..7829c2e1b0 100644 --- a/apps/evm/cmd/run.go +++ b/apps/evm/cmd/run.go @@ -131,7 +131,10 @@ var RunCmd = &cobra.Command{ }() } - return rollcmd.StartNode(logger, cmd, executor, sequencer, nodeKey, datastore, nodeConfig, genesis, node.NodeOptions{}) + // nil fiberClient: the EVM app doesn't wire Fibre DA. See + // tools/celestia-node-fiber for the adapter; testapp/cmd/run.go + // has the same TODO note for matching context. + return rollcmd.StartNode(logger, cmd, executor, sequencer, nodeKey, datastore, nodeConfig, genesis, node.NodeOptions{}, nil) }, } diff --git a/apps/grpc/cmd/run.go b/apps/grpc/cmd/run.go index 026cfcd0a6..b052edf9a4 100644 --- a/apps/grpc/cmd/run.go +++ b/apps/grpc/cmd/run.go @@ -86,8 +86,10 @@ The execution client must implement the Evolve execution gRPC interface.`, return err } - // Start the node - return rollcmd.StartNode(logger, cmd, executor, sequencer, nodeKey, datastore, nodeConfig, genesis, node.NodeOptions{}) + // Start the node. nil fiberClient: the gRPC app doesn't wire + // Fibre DA. See tools/celestia-node-fiber for the adapter; + // testapp/cmd/run.go has the same TODO note for context. + return rollcmd.StartNode(logger, cmd, executor, sequencer, nodeKey, datastore, nodeConfig, genesis, node.NodeOptions{}, nil) }, } diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index 75e5a49019..05698f1e60 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -97,7 +97,12 @@ var RunCmd = &cobra.Command{ return err } - return cmd.StartNode(logger, command, executor, sequencer, nodeKey, datastore, nodeConfig, genesis, node.NodeOptions{}) + // nil fiberClient: testapp doesn't yet wire Fibre DA. To enable + // fiber support here, build a *cnfiber.Adapter from + // nodeConfig.DA.Fiber and pass it as the last argument. The + // adapter wiring lives in tools/celestia-node-fiber; see the + // fiber-bench tool's run.go for a working caller. + return cmd.StartNode(logger, command, executor, sequencer, nodeKey, datastore, nodeConfig, genesis, node.NodeOptions{}, nil) }, } diff --git a/block/internal/cache/manager.go b/block/internal/cache/manager.go index 4d95a7d7e5..29193bdaae 100644 --- a/block/internal/cache/manager.go +++ b/block/internal/cache/manager.go @@ -27,7 +27,7 @@ const ( // DefaultTxCacheRetention is the default time to keep transaction hashes in cache. // Keeping a too high value can lead to OOM during heavy transaction load. - DefaultTxCacheRetention = 30 * time.Minute + DefaultTxCacheRetention = 30 * time.Second ) // CacheManager provides thread-safe cache operations for tracking seen blocks @@ -77,10 +77,14 @@ type PendingManager interface { GetPendingData(ctx context.Context) ([]*types.SignedData, [][]byte, error) SetLastSubmittedHeaderHeight(ctx context.Context, height uint64) GetLastSubmittedHeaderHeight() uint64 + ResetInFlightHeaderRange(start, end uint64) SetLastSubmittedDataHeight(ctx context.Context, height uint64) GetLastSubmittedDataHeight() uint64 + ResetInFlightDataRange(start, end uint64) NumPendingHeaders() uint64 NumPendingData() uint64 + NumPendingHeadersTotal() uint64 + NumPendingDataTotal() uint64 } // Manager combines CacheManager and PendingManager. 
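A minimal caller sketch of the claim/reset flow behind the new PendingManager methods; this is illustrative and not part of the diff. Only the Manager methods are real; the helper submitHeadersToDA and the surrounding error handling are assumptions for the example.

func submitPendingHeaders(ctx context.Context, cm Manager) error {
	// GetPendingHeaders claims the returned height range so a concurrent
	// submitter will not pick up the same items.
	headers, _, err := cm.GetPendingHeaders(ctx)
	if err != nil || len(headers) == 0 {
		return err
	}
	start, end := headers[0].Height(), headers[len(headers)-1].Height()
	if err := submitHeadersToDA(ctx, headers); err != nil { // hypothetical helper
		// Failure: release the claim so the same heights become fetchable
		// again (or become a gap if another submission already advanced
		// the watermark past them).
		cm.ResetInFlightHeaderRange(start, end)
		return err
	}
	// Success: persist the watermark; the matching claim is trimmed away.
	cm.SetLastSubmittedHeaderHeight(ctx, end)
	return nil
}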
@@ -311,6 +315,10 @@ func (m *implementation) SetLastSubmittedHeaderHeight(ctx context.Context, heigh m.pendingHeaders.SetLastSubmittedHeaderHeight(ctx, height) } +func (m *implementation) ResetInFlightHeaderRange(start, end uint64) { + m.pendingHeaders.ResetInFlightHeaderRange(start, end) +} + func (m *implementation) GetLastSubmittedDataHeight() uint64 { return m.pendingData.GetLastSubmittedDataHeight() } @@ -319,6 +327,10 @@ func (m *implementation) SetLastSubmittedDataHeight(ctx context.Context, height m.pendingData.SetLastSubmittedDataHeight(ctx, height) } +func (m *implementation) ResetInFlightDataRange(start, end uint64) { + m.pendingData.ResetInFlightDataRange(start, end) +} + func (m *implementation) NumPendingHeaders() uint64 { return m.pendingHeaders.NumPendingHeaders() } @@ -327,6 +339,14 @@ func (m *implementation) NumPendingData() uint64 { return m.pendingData.NumPendingData() } +func (m *implementation) NumPendingHeadersTotal() uint64 { + return m.pendingHeaders.NumPendingHeadersTotal() +} + +func (m *implementation) NumPendingDataTotal() uint64 { + return m.pendingData.NumPendingDataTotal() +} + // SetPendingEvent sets the event at the specified height. func (m *implementation) SetPendingEvent(height uint64, event *common.DAHeightEvent) { m.pendingMu.Lock() diff --git a/block/internal/cache/manager_test.go b/block/internal/cache/manager_test.go index fa5aebf34b..7b3fac74c2 100644 --- a/block/internal/cache/manager_test.go +++ b/block/internal/cache/manager_test.go @@ -221,6 +221,12 @@ func TestPendingHeadersAndData_Flow(t *testing.T) { // update last submitted heights and re-check cm.SetLastSubmittedHeaderHeight(ctx, 1) cm.SetLastSubmittedDataHeight(ctx, 2) + cm.ResetInFlightHeaderRange(1, 3) + cm.ResetInFlightDataRange(2, 3) + + // numPending views (before getPending claims items) + assert.Equal(t, uint64(2), cm.NumPendingHeaders()) + assert.Equal(t, uint64(1), cm.NumPendingData()) headers, _, err = cm.GetPendingHeaders(ctx) require.NoError(t, err) @@ -231,10 +237,6 @@ func TestPendingHeadersAndData_Flow(t *testing.T) { require.NoError(t, err) require.Len(t, signedData, 1) assert.Equal(t, uint64(3), signedData[0].Height()) - - // numPending views - assert.Equal(t, uint64(2), cm.NumPendingHeaders()) - assert.Equal(t, uint64(1), cm.NumPendingData()) } func TestManager_TxOperations(t *testing.T) { diff --git a/block/internal/cache/pending_base.go b/block/internal/cache/pending_base.go index 5dc239b8e9..34760ccb39 100644 --- a/block/internal/cache/pending_base.go +++ b/block/internal/cache/pending_base.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "errors" "fmt" + "slices" "sync" "sync/atomic" @@ -18,6 +19,13 @@ import ( // DefaultPendingCacheSize is the default size for the pending items cache. const DefaultPendingCacheSize = 200_000 +// inFlightClaim tracks a contiguous range of heights claimed by getPending +// for DA submission. Claims prevent concurrent getPending calls from +// returning the same items. On submission failure, resetInFlightRange +// removes the claim; if the range is below lastHeight it is added to gaps +// so the items can be re-fetched. +type inFlightClaim struct{ start, end uint64 } + // pendingBase is a generic struct for tracking items (headers, data, etc.) // that need to be published to the DA layer in order. It handles persistence // of the last submitted height and provides methods for retrieving pending items. 
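An illustrative note on the two pending counts exposed by the cache manager (a sketch, not part of this diff): NumPendingHeaders reports only heights a further GetPendingHeaders call could still return, while NumPendingHeadersTotal keeps counting everything above the submitted watermark, so it is the figure a dashboard or stuck-submitter alert should track. Assuming heights 1..5 exist and nothing has been submitted yet:

	headers, _, _ := cm.GetPendingHeaders(ctx) // claims heights [1..5]
	_ = headers
	cm.NumPendingHeaders()      // 0: every pending height is now in flight
	cm.NumPendingHeadersTotal() // 5: still not submitted to DA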
@@ -28,14 +36,19 @@ type pendingBase[T any] struct { fetch func(ctx context.Context, store store.Store, height uint64) (T, error) lastHeight atomic.Uint64 + // inFlightMu protects inFlightClaims and gaps. + inFlightMu sync.Mutex + inFlightClaims []inFlightClaim // sorted by start + // gaps holds ranges that failed after a later submission advanced lastHeight + // past them. getPending checks gaps first so these items are retried. + gaps []inFlightClaim // sorted by start + // Pending items cache to avoid re-fetching all items on every call. - // We cache the items themselves, keyed by height. pendingCache *lru.Cache[uint64, T] mu sync.Mutex // Protects getPending logic } -// newPendingBase constructs a new pendingBase for a given type. func newPendingBase[T any](store store.Store, logger zerolog.Logger, metaKey string, fetch func(ctx context.Context, store store.Store, height uint64) (T, error)) (*pendingBase[T], error) { pendingCache, err := lru.New[uint64, T](DefaultPendingCacheSize) if err != nil { @@ -57,6 +70,9 @@ func newPendingBase[T any](store store.Store, logger zerolog.Logger, metaKey str // getPending returns a sorted slice of pending items of type T. // It caches fetched items to avoid re-fetching on subsequent calls. +// Returned items are registered as an in-flight claim so that concurrent +// callers do not receive the same items. On failure, the caller must call +// resetInFlightRange with the same [start, end] to re-expose the items. func (pb *pendingBase[T]) getPending(ctx context.Context) ([]T, error) { pb.mu.Lock() defer pb.mu.Unlock() @@ -66,48 +82,45 @@ func (pb *pendingBase[T]) getPending(ctx context.Context) ([]T, error) { if err != nil { return nil, err } - if lastSubmitted == storeHeight { - return nil, nil - } if lastSubmitted > storeHeight { - return nil, fmt.Errorf("height of last submitted item (%d) is greater than height of last item (%d)", lastSubmitted, storeHeight) + return nil, fmt.Errorf("last submitted height (%d) is greater than store height (%d)", lastSubmitted, storeHeight) } - // Limit the number of items to return based on cache capacity. - // This prevents the LRU from evicting entries we need, which would cause re-fetches. 
- pendingCount := storeHeight - lastSubmitted - endHeight := storeHeight - if pendingCount > DefaultPendingCacheSize { - endHeight = lastSubmitted + DefaultPendingCacheSize + pb.inFlightMu.Lock() + lastSubmitted = pb.lastHeight.Load() + rangeStart, rangeEnd := findAvailableRange(pb.gaps, pb.inFlightClaims, lastSubmitted, storeHeight) + pb.inFlightMu.Unlock() + + if rangeStart == 0 || rangeStart > rangeEnd { + return nil, nil } - // Fetch only items that are not already in cache - for h := lastSubmitted + 1; h <= endHeight; h++ { - if _, ok := pb.pendingCache.Peek(h); ok { - continue // Already cached, skip fetching + // Cap range to cache capacity + if rangeEnd-rangeStart+1 > uint64(DefaultPendingCacheSize) { + rangeEnd = rangeStart + uint64(DefaultPendingCacheSize) - 1 + } + + pending := make([]T, 0, rangeEnd-rangeStart+1) + for h := rangeStart; h <= rangeEnd; h++ { + if item, ok := pb.pendingCache.Get(h); ok { + pending = append(pending, item) + continue } item, err := pb.fetch(ctx, pb.store, h) if err != nil { - return nil, err + return pending, err } pb.pendingCache.Add(h, item) + pending = append(pending, item) } - // Build the result slice from cache (only up to endHeight) - pending := make([]T, 0, endHeight-lastSubmitted) - for h := lastSubmitted + 1; h <= endHeight; h++ { - if item, ok := pb.pendingCache.Get(h); ok { - pending = append(pending, item) - } else { - // This shouldn't happen, but fetch if missing - item, err := pb.fetch(ctx, pb.store, h) - if err != nil { - return pending, err - } - pb.pendingCache.Add(h, item) - pending = append(pending, item) - } + if len(pending) > 0 { + pb.inFlightMu.Lock() + pb.inFlightClaims = insertClaim(pb.inFlightClaims, inFlightClaim{start: rangeStart, end: rangeEnd}) + pb.gaps = removeGapRange(pb.gaps, rangeStart, rangeEnd) + pb.inFlightMu.Unlock() } + return pending, nil } @@ -117,7 +130,36 @@ func (pb *pendingBase[T]) numPending() uint64 { pb.logger.Error().Err(err).Msg("failed to get height in numPending") return 0 } - return height - pb.lastHeight.Load() + + lastSubmitted := pb.lastHeight.Load() + + pb.inFlightMu.Lock() + var count uint64 + for _, gap := range pb.gaps { + count += countUnclaimed(gap.start, gap.end, pb.inFlightClaims) + } + if height > lastSubmitted { + count += countUnclaimed(lastSubmitted+1, height, pb.inFlightClaims) + } + pb.inFlightMu.Unlock() + + return count +} + +func (pb *pendingBase[T]) numPendingTotal() uint64 { + height, err := pb.store.Height(context.Background()) + if err != nil { + pb.logger.Error().Err(err).Msg("failed to get height in numPendingTotal") + return 0 + } + + lastSubmitted := pb.lastHeight.Load() + + if height <= lastSubmitted { + return 0 + } + + return height - lastSubmitted } func (pb *pendingBase[T]) getLastSubmittedHeight() uint64 { @@ -133,10 +175,55 @@ func (pb *pendingBase[T]) setLastSubmittedHeight(ctx context.Context, newLastSub if err != nil { pb.logger.Error().Err(err).Msg("failed to store height of latest item submitted to DA") } - // Note: We don't explicitly clear submitted entries from the cache here. - // Since getPending() only iterates from lastSubmitted+1, old entries are simply - // never accessed. The LRU will naturally evict them when capacity is reached. - // This avoids O(N) iteration over the cache on every submission. 
+ } + + pb.inFlightMu.Lock() + pb.inFlightClaims = trimClaimsBelow(pb.inFlightClaims, newLastSubmittedHeight) + pb.gaps = trimGapsBelow(pb.gaps, newLastSubmittedHeight) + pb.inFlightMu.Unlock() +} + +// resetInFlightRange removes any in-flight claim overlapping [start, end]. +// If the claim has been trimmed by setLastSubmittedHeight (partial success), +// the trimmed claim range is used for gap computation to avoid re-exposing +// already-submitted items. If no claim is found (removed by a concurrent +// setLastSubmittedHeight), the caller's range is used instead. +func (pb *pendingBase[T]) resetInFlightRange(start, end uint64) { + pb.inFlightMu.Lock() + defer pb.inFlightMu.Unlock() + + var removedClaim *inFlightClaim + n := 0 + for _, c := range pb.inFlightClaims { + if c.end < start || c.start > end { + pb.inFlightClaims[n] = c + n++ + } else { + cc := c + removedClaim = &cc + } + } + pb.inFlightClaims = pb.inFlightClaims[:n] + + currentLast := pb.lastHeight.Load() + + var gapStart, gapEnd uint64 + if removedClaim != nil { + gapStart = removedClaim.start + gapEnd = removedClaim.end + } else { + gapStart = start + gapEnd = end + } + + if gapStart > currentLast { + return + } + if gapEnd > currentLast { + gapEnd = currentLast + } + if gapStart <= gapEnd { + pb.gaps = insertClaim(pb.gaps, inFlightClaim{start: gapStart, end: gapEnd}) } } @@ -158,3 +245,131 @@ func (pb *pendingBase[T]) init() error { pb.lastHeight.CompareAndSwap(0, lsh) return nil } + +// --------------------------------------------------------------------------- +// Helper functions for claim / gap bookkeeping +// --------------------------------------------------------------------------- + +// findAvailableRange returns the first contiguous range of heights that are +// not covered by any active claim. Gaps take priority; then items above +// lastHeight. Returns (0, 0) when nothing is available. +func findAvailableRange(gaps, claims []inFlightClaim, lastHeight, storeHeight uint64) (uint64, uint64) { + // Check gaps first + for _, gap := range gaps { + s, e := firstUnclaimed(gap.start, gap.end, claims) + if s <= e { + return s, e + } + } + // Items above lastHeight + if lastHeight < storeHeight { + s, e := firstUnclaimed(lastHeight+1, storeHeight, claims) + return s, e + } + return 0, 0 +} + +// firstUnclaimed returns the first contiguous unclaimed sub-range within [lo, hi]. +func firstUnclaimed(lo, hi uint64, claims []inFlightClaim) (uint64, uint64) { + h := lo + ci := 0 + for ci < len(claims) && claims[ci].end < h { + ci++ + } + // Skip past any claim that covers h + for ci < len(claims) && h >= claims[ci].start && h <= claims[ci].end { + h = claims[ci].end + 1 + ci++ + for ci < len(claims) && claims[ci].end < h { + ci++ + } + } + if h > hi { + return 0, 0 + } + end := hi + if ci < len(claims) && claims[ci].start <= hi { + end = claims[ci].start - 1 + } + return h, end +} + +// countUnclaimed counts heights in [lo, hi] not covered by any claim. +func countUnclaimed(lo, hi uint64, claims []inFlightClaim) uint64 { + if lo > hi { + return 0 + } + total := hi - lo + 1 + for _, c := range claims { + if c.end < lo || c.start > hi { + continue + } + ovStart := max(c.start, lo) + ovEnd := min(c.end, hi) + total -= ovEnd - ovStart + 1 + } + return total +} + +// insertClaim inserts a claim into a sorted-by-start slice, keeping it sorted. 
+func insertClaim(sorted []inFlightClaim, c inFlightClaim) []inFlightClaim { + idx, _ := slices.BinarySearchFunc(sorted, c.start, func(e inFlightClaim, v uint64) int { + if e.start < v { + return -1 + } + if e.start > v { + return 1 + } + return 0 + }) + return slices.Insert(sorted, idx, c) +} + +// removeGapRange removes or trims gaps covered by [start, end]. +func removeGapRange(gaps []inFlightClaim, start, end uint64) []inFlightClaim { + var result []inFlightClaim + for _, g := range gaps { + if g.end < start || g.start > end { + result = append(result, g) + continue + } + // Partial overlap: keep portions outside [start, end] + if g.start < start { + result = append(result, inFlightClaim{start: g.start, end: start - 1}) + } + if g.end > end { + result = append(result, inFlightClaim{start: end + 1, end: g.end}) + } + } + return result +} + +// trimClaimsBelow removes the portion of each claim that is <= height. +func trimClaimsBelow(claims []inFlightClaim, height uint64) []inFlightClaim { + result := claims[:0] + for _, c := range claims { + if c.end <= height { + continue + } + if c.start <= height { + c.start = height + 1 + } + result = append(result, c) + } + return result +} + +// trimGapsBelow removes gaps fully below height and trims partial overlaps. +func trimGapsBelow(gaps []inFlightClaim, height uint64) []inFlightClaim { + result := gaps[:0] + for _, g := range gaps { + if g.end <= height { + continue + } + if g.start <= height { + g.start = height + 1 + } + result = append(result, g) + } + return result +} diff --git a/block/internal/cache/pending_base_test.go b/block/internal/cache/pending_base_test.go index eb9734a035..d15af428ed 100644 --- a/block/internal/cache/pending_base_test.go +++ b/block/internal/cache/pending_base_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/types" ) func TestPendingBase_ErrorConditions(t *testing.T) { @@ -27,12 +28,10 @@ func TestPendingBase_ErrorConditions(t *testing.T) { require.Error(t, err) // 2) lastSubmitted > height yields error from getPending - // reset metadata to a valid higher value than store height bz := make([]byte, 8) binary.LittleEndian.PutUint64(bz, 5) require.NoError(t, st.SetMetadata(ctx, store.LastSubmittedHeaderHeightKey, bz)) - // ensure store height stays lower (0) ph, err := NewPendingHeaders(st, logger) require.NoError(t, err) pending, _, err := ph.GetPendingHeaders(ctx) @@ -40,7 +39,7 @@ func TestPendingBase_ErrorConditions(t *testing.T) { assert.Len(t, pending, 0) // 3) NewPendingData shares same behavior - err = st.SetMetadata(ctx, LastSubmittedDataHeightKey, []byte{0xFF}) // invalid length + err = st.SetMetadata(ctx, LastSubmittedDataHeightKey, []byte{0xFF}) require.NoError(t, err) _, err = NewPendingData(st, logger) require.Error(t, err) @@ -57,14 +56,12 @@ func TestPendingBase_PersistLastSubmitted(t *testing.T) { ph, err := NewPendingHeaders(st, logger) require.NoError(t, err) - // store height 3 to make numPending meaningful batch, err := st.NewBatch(ctx) require.NoError(t, err) require.NoError(t, batch.SetHeight(3)) require.NoError(t, batch.Commit()) assert.Equal(t, uint64(3), ph.NumPendingHeaders()) - // set last submitted higher and ensure metadata is written ph.SetLastSubmittedHeaderHeight(ctx, 2) raw, err := st.GetMetadata(ctx, store.LastSubmittedHeaderHeightKey) require.NoError(t, err) @@ -72,9 +69,211 @@ func TestPendingBase_PersistLastSubmitted(t *testing.T) { lsh := binary.LittleEndian.Uint64(raw) 
assert.Equal(t, uint64(2), lsh) - // setting a lower height should not overwrite ph.SetLastSubmittedHeaderHeight(ctx, 1) raw2, err := st.GetMetadata(ctx, store.LastSubmittedHeaderHeightKey) require.NoError(t, err) assert.Equal(t, raw, raw2) } + +func TestPendingBase_InFlightClaim(t *testing.T) { + t.Parallel() + ctx := context.Background() + st := testMemStore(t) + chainID := "inflight" + + for _, h := range []uint64{1, 2, 3, 4, 5} { + hdr, data := types.GetRandomBlock(h, int(h-1), chainID) + batch, err := st.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(hdr, data, &types.Signature{})) + require.NoError(t, batch.SetHeight(h)) + require.NoError(t, batch.Commit()) + } + + ph, err := NewPendingHeaders(st, zerolog.Nop()) + require.NoError(t, err) + + // Claim all 5 items + headers, _, err := ph.GetPendingHeaders(ctx) + require.NoError(t, err) + require.Len(t, headers, 5) + + // All claimed, nothing pending + assert.Equal(t, uint64(0), ph.NumPendingHeaders()) + + // Second call returns nothing (all claimed) + headers2, _, err := ph.GetPendingHeaders(ctx) + require.NoError(t, err) + assert.Empty(t, headers2) + + // Simulate failure: reset the claim range + ph.ResetInFlightHeaderRange(1, 5) + + // Items are available again + assert.Equal(t, uint64(5), ph.NumPendingHeaders()) + headers3, _, err := ph.GetPendingHeaders(ctx) + require.NoError(t, err) + require.Len(t, headers3, 5) +} + +func TestPendingBase_InFlightPartialAdvance(t *testing.T) { + t.Parallel() + ctx := context.Background() + st := testMemStore(t) + chainID := "inflight-partial" + + for _, h := range []uint64{1, 2, 3, 4, 5} { + hdr, data := types.GetRandomBlock(h, int(h-1), chainID) + batch, err := st.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(hdr, data, &types.Signature{})) + require.NoError(t, batch.SetHeight(h)) + require.NoError(t, batch.Commit()) + } + + ph, err := NewPendingHeaders(st, zerolog.Nop()) + require.NoError(t, err) + + // Claim all items [1..5] + headers, _, err := ph.GetPendingHeaders(ctx) + require.NoError(t, err) + require.Len(t, headers, 5) + + // Partial success: [1..3] submitted, claim trimmed to [4..5] + ph.SetLastSubmittedHeaderHeight(ctx, 3) + + // Claim [4..5] still active, so getPending returns nothing + headers2, _, err := ph.GetPendingHeaders(ctx) + require.NoError(t, err) + assert.Empty(t, headers2) + assert.Equal(t, uint64(0), ph.NumPendingHeaders()) + + // Add new items at heights 6-8 + for _, h := range []uint64{6, 7, 8} { + hdr, data := types.GetRandomBlock(h, int(h-1), chainID) + batch, err := st.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(hdr, data, &types.Signature{})) + require.NoError(t, batch.SetHeight(h)) + require.NoError(t, batch.Commit()) + } + + // Claim [4..5] still active, but items [6..8] are available + assert.Equal(t, uint64(3), ph.NumPendingHeaders()) + headers3, _, err := ph.GetPendingHeaders(ctx) + require.NoError(t, err) + require.Len(t, headers3, 3) + assert.Equal(t, uint64(6), headers3[0].Height()) + assert.Equal(t, uint64(8), headers3[2].Height()) + + // Retry of [4..5] succeeds + ph.SetLastSubmittedHeaderHeight(ctx, 5) + + // Claims [6..8] active, lastHeight=5, all covered + assert.Equal(t, uint64(0), ph.NumPendingHeaders()) +} + +func TestPendingBase_InFlightGapReexposure(t *testing.T) { + t.Parallel() + ctx := context.Background() + st := testMemStore(t) + chainID := "inflight-gap" + + // Start with items [1..5] + for _, h := range []uint64{1, 2, 3, 4, 5} { + hdr, 
data := types.GetRandomBlock(h, int(h-1), chainID) + batch, err := st.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(hdr, data, &types.Signature{})) + require.NoError(t, batch.SetHeight(h)) + require.NoError(t, batch.Commit()) + } + + ph, err := NewPendingHeaders(st, zerolog.Nop()) + require.NoError(t, err) + + // Claim A: all available items [1..5] + hA, _, err := ph.GetPendingHeaders(ctx) + require.NoError(t, err) + require.Len(t, hA, 5) + + // Add items [6..15] + for _, h := range []uint64{6, 7, 8, 9, 10, 11, 12, 13, 14, 15} { + hdr, data := types.GetRandomBlock(h, int(h-1), chainID) + batch, err := st.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(hdr, data, &types.Signature{})) + require.NoError(t, batch.SetHeight(h)) + require.NoError(t, batch.Commit()) + } + + // Claim B: items [6..15] (claim A still covers [1..5]) + hB, _, err := ph.GetPendingHeaders(ctx) + require.NoError(t, err) + require.Len(t, hB, 10) + assert.Equal(t, uint64(6), hB[0].Height()) + + // A succeeds + ph.SetLastSubmittedHeaderHeight(ctx, 5) + // B succeeds (lastHeight jumps to 15) + ph.SetLastSubmittedHeaderHeight(ctx, 15) + + // Now simulate: a retry of items [8..10] from claim B had failed earlier + // and the retry loop also fails. Reset the sub-range. + // Since [8..10] is below lastHeight=15, it becomes a gap. + ph.ResetInFlightHeaderRange(8, 10) + + // Gap [8..10] should be available + assert.Equal(t, uint64(3), ph.NumPendingHeaders()) + + hRetry, _, err := ph.GetPendingHeaders(ctx) + require.NoError(t, err) + require.Len(t, hRetry, 3) + assert.Equal(t, uint64(8), hRetry[0].Height()) + assert.Equal(t, uint64(10), hRetry[2].Height()) +} + +func TestPendingBase_InFlightResetOnFailure(t *testing.T) { + t.Parallel() + ctx := context.Background() + st := testMemStore(t) + chainID := "inflight-reset" + + for _, h := range []uint64{1, 2, 3} { + hdr, data := types.GetRandomBlock(h, int(h-1), chainID) + batch, err := st.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(hdr, data, &types.Signature{})) + require.NoError(t, batch.SetHeight(h)) + require.NoError(t, batch.Commit()) + } + + ph, err := NewPendingHeaders(st, zerolog.Nop()) + require.NoError(t, err) + + // Claim all items + _, _, err = ph.GetPendingHeaders(ctx) + require.NoError(t, err) + assert.Equal(t, uint64(0), ph.NumPendingHeaders()) + + // Simulate failure and reset + ph.ResetInFlightHeaderRange(1, 3) + assert.Equal(t, uint64(3), ph.NumPendingHeaders()) + + // Claim again + headers, _, err := ph.GetPendingHeaders(ctx) + require.NoError(t, err) + require.Len(t, headers, 3) + + // Partial success: [1] submitted, claim trimmed to [2..3] + ph.SetLastSubmittedHeaderHeight(ctx, 1) + // Failure of remaining [2..3]: reset the trimmed claim + ph.ResetInFlightHeaderRange(2, 3) + + // Items from height 2 onward should be available + assert.Equal(t, uint64(2), ph.NumPendingHeaders()) + headers2, _, err := ph.GetPendingHeaders(ctx) + require.NoError(t, err) + require.Len(t, headers2, 2) + assert.Equal(t, uint64(2), headers2[0].Height()) +} diff --git a/block/internal/cache/pending_data.go b/block/internal/cache/pending_data.go index b31334a416..1550f1e1af 100644 --- a/block/internal/cache/pending_data.go +++ b/block/internal/cache/pending_data.go @@ -81,10 +81,19 @@ func (pd *PendingData) NumPendingData() uint64 { return pd.base.numPending() } +func (pd *PendingData) NumPendingDataTotal() uint64 { + pd.advancePastEmptyData(context.Background()) + return 
pd.base.numPendingTotal() +} + func (pd *PendingData) SetLastSubmittedDataHeight(ctx context.Context, newLastSubmittedDataHeight uint64) { pd.base.setLastSubmittedHeight(ctx, newLastSubmittedDataHeight) } +func (pd *PendingData) ResetInFlightDataRange(start, end uint64) { + pd.base.resetInFlightRange(start, end) +} + // advancePastEmptyData advances lastSubmittedDataHeight past any consecutive empty data blocks. // This ensures that NumPendingData doesn't count empty data that won't be published to DA. func (pd *PendingData) advancePastEmptyData(ctx context.Context) { diff --git a/block/internal/cache/pending_data_test.go b/block/internal/cache/pending_data_test.go index 06e1dd9921..97630bb1d5 100644 --- a/block/internal/cache/pending_data_test.go +++ b/block/internal/cache/pending_data_test.go @@ -47,6 +47,7 @@ func TestPendingData_BasicFlow(t *testing.T) { // set last submitted and verify persistence pendingData.SetLastSubmittedDataHeight(ctx, 1) + pendingData.ResetInFlightDataRange(1, 3) metadataRaw, err := store.GetMetadata(ctx, LastSubmittedDataHeightKey) require.NoError(t, err) require.Len(t, metadataRaw, 8) diff --git a/block/internal/cache/pending_headers.go b/block/internal/cache/pending_headers.go index d12f9627af..8d9810678b 100644 --- a/block/internal/cache/pending_headers.go +++ b/block/internal/cache/pending_headers.go @@ -76,10 +76,18 @@ func (ph *PendingHeaders) NumPendingHeaders() uint64 { return ph.base.numPending() } +func (ph *PendingHeaders) NumPendingHeadersTotal() uint64 { + return ph.base.numPendingTotal() +} + func (ph *PendingHeaders) SetLastSubmittedHeaderHeight(ctx context.Context, newLastSubmittedHeaderHeight uint64) { ph.base.setLastSubmittedHeight(ctx, newLastSubmittedHeaderHeight) } +func (ph *PendingHeaders) ResetInFlightHeaderRange(start, end uint64) { + ph.base.resetInFlightRange(start, end) +} + func (ph *PendingHeaders) GetLastSubmittedHeaderHeight() uint64 { return ph.base.getLastSubmittedHeight() } diff --git a/block/internal/cache/pending_headers_test.go b/block/internal/cache/pending_headers_test.go index 8691f3a574..23ac729548 100644 --- a/block/internal/cache/pending_headers_test.go +++ b/block/internal/cache/pending_headers_test.go @@ -47,6 +47,7 @@ func TestPendingHeaders_BasicFlow(t *testing.T) { // advance last submitted height and verify persistence + filtering pendingHeaders.SetLastSubmittedHeaderHeight(ctx, 2) + pendingHeaders.ResetInFlightHeaderRange(1, 3) metadataRaw, err := store.GetMetadata(ctx, storepkg.LastSubmittedHeaderHeightKey) require.NoError(t, err) require.Len(t, metadataRaw, 8) diff --git a/block/internal/common/consts.go b/block/internal/common/consts.go index 840b2faa97..8e1e679fc3 100644 --- a/block/internal/common/consts.go +++ b/block/internal/common/consts.go @@ -2,19 +2,54 @@ package common import "strconv" -// defaultMaxBlobSizeStr holds the string representation of the default blob -// size limit. Override at link time via: +// defaultMaxBlobSizeStr holds the string representation of the default +// blob size limit. Anchored to Fibre's actual cap: protocol MaxBlobSize +// (1 << 27 = 128 MiB) minus the 5-byte Fibre blob header (1 byte +// version + 4 byte data size). See celestia-app/v9/fibre/blob.go +// (blobHeaderLen) and fibre/protocol_params.go (MaxBlobSize). 
// -// go build -ldflags "-X github.com/evstack/ev-node/block/internal/common.defaultMaxBlobSizeStr=125829120" -var defaultMaxBlobSizeStr = "5242880" // 5 MB +// HACK(fiber-throughput): this default is correct for fiber-enabled +// deployments but WRONG for the legacy JSON-RPC blob client path — +// the bridge / chain rejects blobs above its own (much smaller) cap, +// so a non-fiber node started against this default would fail to +// submit. The right shape is per-backend: fiber's cap is one number, +// blob-RPC's cap is another, and DefaultMaxBlobSize shouldn't be a +// single global. Restructure into config when the throughput-cleanup +// TODO lands; until then, non-fiber callers should override via +// ldflag or local config. +// +// MUST be a string literal: Go's `-ldflags "-X ..."` only takes effect +// on variables initialized to a string constant, NOT a function call. +// A previous version used strconv.FormatUint here, which compiled but +// silently ignored ldflag overrides. +// +// Override at link time via: +// +// go build -ldflags "-X github.com/evstack/ev-node/block/internal/common.defaultMaxBlobSizeStr=33554432" +var defaultMaxBlobSizeStr = "134217723" // 1 << 27 - 5 = 128 MiB - 5 B // DefaultMaxBlobSize is the max blob size limit used for blob submission. +// +// TODO(throughput-cleanup): this single value is currently plugged in +// at two semantically different limits and the conflation has caused +// real bugs (a packed block marshals larger than its raw-tx total, so +// using MaxBlobSize as both input cap and output cap let blocks blow +// past the DA cap). Split into two: +// +// MaxBlobSize — chain-side ceiling on a marshaled DA blob +// MaxBlockTxBytes() — derived raw-tx budget = MaxBlobSize - per-block +// marshal overhead. Used by RetrieveBatch / +// FilterTxs. +// +// Once that derivation exists, drop the ad-hoc 2% reservation in +// executing/executor.go::RetrieveBatch and the duplicate cap in +// submitting/da_submitter.go::defaultRetryPolicy. var DefaultMaxBlobSize uint64 func init() { v, err := strconv.ParseUint(defaultMaxBlobSizeStr, 10, 64) if err != nil || v == 0 { - DefaultMaxBlobSize = 5 * 1024 * 1024 // 5 MB fallback + DefaultMaxBlobSize = 134217723 return } DefaultMaxBlobSize = v diff --git a/block/internal/da/fiber/types.go b/block/internal/da/fiber/types.go new file mode 100644 index 0000000000..05e9ca27fd --- /dev/null +++ b/block/internal/da/fiber/types.go @@ -0,0 +1,71 @@ +// Package fiber defines the Fiber DA backend interface and shared types. +// +// # Design Assumptions +// +// - The sequencer trusts the encoder to eventually confirm blob inclusion. +// Upload returns after the blob is uploaded and the PFF transaction is +// broadcast, NOT after on-chain confirmation. This keeps the sequencer's +// write path fast (~2s per 128 MB blob). +// +// - Callers are expected to batch/buffer their data into blobs sized for the +// protocol maximum (128 MiB - 5 byte header = 134,217,723 bytes). +// The interface accepts arbitrary sizes but the implementation may batch +// or reject oversized blobs. +// +// - Confirmation/finality is intentionally omitted from the initial API. +// The sequencer does not need it; the read path (Listen + Download) is +// sufficient for full nodes. A Status or Confirm RPC can be added later +// if needed without breaking existing callers. +// +// - Blob ordering is encoded in the blob data itself by the caller. +// The interface does not impose or guarantee ordering. 
+// +// - The interface is the same whether the encoder runs in-process or as an +// external gRPC service. For in-process use, call the mock or real +// implementation directly; for external use, connect via gRPC. +package fiber + +import ( + "context" + "time" +) + +// BlobID uniquely identifies an uploaded blob (version byte + 32-byte commitment). +type BlobID []byte + +// UploadResult is returned by Upload after the blob is accepted. +type UploadResult struct { + BlobID BlobID + ExpiresAt time.Time +} + +// BlobEvent is delivered via Listen when a blob is confirmed on-chain. +type BlobEvent struct { + BlobID BlobID + Height uint64 + DataSize uint64 +} + +// DA is the interface for interacting with the Fiber data availability layer. +type DA interface { + Head(ctx context.Context) (uint64, error) + + // Upload submits a blob under the given namespace to the DA network. + // Returns after the blob is uploaded and the payment transaction is broadcast. + // Does NOT wait for on-chain confirmation (see package doc for rationale). + Upload(ctx context.Context, namespace []byte, data []byte) (UploadResult, error) + + // Download retrieves and reconstructs a blob by its ID. + // Returns the original data that was passed to Upload. + Download(ctx context.Context, blobID BlobID) ([]byte, error) + + // Listen streams confirmed blob events for the given namespace, + // starting at fromHeight. + // + // fromHeight == 0 starts the stream from the current chain head; any + // positive value replays events from that block forward so a + // subscriber can resume after a restart without missing blobs (the + // DA backend is expected to block, not error, on future heights). + // The returned channel is closed when ctx is cancelled. + Listen(ctx context.Context, namespace []byte, fromHeight uint64) (<-chan BlobEvent, error) +} diff --git a/block/internal/da/fiber_client.go b/block/internal/da/fiber_client.go new file mode 100644 index 0000000000..3a4f9eb754 --- /dev/null +++ b/block/internal/da/fiber_client.go @@ -0,0 +1,394 @@ +package da + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "time" + + "github.com/rs/zerolog" + + "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da/fiber" + datypes "github.com/evstack/ev-node/pkg/da/types" +) + +type ( + FiberClient = fiber.DA + BlobID = fiber.BlobID + UploadResult = fiber.UploadResult + BlobEvent = fiber.BlobEvent +) + +type FiberConfig struct { + Client FiberClient + Logger zerolog.Logger + DefaultTimeout time.Duration + Namespace string + DataNamespace string + LastKnownDAHeight uint64 +} + +type fiberDAClient struct { + fiber FiberClient + logger zerolog.Logger + defaultTimeout time.Duration + namespaceBz []byte + dataNamespaceBz []byte + lastKnownDAHeight uint64 +} + +var _ FullClient = (*fiberDAClient)(nil) + +func NewFiberClient(cfg FiberConfig) (FullClient, error) { + if cfg.Client == nil { + return nil, fmt.Errorf("fiber client in config is nil") + } + + if cfg.DefaultTimeout == 0 { + cfg.DefaultTimeout = 60 * time.Second + } + + return &fiberDAClient{ + fiber: cfg.Client, + logger: cfg.Logger.With().Str("component", "fiber_da_client").Logger(), + defaultTimeout: cfg.DefaultTimeout, + lastKnownDAHeight: cfg.LastKnownDAHeight, + namespaceBz: datypes.NamespaceFromString(cfg.Namespace).Bytes(), + dataNamespaceBz: datypes.NamespaceFromString(cfg.DataNamespace).Bytes(), + }, nil +} + +func (c *fiberDAClient) Submit(ctx context.Context, data [][]byte, _ float64, namespace []byte, _ []byte) 
datypes.ResultSubmit { + if len(data) == 0 { + return datypes.ResultSubmit{ + BaseResult: datypes.BaseResult{ + Code: datypes.StatusSuccess, + SubmittedCount: 0, + Timestamp: time.Now(), + }, + } + } + + var blobSize uint64 + for _, b := range data { + blobSize += uint64(len(b)) + } + + for i, raw := range data { + if uint64(len(raw)) > common.DefaultMaxBlobSize { + return datypes.ResultSubmit{ + BaseResult: datypes.BaseResult{ + Code: datypes.StatusTooBig, + Message: fmt.Sprintf("blob %d exceeds max size (%d > %d)", i, len(raw), common.DefaultMaxBlobSize), + }, + } + } + } + + flat := flattenBlobs(data) + nsID := namespace[len(namespace)-10:] + result, err := c.fiber.Upload(context.Background(), nsID, flat) + if err != nil { + code := datypes.StatusError + switch { + case errors.Is(err, context.Canceled): + code = datypes.StatusContextCanceled + case errors.Is(err, context.DeadlineExceeded): + code = datypes.StatusContextDeadline + } + c.logger.Error().Err(err).Msg("fiber upload failed") + return datypes.ResultSubmit{ + BaseResult: datypes.BaseResult{ + Code: code, + Message: fmt.Sprintf("fiber upload failed for blob: %v", err), + SubmittedCount: uint64(len(data) - 1), + BlobSize: blobSize, + Timestamp: time.Now(), + }, + } + } + + c.logger.Debug().Int("num_ids", len(data)).Uint64("height", 0 /* TODO */).Msg("fiber DA submission successful") + + return datypes.ResultSubmit{ + BaseResult: datypes.BaseResult{ + Code: datypes.StatusSuccess, + IDs: [][]byte{result.BlobID}, + SubmittedCount: uint64(len(data)), + Height: 0, /* TODO */ + BlobSize: blobSize, + Timestamp: time.Now(), + }, + } +} + +func (c *fiberDAClient) Retrieve(ctx context.Context, height uint64, namespace []byte) datypes.ResultRetrieve { + return c.retrieve(ctx, height, namespace, true) +} + +func (c *fiberDAClient) RetrieveBlobs(ctx context.Context, height uint64, namespace []byte) datypes.ResultRetrieve { + return c.retrieve(ctx, height, namespace, false) +} + +func (c *fiberDAClient) retrieve(ctx context.Context, height uint64, namespace []byte, _ bool) datypes.ResultRetrieve { + listenCtx, listenCancel := context.WithTimeout(ctx, c.defaultTimeout) + defer listenCancel() + + blobCh, err := c.fiber.Listen(listenCtx, namespace[len(namespace)-10:], height) + if err != nil { + return datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{ + Code: datypes.StatusError, + Message: fmt.Sprintf("fiber listen failed: %v", err), + Height: height, + Timestamp: time.Now(), + }, + } + } + + var blobIDs []BlobID +loop: + for { + select { + case <-listenCtx.Done(): + break loop + case event, ok := <-blobCh: + if !ok { + break loop + } + if event.Height > height { + break loop + } + blobIDs = append(blobIDs, event.BlobID) + } + } + + if len(blobIDs) == 0 { + return datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{ + Code: datypes.StatusNotFound, + Message: "no blobs found at height for given namespace", + Height: height, + Timestamp: time.Now(), + }, + } + } + + ids := make([]datypes.ID, 0, len(blobIDs)) + data := make([][]byte, 0, len(blobIDs)) + for _, blobID := range blobIDs { + dlCtx, dlCancel := context.WithTimeout(ctx, c.defaultTimeout) + blobData, dlErr := c.fiber.Download(dlCtx, blobID) + dlCancel() + if dlErr != nil { + return datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{ + Code: datypes.StatusError, + Message: fmt.Sprintf("fiber download failed for blob %x: %v", blobID, dlErr), + Height: height, + Timestamp: time.Now(), + }, + } + } + split, splitErr := splitBlobs(blobData) + if splitErr != nil { + return 
datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{ + Code: datypes.StatusError, + Message: fmt.Sprintf("fiber decode failed for blob %x: %v", blobID, splitErr), + Height: height, + Timestamp: time.Now(), + }, + } + } + for _, b := range split { + ids = append(ids, blobID) + data = append(data, b) + } + } + + return datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{ + Code: datypes.StatusSuccess, + Height: height, + IDs: ids, + Timestamp: time.Now(), + }, + Data: data, + } +} + +func (c *fiberDAClient) Get(ctx context.Context, ids []datypes.ID, _ []byte) ([]datypes.Blob, error) { + if len(ids) == 0 { + return nil, nil + } + + res := make([]datypes.Blob, 0, len(ids)) + for _, id := range ids { + downloadCtx, cancel := context.WithTimeout(ctx, c.defaultTimeout) + data, err := c.fiber.Download(downloadCtx, id) + cancel() + if err != nil { + return nil, fmt.Errorf("fiber download failed for blob %x: %w", id, err) + } + split, splitErr := splitBlobs(data) + if splitErr != nil { + return nil, fmt.Errorf("fiber decode failed for blob %x: %w", id, splitErr) + } + res = append(res, split...) + } + + return res, nil +} + +const fiberSubscribeChanSize = 42 + +func (c *fiberDAClient) Subscribe(ctx context.Context, namespace []byte, _ bool) (<-chan datypes.SubscriptionEvent, error) { + out := make(chan datypes.SubscriptionEvent, fiberSubscribeChanSize) + + go func() { + defer close(out) + + // The outer DA Subscribe entry point does not expose a starting + // height, so resume from the configured LastKnownDAHeight (0 means + // the live tip). A future refactor that plumbs resume-from-height + // through datypes.DA can thread the value here. + blobCh, err := c.fiber.Listen(ctx, namespace[len(namespace)-10:], c.lastKnownDAHeight) + if err != nil { + c.logger.Error().Err(err).Msg("fiber listen failed") + return + } + + for { + select { + case <-ctx.Done(): + return + case event, ok := <-blobCh: + if !ok { + return + } + + blobData, err := c.fiber.Download(ctx, event.BlobID) + if err != nil { + c.logger.Error().Err(err).Bytes("blob_id", event.BlobID).Msg("failed to retrieve blob") + continue + } + + split, splitErr := splitBlobs(blobData) + if splitErr != nil { + c.logger.Error().Err(splitErr).Bytes("blob_id", event.BlobID).Msg("failed to decode blob") + continue + } + + select { + case out <- datypes.SubscriptionEvent{ + Height: event.Height, + Timestamp: time.Now(), + Blobs: split, + }: + case <-ctx.Done(): + return + } + } + } + }() + + return out, nil +} + +// GetLatestDAHeight returns the latest height available on the DA layer by +// querying the network head. +func (c *fiberDAClient) GetLatestDAHeight(ctx context.Context) (uint64, error) { + headCtx, cancel := context.WithTimeout(ctx, c.defaultTimeout) + defer cancel() + + height, err := c.fiber.Head(headCtx) + if err != nil { + return 0, fmt.Errorf("failed to get DA network head: %w", err) + } + + return height, nil +} + +func (c *fiberDAClient) GetProofs(_ context.Context, ids []datypes.ID, _ []byte) ([]datypes.Proof, error) { + return []datypes.Proof{}, fmt.Errorf("not implemented") +} + +func (c *fiberDAClient) Validate(_ context.Context, ids []datypes.ID, proofs []datypes.Proof, _ []byte) ([]bool, error) { + if len(ids) != len(proofs) { + return nil, errors.New("number of IDs and proofs must match") + } + if len(ids) == 0 { + return []bool{}, nil + } + + results := make([]bool, len(ids)) + + // not implemented.
+ for i := range results { + results[i] = true + } + + return results, nil +} + +func (c *fiberDAClient) GetHeaderNamespace() []byte { return c.namespaceBz } +func (c *fiberDAClient) GetDataNamespace() []byte { return c.dataNamespaceBz } + +func flattenBlobs(blobs [][]byte) []byte { + if len(blobs) == 0 { + return nil + } + + var total int + for _, b := range blobs { + total += 4 + len(b) + } + total += 4 + + buf := make([]byte, total) + binary.BigEndian.PutUint32(buf, uint32(len(blobs))) + off := 4 + for _, b := range blobs { + binary.BigEndian.PutUint32(buf[off:], uint32(len(b))) + off += 4 + copy(buf[off:], b) + off += len(b) + } + return buf +} + +func splitBlobs(data []byte) ([][]byte, error) { + if len(data) == 0 { + return nil, nil + } + if len(data) < 4 { + return nil, fmt.Errorf("invalid blob encoding: header too short") + } + + count := int(binary.BigEndian.Uint32(data)) + off := 4 + blobs := make([][]byte, 0, count) + for i := range count { + if off+4 > len(data) { + return nil, fmt.Errorf("invalid blob encoding: truncated length at index %d", i) + } + size := int(binary.BigEndian.Uint32(data[off:])) + off += 4 + end := off + size + if end < off || end > len(data) { + return nil, fmt.Errorf("invalid blob encoding: truncated data at index %d", i) + } + blob := make([]byte, size) + copy(blob, data[off:end]) + off = end + blobs = append(blobs, blob) + } + return blobs, nil +} + +// Force Inclusion is disabled for Fiber PoC. +func (c *fiberDAClient) HasForcedInclusionNamespace() bool { return false } +func (c *fiberDAClient) GetForcedInclusionNamespace() []byte { return nil } diff --git a/block/internal/da/fiber_client_test.go b/block/internal/da/fiber_client_test.go new file mode 100644 index 0000000000..fed6f978f7 --- /dev/null +++ b/block/internal/da/fiber_client_test.go @@ -0,0 +1,539 @@ +package da + +import ( + "context" + "errors" + "sync/atomic" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/block/internal/da/fiber" + "github.com/evstack/ev-node/block/internal/da/fibremock" + datypes "github.com/evstack/ev-node/pkg/da/types" +) + +func makeTestFiberClient(t *testing.T) (*fibremock.MockDA, FullClient) { + t.Helper() + mock := fibremock.NewMockDA(fibremock.DefaultMockDAConfig()) + cl, err := NewFiberClient(FiberConfig{ + Client: mock, + Logger: zerolog.Nop(), + DefaultTimeout: 5 * time.Second, + Namespace: "test-ns", + DataNamespace: "test-ns", + }) + require.NotNil(t, cl) + require.NoError(t, err) + return mock, cl +} + +func TestFiberClient_NewClient_Nil(t *testing.T) { + _, err := NewFiberClient(FiberConfig{Client: nil}) + require.Error(t, err) +} + +func TestFiberClient_Submit_Success(t *testing.T) { + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + res := cl.Submit(context.Background(), [][]byte{[]byte("hello"), []byte("world")}, 0, ns, nil) + + require.Equal(t, datypes.StatusSuccess, res.Code) + require.Len(t, res.IDs, 1) + require.Equal(t, uint64(2), res.SubmittedCount) + require.Equal(t, uint64(0), res.Height) + require.Equal(t, uint64(10), res.BlobSize) +} + +func TestFiberClient_Submit_SingleBlob(t *testing.T) { + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + res := cl.Submit(context.Background(), [][]byte{[]byte("single")}, 0, ns, nil) + + require.Equal(t, datypes.StatusSuccess, res.Code) + require.Len(t, res.IDs, 1) + require.Equal(t, uint64(6), res.BlobSize) +} + +func TestFiberClient_Submit_EmptyData(t 
*testing.T) { + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + res := cl.Submit(context.Background(), [][]byte{}, 0, ns, nil) + + require.Equal(t, datypes.StatusSuccess, res.Code) + require.Empty(t, res.IDs) + require.Equal(t, uint64(0), res.SubmittedCount) +} + +func TestFiberClient_Submit_UploadError(t *testing.T) { + mock := fibremock.NewMockDA(fibremock.DefaultMockDAConfig()) + cl, err := NewFiberClient(FiberConfig{ + Client: &faultInjector{FiberClient: mock, err: errors.New("upload failed")}, + Logger: zerolog.Nop(), + DefaultTimeout: 5 * time.Second, + Namespace: "test-ns", + DataNamespace: "test-ns", + }) + require.NoError(t, err) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + res := cl.Submit(context.Background(), [][]byte{[]byte("data")}, 0, ns, nil) + + require.Equal(t, datypes.StatusError, res.Code) + require.Contains(t, res.Message, "fiber upload failed") +} + +func TestFiberClient_Submit_CanceledContext(t *testing.T) { + mock := fibremock.NewMockDA(fibremock.DefaultMockDAConfig()) + cl, err := NewFiberClient(FiberConfig{ + Client: &faultInjector{FiberClient: mock, err: context.Canceled}, + Logger: zerolog.Nop(), + DefaultTimeout: 5 * time.Second, + Namespace: "test-ns", + DataNamespace: "test-ns", + }) + require.NoError(t, err) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + res := cl.Submit(context.Background(), [][]byte{[]byte("data")}, 0, ns, nil) + + require.Equal(t, datypes.StatusContextCanceled, res.Code) +} + +func TestFiberClient_Submit_DeadlineExceeded(t *testing.T) { + mock := fibremock.NewMockDA(fibremock.DefaultMockDAConfig()) + cl, err := NewFiberClient(FiberConfig{ + Client: &faultInjector{FiberClient: mock, err: context.DeadlineExceeded}, + Logger: zerolog.Nop(), + DefaultTimeout: 5 * time.Second, + Namespace: "test-ns", + DataNamespace: "test-ns", + }) + require.NoError(t, err) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + res := cl.Submit(context.Background(), [][]byte{[]byte("data")}, 0, ns, nil) + + require.Equal(t, datypes.StatusContextDeadline, res.Code) +} + +func TestFiberClient_Submit_BlobTooLarge(t *testing.T) { + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + largeBlob := make([]byte, 1<<27) // 1<<27 exceeds DefaultMaxBlobSize (1<<27 - 5) + res := cl.Submit(context.Background(), [][]byte{largeBlob}, 0, ns, nil) + + require.Equal(t, datypes.StatusTooBig, res.Code) +} + +func TestFiberClient_Retrieve_Success(t *testing.T) { + t.Skip("pending Height tracking from fiber DA") + + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + submitRes := cl.Submit(context.Background(), [][]byte{[]byte("hello")}, 0, ns, nil) + require.Equal(t, datypes.StatusSuccess, submitRes.Code) + + retrieveRes := cl.Retrieve(context.Background(), submitRes.Height, ns) + require.Equal(t, datypes.StatusSuccess, retrieveRes.Code) + require.Len(t, retrieveRes.Data, 1) + require.Equal(t, []byte("hello"), retrieveRes.Data[0]) + require.Equal(t, submitRes.IDs, retrieveRes.IDs) +} + +func TestFiberClient_RetrieveBlobs_Success(t *testing.T) { + t.Skip("pending Height tracking from fiber DA") + + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + submitRes := cl.Submit(context.Background(), [][]byte{[]byte("blob1"), []byte("blob2")}, 0, ns, nil) + require.Equal(t, datypes.StatusSuccess, submitRes.Code) + + retrieveRes := cl.RetrieveBlobs(context.Background(), submitRes.Height, ns) + require.Equal(t, datypes.StatusSuccess,
retrieveRes.Code) + require.Len(t, retrieveRes.Data, 2) + require.Equal(t, []byte("blob1"), retrieveRes.Data[0]) + require.Equal(t, []byte("blob2"), retrieveRes.Data[1]) +} + +func TestFiberClient_Retrieve_NotFound(t *testing.T) { + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + retrieveRes := cl.Retrieve(context.Background(), 9999, ns) + require.Equal(t, datypes.StatusNotFound, retrieveRes.Code) +} + +func TestFiberClient_Retrieve_NamespaceFiltering(t *testing.T) { + t.Skip("pending Height tracking from fiber DA") + + _, cl := makeTestFiberClient(t) + + ns1 := datypes.NamespaceFromString("ns-a").Bytes() + ns2 := datypes.NamespaceFromString("ns-b").Bytes() + + res1 := cl.Submit(context.Background(), [][]byte{[]byte("alpha")}, 0, ns1, nil) + require.Equal(t, datypes.StatusSuccess, res1.Code) + + res2 := cl.Submit(context.Background(), [][]byte{[]byte("beta")}, 0, ns2, nil) + require.Equal(t, datypes.StatusSuccess, res2.Code) + + rr1 := cl.Retrieve(context.Background(), res1.Height, ns1) + require.Equal(t, datypes.StatusSuccess, rr1.Code) + require.Equal(t, []byte("alpha"), rr1.Data[0]) + + rr2 := cl.Retrieve(context.Background(), res1.Height, ns2) + require.Equal(t, datypes.StatusNotFound, rr2.Code) + + rr3 := cl.Retrieve(context.Background(), res2.Height, ns2) + require.Equal(t, datypes.StatusSuccess, rr3.Code) + require.Equal(t, []byte("beta"), rr3.Data[0]) +} + +func TestFiberClient_Get_Success(t *testing.T) { + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + submitRes := cl.Submit(context.Background(), [][]byte{[]byte("getme")}, 0, ns, nil) + require.Equal(t, datypes.StatusSuccess, submitRes.Code) + require.Len(t, submitRes.IDs, 1) + + blobs, err := cl.Get(context.Background(), submitRes.IDs, ns) + require.NoError(t, err) + require.Len(t, blobs, 1) + require.Equal(t, []byte("getme"), blobs[0]) +} + +func TestFiberClient_Get_EmptyIDs(t *testing.T) { + _, cl := makeTestFiberClient(t) + + blobs, err := cl.Get(context.Background(), nil, nil) + require.NoError(t, err) + require.Nil(t, blobs) +} + +func TestFiberClient_Get_DownloadError(t *testing.T) { + _, cl := makeTestFiberClient(t) + + fakeBlobID := make([]byte, 33) + _, err := cl.Get(context.Background(), []datypes.ID{fakeBlobID}, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "fiber download failed") +} + +func TestFiberClient_GetProofs_Success(t *testing.T) { + t.Skip() // not implemented + + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + submitRes := cl.Submit(context.Background(), [][]byte{[]byte("prooftest")}, 0, ns, nil) + require.Equal(t, datypes.StatusSuccess, submitRes.Code) + + proofs, err := cl.GetProofs(context.Background(), submitRes.IDs, ns) + require.NoError(t, err) + require.Len(t, proofs, 1) + require.NotEmpty(t, proofs[0]) +} + +func TestFiberClient_GetProofs_Empty(t *testing.T) { + t.Skip() // not implemented + + _, cl := makeTestFiberClient(t) + + proofs, err := cl.GetProofs(context.Background(), nil, nil) + require.NoError(t, err) + require.Empty(t, proofs) +} + +func TestFiberClient_Validate_Success(t *testing.T) { + t.Skip() // not implemented + + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + submitRes := cl.Submit(context.Background(), [][]byte{[]byte("validateme")}, 0, ns, nil) + require.Equal(t, datypes.StatusSuccess, submitRes.Code) + + proofs, err := cl.GetProofs(context.Background(), submitRes.IDs, ns) + 
require.NoError(t, err) + + results, err := cl.Validate(context.Background(), submitRes.IDs, proofs, ns) + require.NoError(t, err) + require.Len(t, results, 1) + require.True(t, results[0]) +} + +func TestFiberClient_Validate_MismatchedLengths(t *testing.T) { + _, cl := makeTestFiberClient(t) + + _, err := cl.Validate(context.Background(), make([]datypes.ID, 3), make([]datypes.Proof, 2), nil) + require.Error(t, err) + require.Contains(t, err.Error(), "must match") +} + +func TestFiberClient_Validate_Empty(t *testing.T) { + _, cl := makeTestFiberClient(t) + + results, err := cl.Validate(context.Background(), nil, nil, nil) + require.NoError(t, err) + require.Empty(t, results) +} + +func TestFiberClient_Validate_WrongProof(t *testing.T) { + t.Skip() // not implemented + + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + submitRes := cl.Submit(context.Background(), [][]byte{[]byte("validatewrong")}, 0, ns, nil) + require.Equal(t, datypes.StatusSuccess, submitRes.Code) + + fakeProofs := []datypes.Proof{[]byte("wrong-proof")} + results, err := cl.Validate(context.Background(), submitRes.IDs, fakeProofs, ns) + require.NoError(t, err) + require.Len(t, results, 1) + require.False(t, results[0]) +} + +func TestFiberClient_Validate_EmptyProof(t *testing.T) { + t.Skip() // not implemented + + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + submitRes := cl.Submit(context.Background(), [][]byte{[]byte("data")}, 0, ns, nil) + require.Equal(t, datypes.StatusSuccess, submitRes.Code) + + emptyProofs := []datypes.Proof{[]byte{}} + results, err := cl.Validate(context.Background(), submitRes.IDs, emptyProofs, ns) + require.NoError(t, err) + require.False(t, results[0]) +} + +func TestFiberClient_Namespaces(t *testing.T) { + mock := fibremock.NewMockDA(fibremock.DefaultMockDAConfig()) + cl, err := NewFiberClient(FiberConfig{ + Client: mock, + Logger: zerolog.Nop(), + Namespace: "header-ns", + DataNamespace: "data-ns", + }) + require.NotNil(t, cl) + require.NoError(t, err) + + require.Equal(t, datypes.NamespaceFromString("header-ns").Bytes(), cl.GetHeaderNamespace()) + require.Equal(t, datypes.NamespaceFromString("data-ns").Bytes(), cl.GetDataNamespace()) + require.False(t, cl.HasForcedInclusionNamespace()) +} + +func TestFiberClient_NoForcedNamespace(t *testing.T) { + mock := fibremock.NewMockDA(fibremock.DefaultMockDAConfig()) + cl, err := NewFiberClient(FiberConfig{ + Client: mock, + Logger: zerolog.Nop(), + Namespace: "header-ns", + DataNamespace: "data-ns", + }) + require.NotNil(t, cl) + require.NoError(t, err) + + require.Nil(t, cl.GetForcedInclusionNamespace()) + require.False(t, cl.HasForcedInclusionNamespace()) +} + +func TestFiberClient_Subscribe(t *testing.T) { + t.Skip("pending Height tracking from fiber DA") + _, cl := makeTestFiberClient(t) + + ctx := t.Context() + + ch, err := cl.Subscribe(ctx, datypes.NamespaceFromString("test-ns").Bytes(), false) + require.NoError(t, err) + require.NotNil(t, ch) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + submitRes := cl.Submit(context.Background(), [][]byte{[]byte("sub-data")}, 0, ns, nil) + require.Equal(t, datypes.StatusSuccess, submitRes.Code) + + select { + case ev := <-ch: + require.Equal(t, submitRes.Height, ev.Height) + require.Len(t, ev.Blobs, 1) + require.Equal(t, []byte("sub-data"), ev.Blobs[0]) + case <-time.After(5 * time.Second): + t.Fatal("subscribe did not emit event within timeout") + } +} + +func TestFiberClient_Submit_MultipleBlobs(t 
*testing.T) { + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + data := [][]byte{[]byte("first"), []byte("second"), []byte("third")} + res := cl.Submit(context.Background(), data, 0, ns, nil) + + require.Equal(t, datypes.StatusSuccess, res.Code) + require.Len(t, res.IDs, 1) + require.Equal(t, uint64(3), res.SubmittedCount) + + blobs, err := cl.Get(context.Background(), res.IDs, ns) + require.NoError(t, err) + require.Len(t, blobs, 3) + require.Equal(t, []byte("first"), blobs[0]) + require.Equal(t, []byte("second"), blobs[1]) + require.Equal(t, []byte("third"), blobs[2]) +} + +func TestFiberClient_SubmitAndDownload(t *testing.T) { + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + data := []byte("download-test") + submitRes := cl.Submit(context.Background(), [][]byte{data}, 0, ns, nil) + require.Equal(t, datypes.StatusSuccess, submitRes.Code) + + blobs, err := cl.Get(context.Background(), submitRes.IDs, ns) + require.NoError(t, err) + require.Len(t, blobs, 1) + require.Equal(t, data, blobs[0]) +} + +func TestFiberClient_DefaultTimeout(t *testing.T) { + mock := fibremock.NewMockDA(fibremock.DefaultMockDAConfig()) + cl, err := NewFiberClient(FiberConfig{ + Client: mock, + Logger: zerolog.Nop(), + Namespace: "ns", + DataNamespace: "ns", + }) + require.NotNil(t, cl) + require.NoError(t, err) + + fc := cl.(*fiberDAClient) + require.Equal(t, 60*time.Second, fc.defaultTimeout) +} + +func TestFiberClient_FullSubmitRetrieveCycle(t *testing.T) { + t.Skip() // not implemented + + _, cl := makeTestFiberClient(t) + + ns := datypes.NamespaceFromString("test-ns").Bytes() + + submitRes := cl.Submit(context.Background(), [][]byte{[]byte("cycle-data")}, 0, ns, nil) + require.Equal(t, datypes.StatusSuccess, submitRes.Code) + require.Len(t, submitRes.IDs, 1) + submittedHeight := submitRes.Height + + retrieveRes := cl.Retrieve(context.Background(), submittedHeight, ns) + require.Equal(t, datypes.StatusSuccess, retrieveRes.Code) + require.Equal(t, []byte("cycle-data"), retrieveRes.Data[0]) + + blobs, err := cl.Get(context.Background(), submitRes.IDs, ns) + require.NoError(t, err) + require.Equal(t, []byte("cycle-data"), blobs[0]) + + proofs, err := cl.GetProofs(context.Background(), submitRes.IDs, ns) + require.NoError(t, err) + require.NotEmpty(t, proofs[0]) + + valid, err := cl.Validate(context.Background(), submitRes.IDs, proofs, ns) + require.NoError(t, err) + require.True(t, valid[0]) +} + +type faultInjector struct { + FiberClient + err error +} + +func (f *faultInjector) SetError(err error) { f.err = err } + +func (f *faultInjector) Upload(ctx context.Context, namespace, data []byte) (fiber.UploadResult, error) { + if f.err != nil { + return fiber.UploadResult{}, f.err + } + return f.FiberClient.Upload(ctx, namespace, data) +} + +type failOnNthUpload struct { + FiberClient + failAt uint64 + err error + callCount atomic.Uint64 +} + +func (f *failOnNthUpload) Upload(ctx context.Context, namespace, data []byte) (fiber.UploadResult, error) { + n := f.callCount.Add(1) + if n == f.failAt { + return fiber.UploadResult{}, f.err + } + return f.FiberClient.Upload(ctx, namespace, data) +} + +func TestFlattenSplitBlobs_Roundtrip(t *testing.T) { + cases := []struct { + name string + blobs [][]byte + }{ + {"single", [][]byte{[]byte("hello")}}, + {"multiple", [][]byte{[]byte("first"), []byte("second"), []byte("third")}}, + {"empty_blob", [][]byte{[]byte{}, []byte("data"), []byte{}}}, + {"nil_blob", [][]byte{nil, []byte("data")}}, + 
{"large", [][]byte{make([]byte, 1024), make([]byte, 4096)}}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + flat := flattenBlobs(tc.blobs) + got, err := splitBlobs(flat) + require.NoError(t, err) + require.Equal(t, len(tc.blobs), len(got)) + for i, b := range got { + expected := tc.blobs[i] + if expected == nil { + expected = []byte{} + } + require.Equal(t, expected, b) + } + }) + } +} + +func TestFlattenBlobs_Empty(t *testing.T) { + require.Nil(t, flattenBlobs(nil)) + require.Nil(t, flattenBlobs([][]byte{})) +} + +func TestSplitBlobs_Empty(t *testing.T) { + got, err := splitBlobs(nil) + require.NoError(t, err) + require.Nil(t, got) + + got, err = splitBlobs([]byte{}) + require.NoError(t, err) + require.Nil(t, got) +} + +func TestSplitBlobs_Truncated(t *testing.T) { + _, err := splitBlobs([]byte{0x01}) + require.Error(t, err) + + _, err = splitBlobs([]byte{0x00, 0x00, 0x00, 0x01, 0x00, 0x00}) + require.Error(t, err) +} + +func TestSplitBlobs_CountMismatch(t *testing.T) { + data := []byte{0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x41} + _, err := splitBlobs(data) + require.Error(t, err) +} diff --git a/block/internal/da/fibremock/mock.go b/block/internal/da/fibremock/mock.go new file mode 100644 index 0000000000..b62d36aef4 --- /dev/null +++ b/block/internal/da/fibremock/mock.go @@ -0,0 +1,300 @@ +package fibremock + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + "sync" + "time" + + "github.com/evstack/ev-node/block/internal/da/fiber" +) + +var ( + // ErrBlobNotFound is returned when a blob ID is not in the store. + ErrBlobNotFound = errors.New("blob not found") + // ErrDataEmpty is returned when Upload is called with empty data. + ErrDataEmpty = errors.New("data cannot be empty") +) + +// MockDAConfig configures the mock DA implementation. +type MockDAConfig struct { + // MaxBlobs is the maximum number of blobs stored in memory. + // When exceeded, the oldest blob is evicted regardless of retention. + // 0 means no limit (use with caution — large blobs will OOM). + MaxBlobs int + // Retention is how long blobs are kept before automatic pruning. + // 0 means blobs are kept until evicted by MaxBlobs. + Retention time.Duration +} + +// DefaultMockDAConfig returns a config suitable for testing: +// 100 blobs max, 10 minute retention. +func DefaultMockDAConfig() MockDAConfig { + return MockDAConfig{ + MaxBlobs: 100, + Retention: 10 * time.Minute, + } +} + +// storedBlob holds a blob and its metadata in the mock store. +type storedBlob struct { + namespace []byte + data []byte + height uint64 + expiresAt time.Time + createdAt time.Time +} + +// subscriber tracks a Listen subscription. +type subscriber struct { + namespace []byte + ch chan fiber.BlobEvent +} + +// MockDA is an in-memory mock implementation of the DA interface. +// It stores blobs in memory with configurable retention and max blob count. +// Safe for concurrent use. +type MockDA struct { + cfg MockDAConfig + + mu sync.RWMutex + blobs map[string]*storedBlob // keyed by hex(blobID) + order []string // insertion order for LRU eviction + height uint64 + subscribers []subscriber +} + +// NewMockDA creates a new mock DA with the given config. +func NewMockDA(cfg MockDAConfig) *MockDA { + return &MockDA{ + cfg: cfg, + blobs: make(map[string]*storedBlob), + } +} + +func (m *MockDA) Head(ctx context.Context) (uint64, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.height, nil +} + +// Upload stores the blob in memory and notifies listeners. 
+func (m *MockDA) Upload(ctx context.Context, namespace []byte, data []byte) (fiber.UploadResult, error) { + if len(data) == 0 { + return fiber.UploadResult{}, ErrDataEmpty + } + + blobID := mockBlobID(data) + key := fmt.Sprintf("%x", blobID) + now := time.Now() + + var expiresAt time.Time + if m.cfg.Retention > 0 { + expiresAt = now.Add(m.cfg.Retention) + } + + m.mu.Lock() + + // Evict oldest if at capacity + if m.cfg.MaxBlobs > 0 && len(m.blobs) >= m.cfg.MaxBlobs { + m.evictOldestLocked() + } + + // Prune expired blobs opportunistically + if m.cfg.Retention > 0 { + m.pruneExpiredLocked(now) + } + + m.height++ + height := m.height + + m.blobs[key] = &storedBlob{ + namespace: namespace, + data: data, + height: height, + expiresAt: expiresAt, + createdAt: now, + } + m.order = append(m.order, key) + + // Notify subscribers (non-blocking) + event := fiber.BlobEvent{ + BlobID: blobID, + Height: height, + DataSize: uint64(len(data)), + } + for i := range m.subscribers { + if namespaceMatch(m.subscribers[i].namespace, namespace) { + select { + case m.subscribers[i].ch <- event: + default: + // Channel full, drop event. Subscriber is too slow. + } + } + } + + m.mu.Unlock() + + return fiber.UploadResult{ + BlobID: blobID, + ExpiresAt: expiresAt, + }, nil +} + +// Download retrieves a blob by ID. +func (m *MockDA) Download(ctx context.Context, blobID fiber.BlobID) ([]byte, error) { + key := fmt.Sprintf("%x", blobID) + + m.mu.RLock() + blob, ok := m.blobs[key] + m.mu.RUnlock() + + if !ok { + return nil, ErrBlobNotFound + } + + if !blob.expiresAt.IsZero() && time.Now().After(blob.expiresAt) { + return nil, ErrBlobNotFound + } + + return blob.data, nil +} + +// Listen returns a channel that receives events when blobs matching the +// namespace are uploaded, starting at fromHeight. +// +// fromHeight == 0 subscribes to future uploads only. fromHeight > 0 first +// replays every matching blob still in the store with height >= fromHeight, +// then attaches a live subscriber for subsequent uploads. The replay may +// interleave with live events emitted between the Listen call and the +// replay goroutine's drain; consumers should dedupe by BlobID. +// +// The channel is closed when ctx is cancelled. +func (m *MockDA) Listen(ctx context.Context, namespace []byte, fromHeight uint64) (<-chan fiber.BlobEvent, error) { + ch := make(chan fiber.BlobEvent, 64) + + m.mu.Lock() + // Snapshot matching historicals under the lock to avoid racing with + // concurrent Upload calls; the replay goroutine emits them after. + var replay []fiber.BlobEvent + if fromHeight > 0 { + for _, key := range m.order { + b, ok := m.blobs[key] + if !ok { + continue + } + if !namespaceMatch(namespace, b.namespace) { + continue + } + if b.height < fromHeight { + continue + } + replay = append(replay, fiber.BlobEvent{ + BlobID: mockBlobID(b.data), + Height: b.height, + DataSize: uint64(len(b.data)), + }) + } + } + idx := len(m.subscribers) + m.subscribers = append(m.subscribers, subscriber{ + namespace: namespace, + ch: ch, + }) + m.mu.Unlock() + + // Replay historical events in a goroutine so the caller isn't + // blocked if the buffer fills. Live events may interleave. + var replayDone sync.WaitGroup + if len(replay) > 0 { + replayDone.Go(func() { + for _, ev := range replay { + select { + case ch <- ev: + case <-ctx.Done(): + return + } + } + }) + } + + // Clean up when context is done. 
+ go func() { + <-ctx.Done() + m.mu.Lock() + // Remove subscriber by swapping with last + last := len(m.subscribers) - 1 + if idx <= last { + m.subscribers[idx] = m.subscribers[last] + } + m.subscribers = m.subscribers[:last] + m.mu.Unlock() + replayDone.Wait() + close(ch) + }() + + return ch, nil +} + +// BlobCount returns the number of blobs currently stored. +func (m *MockDA) BlobCount() int { + m.mu.RLock() + defer m.mu.RUnlock() + return len(m.blobs) +} + +// evictOldestLocked removes the oldest blob. Caller must hold m.mu. +func (m *MockDA) evictOldestLocked() { + if len(m.order) == 0 { + return + } + key := m.order[0] + m.order = m.order[1:] + delete(m.blobs, key) +} + +// pruneExpiredLocked removes blobs past their retention. Caller must hold m.mu. +func (m *MockDA) pruneExpiredLocked(now time.Time) { + surviving := m.order[:0] + for _, key := range m.order { + blob, ok := m.blobs[key] + if !ok { + continue + } + if !blob.expiresAt.IsZero() && now.After(blob.expiresAt) { + delete(m.blobs, key) + } else { + surviving = append(surviving, key) + } + } + m.order = surviving +} + +// namespaceMatch returns true if the subscription namespace matches the blob namespace. +// An empty subscription namespace matches all namespaces (wildcard). +func namespaceMatch(subNS, blobNS []byte) bool { + if len(subNS) == 0 { + return true + } + if len(subNS) != len(blobNS) { + return false + } + for i := range subNS { + if subNS[i] != blobNS[i] { + return false + } + } + return true +} + +// mockBlobID produces a deterministic blob ID from the data. +// Format: 1 byte version (0) + 32 bytes SHA256 hash. +func mockBlobID(data []byte) fiber.BlobID { + hash := sha256.Sum256(data) + id := make([]byte, 33) + id[0] = 0 // version byte + copy(id[1:], hash[:]) + return id +} diff --git a/block/internal/da/fibremock/mock_test.go b/block/internal/da/fibremock/mock_test.go new file mode 100644 index 0000000000..5bfe38af7a --- /dev/null +++ b/block/internal/da/fibremock/mock_test.go @@ -0,0 +1,196 @@ +package fibremock + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/evstack/ev-node/block/internal/da/fiber" +) + +func TestMockDA_UploadDownload(t *testing.T) { + m := NewMockDA(DefaultMockDAConfig()) + ctx := context.Background() + + ns := []byte("test-ns") + data := []byte("hello fibre") + + result, err := m.Upload(ctx, ns, data) + if err != nil { + t.Fatal(err) + } + if len(result.BlobID) != 33 { + t.Fatalf("expected 33-byte blob ID, got %d", len(result.BlobID)) + } + + got, err := m.Download(ctx, result.BlobID) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(got, data) { + t.Fatalf("data mismatch: got %q, want %q", got, data) + } +} + +func TestMockDA_UploadEmpty(t *testing.T) { + m := NewMockDA(DefaultMockDAConfig()) + _, err := m.Upload(context.Background(), []byte("ns"), nil) + if err != ErrDataEmpty { + t.Fatalf("expected ErrDataEmpty, got %v", err) + } +} + +func TestMockDA_DownloadNotFound(t *testing.T) { + m := NewMockDA(DefaultMockDAConfig()) + _, err := m.Download(context.Background(), fiber.BlobID{0, 1, 2}) + if err != ErrBlobNotFound { + t.Fatalf("expected ErrBlobNotFound, got %v", err) + } +} + +func TestMockDA_Listen(t *testing.T) { + m := NewMockDA(DefaultMockDAConfig()) + ctx := t.Context() + + ns := []byte("test-ns") + ch, err := m.Listen(ctx, ns, 0) + if err != nil { + t.Fatal(err) + } + + // Upload a blob — should trigger the listener + data := []byte("listened blob") + result, err := m.Upload(ctx, ns, data) + if err != nil { + t.Fatal(err) + } + + select { + case 
event := <-ch: + if !bytes.Equal(event.BlobID, result.BlobID) { + t.Fatal("blob ID mismatch in event") + } + if event.Height == 0 { + t.Fatal("expected non-zero height") + } + if event.DataSize != uint64(len(data)) { + t.Fatalf("expected data size %d, got %d", len(data), event.DataSize) + } + case <-time.After(time.Second): + t.Fatal("timeout waiting for event") + } +} + +func TestMockDA_ListenNamespaceFilter(t *testing.T) { + m := NewMockDA(DefaultMockDAConfig()) + ctx := t.Context() + + ch, err := m.Listen(ctx, []byte("ns-A"), 0) + if err != nil { + t.Fatal(err) + } + + // Upload to different namespace — should NOT trigger + m.Upload(ctx, []byte("ns-B"), []byte("wrong namespace")) + + select { + case <-ch: + t.Fatal("should not receive event for different namespace") + case <-time.After(50 * time.Millisecond): + // good + } +} + +func TestMockDA_ListenWildcard(t *testing.T) { + m := NewMockDA(DefaultMockDAConfig()) + ctx := t.Context() + + // Empty namespace = wildcard + ch, err := m.Listen(ctx, nil, 0) + if err != nil { + t.Fatal(err) + } + + m.Upload(ctx, []byte("any-ns"), []byte("wildcard test")) + + select { + case event := <-ch: + if event.Height == 0 { + t.Fatal("expected event") + } + case <-time.After(time.Second): + t.Fatal("timeout waiting for wildcard event") + } +} + +func TestMockDA_MaxBlobsEviction(t *testing.T) { + m := NewMockDA(MockDAConfig{MaxBlobs: 3}) + ctx := context.Background() + + var ids []fiber.BlobID + for i := range 5 { + r, err := m.Upload(ctx, nil, []byte{byte(i), 1, 2, 3}) + if err != nil { + t.Fatal(err) + } + ids = append(ids, r.BlobID) + } + + // First two should be evicted + if _, err := m.Download(ctx, ids[0]); err != ErrBlobNotFound { + t.Fatal("expected first blob to be evicted") + } + if _, err := m.Download(ctx, ids[1]); err != ErrBlobNotFound { + t.Fatal("expected second blob to be evicted") + } + + // Last three should still be there + for i := 2; i < 5; i++ { + if _, err := m.Download(ctx, ids[i]); err != nil { + t.Fatalf("blob %d should exist: %v", i, err) + } + } + + if m.BlobCount() != 3 { + t.Fatalf("expected 3 blobs, got %d", m.BlobCount()) + } +} + +func TestMockDA_Retention(t *testing.T) { + m := NewMockDA(MockDAConfig{Retention: 50 * time.Millisecond}) + ctx := context.Background() + + r, err := m.Upload(ctx, nil, []byte("ephemeral")) + if err != nil { + t.Fatal(err) + } + + // Should exist immediately + if _, err := m.Download(ctx, r.BlobID); err != nil { + t.Fatal("blob should exist immediately") + } + + // Wait for expiry + time.Sleep(100 * time.Millisecond) + + if _, err := m.Download(ctx, r.BlobID); err != ErrBlobNotFound { + t.Fatal("blob should have expired") + } +} + +func TestMockDA_DeterministicBlobID(t *testing.T) { + m := NewMockDA(DefaultMockDAConfig()) + ctx := context.Background() + + data := []byte("deterministic") + r1, _ := m.Upload(ctx, nil, data) + r2, _ := m.Upload(ctx, nil, data) + + if !bytes.Equal(r1.BlobID, r2.BlobID) { + t.Fatal("same data should produce same blob ID") + } +} + +// Verify MockDA satisfies the DA interface at compile time. 
+var _ fiber.DA = (*MockDA)(nil) diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index f5be5e1b40..ed47b687bf 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -484,8 +484,8 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { shouldCheck := e.config.Node.MaxPendingHeadersAndData <= pendingCheckInterval || e.pendingCheckCounter%pendingCheckInterval == 0 if shouldCheck { - pendingHeaders := e.cache.NumPendingHeaders() - pendingData := e.cache.NumPendingData() + pendingHeaders := e.cache.NumPendingHeadersTotal() + pendingData := e.cache.NumPendingDataTotal() if pendingHeaders >= e.config.Node.MaxPendingHeadersAndData || pendingData >= e.config.Node.MaxPendingHeadersAndData { e.logger.Warn(). @@ -627,19 +627,22 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { signature: signature, }) - // Broadcast header and data to P2P network sequentially. - // IMPORTANT: Header MUST be broadcast before data — the P2P layer validates - // incoming data against the current and previous header, so out-of-order - // delivery would cause validation failures on peers. - if err := e.headerBroadcaster.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{ - SignedHeader: header, - }); err != nil { - e.logger.Error().Err(err).Msg("failed to broadcast header") - } - if err := e.dataBroadcaster.WriteToStoreAndBroadcast(ctx, &types.P2PData{ - Data: data, - }); err != nil { - e.logger.Error().Err(err).Msg("failed to broadcast data") + // No broadcast to P2P when fiber is enabled. + if !e.config.DA.IsFiberEnabled() { + // Broadcast header and data to P2P network sequentially. + // IMPORTANT: Header MUST be broadcast before data — the P2P layer validates + // incoming data against the current and previous header, so out-of-order + // delivery would cause validation failures on peers. + if err := e.headerBroadcaster.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{ + SignedHeader: header, + }); err != nil { + e.logger.Error().Err(err).Msg("failed to broadcast header") + } + if err := e.dataBroadcaster.WriteToStoreAndBroadcast(ctx, &types.P2PData{ + Data: data, + }); err != nil { + e.logger.Error().Err(err).Msg("failed to broadcast data") + } } e.recordBlockMetrics(newState, data) @@ -660,11 +663,35 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { return nil } +// blockMarshalOverhead reserves a fraction of MaxBlobSize for the proto +// framing + Metadata overhead added when types.Data is marshaled into a +// DA blob. Empirically the per-tx proto length-prefix runs ~3 bytes, +// which is roughly 1.5% at 200 B txs and stays in that range across +// realistic tx sizes; 2% gives margin for fixed Metadata fields without +// leaving meaningful capacity unused. Reserving here (vs. inside +// FilterTxs) keeps the executor’s view of MaxBytes equal to the raw-tx +// budget and prevents a fully packed batch from blowing past the +// submitter’s MaxBlobSize check. +// +// TODO(throughput-cleanup): this is the workaround half of a deeper +// issue — common.DefaultMaxBlobSize is used as both the raw-tx +// budget AND the marshaled-blob ceiling. The right fix is to derive +// a MaxBlockTxBytes() value once (= MaxBlobSize - overhead) and have +// RetrieveBatch / FilterTxs / da_submitter.limitBatchBySize all +// reference the appropriate value rather than each enforcing the +// same number with their own ad-hoc adjustments. See +// common/consts.go for the umbrella TODO. 
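+//
+// Worked example (illustrative only; the concrete value of
+// common.DefaultMaxBlobSize is not assumed here): with a 2 MiB blob
+// limit the reservation is 2_097_152*2/100 = 41_943 bytes, so
+// RetrieveBatch below asks the sequencer for at most
+// 2_097_152 - 41_943 = 2_055_209 bytes of raw transactions.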
+const blockMarshalOverheadPct = 2 + // RetrieveBatch gets the next batch of transactions from the sequencer. func (e *Executor) RetrieveBatch(ctx context.Context) (*BatchData, error) { + maxTxBytes := common.DefaultMaxBlobSize + if reserve := maxTxBytes * blockMarshalOverheadPct / 100; reserve < maxTxBytes { + maxTxBytes -= reserve + } req := coresequencer.GetNextBatchRequest{ Id: []byte(e.genesis.ChainID), - MaxBytes: common.DefaultMaxBlobSize, + MaxBytes: maxTxBytes, LastBatchData: [][]byte{}, // Can be populated if needed for sequencer context } diff --git a/block/internal/reaping/reaper.go b/block/internal/reaping/reaper.go index 1f51c08f10..d35c61e422 100644 --- a/block/internal/reaping/reaper.go +++ b/block/internal/reaping/reaper.go @@ -24,7 +24,7 @@ const ( // CleanupInterval is how often the reaper sweeps expired hashes // out of the seen-tx cache. - CleanupInterval = max(cache.DefaultTxCacheRetention/10, 15*time.Second) + CleanupInterval = max(cache.DefaultTxCacheRetention/10, 5*time.Second) ) // Reaper is responsible for periodically retrieving transactions from the executor, diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index 83f56d9cb5..2d6fa5ea18 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -122,8 +122,7 @@ type DASubmitter struct { addressSelector pkgda.AddressSelector // envelopeCache caches fully signed DA envelopes by height to avoid re-signing on retries - envelopeCache *lru.Cache[uint64, []byte] - envelopeCacheMu sync.RWMutex + envelopeCache *lru.Cache[uint64, []byte] // lastSubmittedHeight tracks the last successfully submitted height for lazy cache invalidation. // This avoids O(N) iteration over the cache on every submission. 
@@ -131,6 +130,8 @@ type DASubmitter struct { // signingWorkers is the number of parallel workers for signing signingWorkers int + + wg sync.WaitGroup } // NewDASubmitter creates a new DA submitter @@ -194,22 +195,12 @@ func NewDASubmitter( } } -// recordFailure records a DA submission failure in metrics -func (s *DASubmitter) recordFailure(reason common.DASubmitterFailureReason) { - counter, ok := s.metrics.DASubmitterFailures[reason] - if !ok { - s.logger.Warn().Str("reason", string(reason)).Msg("unregistered failure reason, metric not recorded") - return - } - counter.Add(1) - - if gauge, ok := s.metrics.DASubmitterLastFailure[reason]; ok { - gauge.Set(float64(time.Now().Unix())) - } +func (s *DASubmitter) Close() { + s.wg.Wait() } // SubmitHeaders submits pending headers to DA layer -func (s *DASubmitter) SubmitHeaders(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer) error { +func (s *DASubmitter) SubmitHeaders(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer, onSubmitError func(error)) error { if len(headers) == 0 { return nil } @@ -230,28 +221,283 @@ func (s *DASubmitter) SubmitHeaders(ctx context.Context, headers []*types.Signed return err } - return submitToDA(s, ctx, headers, envelopes, - func(submitted []*types.SignedHeader, res *datypes.ResultSubmit) { - heights := make([]uint64, len(submitted)) - for i, header := range submitted { - cache.SetHeaderDAIncluded(header.Hash().String(), res.Height, header.Height()) - heights[i] = header.Height() + postSubmit := s.makeHeaderPostSubmit(ctx, cache) + namespace := s.client.GetHeaderNamespace() + submittedOffset := 0 + + s.wg.Go(func() { + s.submitWithRetry(ctx, envelopes, namespace, func(submittedCount int, daHeight uint64) { + if submittedCount > 0 { + end := submittedOffset + submittedCount + postSubmit(headers[submittedOffset:end], &datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: uint64(submittedCount), Height: daHeight}}) + submittedOffset = end } - if err := s.headerDAHintAppender.AppendDAHint(ctx, res.Height, heights...); err != nil { - s.logger.Error().Err(err).Msg("failed to append da height hint in header p2p store") - // ignoring error here, since we don't want to block the block submission' + }, onSubmitError, "header") + }) + + return nil +} + +func (s *DASubmitter) makeHeaderPostSubmit(ctx context.Context, cache cache.Manager) func([]*types.SignedHeader, *datypes.ResultSubmit) { + return func(submitted []*types.SignedHeader, res *datypes.ResultSubmit) { + heights := make([]uint64, len(submitted)) + for i, header := range submitted { + cache.SetHeaderDAIncluded(header.Hash().String(), res.Height, header.Height()) + heights[i] = header.Height() + } + if err := s.headerDAHintAppender.AppendDAHint(ctx, res.Height, heights...); err != nil { + s.logger.Error().Err(err).Msg("failed to append da height hint in header p2p store") + } + if l := len(submitted); l > 0 { + lastHeight := submitted[l-1].Height() + cache.SetLastSubmittedHeaderHeight(ctx, lastHeight) + s.lastSubmittedHeight.Store(lastHeight) + } + } +} + +// SubmitData submits pending data to DA layer +func (s *DASubmitter) SubmitData(ctx context.Context, unsignedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis, onSubmitError func(error)) error { + if len(unsignedDataList) == 0 { + return nil + } + + if 
len(marshalledData) != len(unsignedDataList) { + return fmt.Errorf("marshalledData length (%d) does not match unsignedDataList length (%d)", len(marshalledData), len(unsignedDataList)) + } + + // Sign the data (cache returns unsigned SignedData structs) + signedDataList, signedDataListBz, err := s.signData(ctx, unsignedDataList, marshalledData, signer, genesis) + if err != nil { + return fmt.Errorf("failed to sign data: %w", err) + } + + if len(signedDataList) == 0 { + return nil + } + + s.logger.Info().Int("count", len(signedDataList)).Msg("submitting data to DA") + + postSubmit := s.makeDataPostSubmit(ctx, cache) + namespace := s.client.GetDataNamespace() + submittedOffset := 0 + + s.wg.Go(func() { + s.submitWithRetry(ctx, signedDataListBz, namespace, func(submittedCount int, daHeight uint64) { + if submittedCount > 0 { + end := submittedOffset + submittedCount + postSubmit(signedDataList[submittedOffset:end], &datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: uint64(submittedCount), Height: daHeight}}) + submittedOffset = end } - if l := len(submitted); l > 0 { - lastHeight := submitted[l-1].Height() - cache.SetLastSubmittedHeaderHeight(ctx, lastHeight) - // Update last submitted height for lazy cache invalidation (O(1) instead of O(N)) - s.lastSubmittedHeight.Store(lastHeight) + }, onSubmitError, "data") + }) + + return nil +} + +func (s *DASubmitter) makeDataPostSubmit(ctx context.Context, cache cache.Manager) func([]*types.SignedData, *datypes.ResultSubmit) { + return func(submitted []*types.SignedData, res *datypes.ResultSubmit) { + heights := make([]uint64, len(submitted)) + for i, sd := range submitted { + cache.SetDataDAIncluded(sd.Data.DACommitment().String(), res.Height, sd.Height()) + heights[i] = sd.Height() + } + if err := s.dataDAHintAppender.AppendDAHint(ctx, res.Height, heights...); err != nil { + s.logger.Error().Err(err).Msg("failed to append da height hint in data p2p store") + } + if l := len(submitted); l > 0 { + lastHeight := submitted[l-1].Height() + cache.SetLastSubmittedDataHeight(ctx, lastHeight) + } + } +} + +func (s *DASubmitter) submitWithRetry( + ctx context.Context, + marshaled [][]byte, + namespace []byte, + onSuccess func(submittedCount int, daHeight uint64), + onError func(error), + itemType string, +) { + pol := defaultRetryPolicy(s.config.DA.MaxSubmitAttempts, s.config.DA.BlockTime.Duration) + options := []byte(s.config.DA.SubmitOptions) + + if len(marshaled) == 0 { + if onError != nil { + onError(nil) + } + return + } + + limitedMarshaled, oversized := limitBatchBySizeBytes(marshaled, pol.MaxBlobBytes) + if oversized { + s.logger.Error(). + Str("itemType", itemType). + Uint64("maxBlobBytes", pol.MaxBlobBytes). 
+ Msg("CRITICAL: item exceeds maximum blob size") + if onError != nil { + onError(common.ErrOversizedItem) + } + return + } + marshaled = limitedMarshaled + + rs := retryState{} + + // Start the retry loop + for rs.Attempt < pol.MaxAttempts { + // Record resend metric for retry attempts (not the first attempt) + if rs.Attempt > 0 { + s.metrics.DASubmitterResends.Add(1) + } + + if err := waitForBackoffOrContext(ctx, rs.Backoff); err != nil { + if onError != nil { + onError(nil) + } + return + } + + // Select signing address and merge with options + signingAddress := s.addressSelector.Next() + mergedOptions, err := mergeSubmitOptions(options, signingAddress) + if err != nil { + s.logger.Error().Err(err).Msg("failed to merge submit options with signing address") + if onError != nil { + onError(err) } - }, - "header", - s.client.GetHeaderNamespace(), - []byte(s.config.DA.SubmitOptions), - ) + return + } + + // Perform submission + start := time.Now() + res := s.client.Submit(ctx, marshaled, -1, namespace, mergedOptions) + s.logger.Debug().Int("attempts", rs.Attempt).Dur("elapsed", time.Since(start)).Uint64("code", uint64(res.Code)).Msg("got Submit response") + + // Record submission result for observability + if vis := server.GetDAVisualizationServer(); vis != nil { + vis.RecordSubmission(&res, 0, uint64(len(marshaled)), namespace) + } + + switch res.Code { + case datypes.StatusSuccess: + submitted := int(res.SubmittedCount) + if submitted <= 0 || submitted > len(marshaled) { + err := fmt.Errorf("invalid submitted count %d for batch size %d", submitted, len(marshaled)) + s.recordFailure(common.DASubmitterFailureReasonUnknown) + s.logger.Error().Err(err).Str("itemType", itemType).Msg("DA layer returned invalid submitted count") + if onError != nil { + onError(err) + } + return + } + if onSuccess != nil { + onSuccess(submitted, res.Height) + } + s.logger.Info().Str("itemType", itemType).Int("count", submitted).Msg("successfully submitted items to DA layer") + if submitted == len(marshaled) { + return + } + // partial success: advance window + marshaled = marshaled[submitted:] + rs.Next(reasonSuccess, pol) + + case datypes.StatusTooBig: + // Record failure metric + s.recordFailure(common.DASubmitterFailureReasonTooBig) + // Iteratively halve until it fits or single-item too big + if len(marshaled) == 1 { + s.logger.Error(). + Str("itemType", itemType). 
+ Msg("CRITICAL: single item exceeds DA blob size limit") + if onError != nil { + onError(common.ErrOversizedItem) + } + return + } + half := len(marshaled) / 2 + if half == 0 { + half = 1 + } + marshaled = marshaled[:half] + s.logger.Debug().Int("newBatchSize", half).Msg("batch too big; halving and retrying") + rs.Next(reasonTooBig, pol) + + case datypes.StatusNotIncludedInBlock: + // Record failure metric + s.recordFailure(common.DASubmitterFailureReasonNotIncludedInBlock) + s.logger.Info().Dur("backoff", pol.MaxBackoff).Msg("retrying due to mempool state") + rs.Next(reasonMempool, pol) + + case datypes.StatusAlreadyInMempool: + // Record failure metric + s.recordFailure(common.DASubmitterFailureReasonAlreadyInMempool) + s.logger.Info().Dur("backoff", pol.MaxBackoff).Msg("retrying due to mempool state") + rs.Next(reasonMempool, pol) + + case datypes.StatusContextCanceled: + // Record failure metric + s.recordFailure(common.DASubmitterFailureReasonContextCanceled) + s.logger.Info().Msg("DA layer submission canceled due to context cancellation") + if onError != nil { + onError(nil) + } + return + + default: + // Record failure metric + s.recordFailure(common.DASubmitterFailureReasonUnknown) + s.logger.Error().Str("error", res.Message).Int("attempt", rs.Attempt+1).Msg("DA layer submission failed") + rs.Next(reasonFailure, pol) + } + } + + // Final failure after max attempts + s.recordFailure(common.DASubmitterFailureReasonTimeout) + s.logger.Error().Str("itemType", itemType).Int("attempts", rs.Attempt).Msg("failed to submit all items to DA layer after max attempts") + if onError != nil { + onError(fmt.Errorf("failed to submit after %d attempts", rs.Attempt)) + } +} + +// limitBatchBySizeBytes returns a prefix of marshaled blobs whose total size does not exceed maxBytes. +// If the first blob exceeds maxBytes, it returns (nil, true) to indicate an unrecoverable oversized item. +func limitBatchBySizeBytes(marshaled [][]byte, maxBytes uint64) ([][]byte, bool) { + total := uint64(0) + count := 0 + for i, b := range marshaled { + sz := uint64(len(b)) + if sz > maxBytes { + if i == 0 { + return nil, true + } + break + } + if total+sz > maxBytes { + break + } + total += sz + count++ + } + if count == 0 { + return nil, true + } + return marshaled[:count], false +} + +// recordFailure records a DA submission failure in metrics +func (s *DASubmitter) recordFailure(reason common.DASubmitterFailureReason) { + counter, ok := s.metrics.DASubmitterFailures[reason] + if !ok { + s.logger.Warn().Str("reason", string(reason)).Msg("unregistered failure reason, metric not recorded") + return + } + counter.Add(1) + + if gauge, ok := s.metrics.DASubmitterLastFailure[reason]; ok { + gauge.Set(float64(time.Now().Unix())) + } } // createDAEnvelopes creates signed DA envelopes for the given headers. 
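A minimal usage sketch of the new callback-driven submitWithRetry, mirroring the SubmitHeaders wiring above. The blobs/items slices and the markIncluded helper are illustrative stand-ins, not part of this change; onSuccess reports how many blobs the DA layer accepted per Submit call so the caller can advance its own offset, and onError fires once when the retry loop gives up or hits an unrecoverable item.

    // Illustrative caller only. blobs[i] is the marshaled form of items[i];
    // markIncluded is a hypothetical stand-in for the cache / DA-hint updates
    // performed by makeHeaderPostSubmit and makeDataPostSubmit.
    offset := 0
    s.wg.Go(func() {
        s.submitWithRetry(ctx, blobs, s.client.GetHeaderNamespace(),
            func(submitted int, daHeight uint64) {
                markIncluded(items[offset:offset+submitted], daHeight)
                offset += submitted // partial successes advance the window
            },
            func(err error) {
                if err != nil { // onError may pass nil on cancellation
                    s.logger.Error().Err(err).Msg("DA submission gave up")
                }
            },
            "header")
    })
    // Close() (s.wg.Wait()) blocks until all in-flight submissions finish.

Because submission now runs on s.wg, SubmitHeaders and SubmitData return immediately; failures surface through the onSubmitError callback, and Close() gives tests and shutdown a join point, as the updated tests below exercise.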
@@ -283,6 +529,7 @@ func (s *DASubmitter) createDAEnvelopes(ctx context.Context, headers []*types.Si // For small batches, sign sequentially to avoid goroutine overhead if len(needSigning) <= 2 || s.signingWorkers <= 1 { + // Send jobs for _, i := range needSigning { envelope, err := s.signAndCacheEnvelope(ctx, headers[i], marshalledHeaders[i], signer) if err != nil { @@ -330,7 +577,6 @@ func (s *DASubmitter) signEnvelopesParallel( }) } - // Send jobs for _, i := range needSigning { jobs <- signJob{index: i} } @@ -391,9 +637,6 @@ func (s *DASubmitter) getCachedEnvelope(height uint64) []byte { if height <= s.lastSubmittedHeight.Load() { return nil } - s.envelopeCacheMu.RLock() - defer s.envelopeCacheMu.RUnlock() - if envelope, ok := s.envelopeCache.Get(height); ok { return envelope } @@ -410,56 +653,9 @@ func (s *DASubmitter) setCachedEnvelope(height uint64, envelope []byte) { if height <= s.lastSubmittedHeight.Load() { return } - s.envelopeCacheMu.Lock() - defer s.envelopeCacheMu.Unlock() - s.envelopeCache.Add(height, envelope) } -// SubmitData submits pending data to DA layer -func (s *DASubmitter) SubmitData(ctx context.Context, unsignedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error { - if len(unsignedDataList) == 0 { - return nil - } - - if len(marshalledData) != len(unsignedDataList) { - return fmt.Errorf("marshalledData length (%d) does not match unsignedDataList length (%d)", len(marshalledData), len(unsignedDataList)) - } - - // Sign the data (cache returns unsigned SignedData structs) - signedDataList, signedDataListBz, err := s.signData(ctx, unsignedDataList, marshalledData, signer, genesis) - if err != nil { - return fmt.Errorf("failed to sign data: %w", err) - } - - if len(signedDataList) == 0 { - return nil // No non-empty data to submit - } - - s.logger.Info().Int("count", len(signedDataList)).Msg("submitting data to DA") - - return submitToDA(s, ctx, signedDataList, signedDataListBz, - func(submitted []*types.SignedData, res *datypes.ResultSubmit) { - heights := make([]uint64, len(submitted)) - for i, sd := range submitted { - cache.SetDataDAIncluded(sd.Data.DACommitment().String(), res.Height, sd.Height()) - heights[i] = sd.Height() - } - if err := s.dataDAHintAppender.AppendDAHint(ctx, res.Height, heights...); err != nil { - s.logger.Error().Err(err).Msg("failed to append da height hint in data p2p store") - // ignoring error here, since we don't want to block the block submission' - } - if l := len(submitted); l > 0 { - lastHeight := submitted[l-1].Height() - cache.SetLastSubmittedDataHeight(ctx, lastHeight) - } - }, - "data", - s.client.GetDataNamespace(), - []byte(s.config.DA.SubmitOptions), - ) -} - // signData signs unsigned SignedData structs returned from cache func (s *DASubmitter) signData(ctx context.Context, unsignedDataList []*types.SignedData, unsignedDataListBz [][]byte, signer signer.Signer, genesis genesis.Genesis) ([]*types.SignedData, [][]byte, error) { if signer == nil { @@ -556,163 +752,6 @@ func mergeSubmitOptions(baseOptions []byte, signingAddress string) ([]byte, erro return mergedOptions, nil } -// submitToDA is a generic helper for submitting items to the DA layer with retry, backoff, and gas price logic. 
-func submitToDA[T any]( - s *DASubmitter, - ctx context.Context, - items []T, - marshaled [][]byte, - postSubmit func([]T, *datypes.ResultSubmit), - itemType string, - namespace []byte, - options []byte, -) error { - if len(items) != len(marshaled) { - return fmt.Errorf("items length (%d) does not match marshaled length (%d)", len(items), len(marshaled)) - } - - pol := defaultRetryPolicy(s.config.DA.MaxSubmitAttempts, s.config.DA.BlockTime.Duration) - - rs := retryState{Attempt: 0, Backoff: 0} - - // Limit this submission to a single size-capped batch - if len(marshaled) > 0 { - batchItems, batchMarshaled, err := limitBatchBySize(items, marshaled, pol.MaxBlobBytes) - if err != nil { - s.logger.Error(). - Str("itemType", itemType). - Uint64("maxBlobBytes", pol.MaxBlobBytes). - Err(err). - Msg("CRITICAL: Unrecoverable error - item exceeds maximum blob size") - return fmt.Errorf("unrecoverable error: no %s items fit within max blob size: %w", itemType, err) - } - items = batchItems - marshaled = batchMarshaled - } - - // Start the retry loop - for rs.Attempt < pol.MaxAttempts { - // Record resend metric for retry attempts (not the first attempt) - if rs.Attempt > 0 { - s.metrics.DASubmitterResends.Add(1) - } - - if err := waitForBackoffOrContext(ctx, rs.Backoff); err != nil { - return err - } - - // Select signing address and merge with options - signingAddress := s.addressSelector.Next() - mergedOptions, err := mergeSubmitOptions(options, signingAddress) - if err != nil { - s.logger.Error().Err(err).Msg("failed to merge submit options with signing address") - return fmt.Errorf("failed to merge submit options: %w", err) - } - - if signingAddress != "" { - s.logger.Debug().Str("signingAddress", signingAddress).Msg("using signing address for DA submission") - } - - // Perform submission - start := time.Now() - res := s.client.Submit(ctx, marshaled, -1, namespace, mergedOptions) - s.logger.Debug().Int("attempts", rs.Attempt).Dur("elapsed", time.Since(start)).Uint64("code", uint64(res.Code)).Msg("got SubmitWithHelpers response from celestia") - - // Record submission result for observability - if daVisualizationServer := server.GetDAVisualizationServer(); daVisualizationServer != nil { - daVisualizationServer.RecordSubmission(&res, 0, uint64(len(items)), namespace) - } - - switch res.Code { - case datypes.StatusSuccess: - submitted := items[:res.SubmittedCount] - postSubmit(submitted, &res) - s.logger.Info().Str("itemType", itemType).Uint64("count", res.SubmittedCount).Msg("successfully submitted items to DA layer") - if int(res.SubmittedCount) == len(items) { - rs.Next(reasonSuccess, pol) - return nil - } - // partial success: advance window - items = items[res.SubmittedCount:] - marshaled = marshaled[res.SubmittedCount:] - rs.Next(reasonSuccess, pol) - - case datypes.StatusTooBig: - // Record failure metric - s.recordFailure(common.DASubmitterFailureReasonTooBig) - // Iteratively halve until it fits or single-item too big - if len(items) == 1 { - s.logger.Error(). - Str("itemType", itemType). - Uint64("maxBlobBytes", pol.MaxBlobBytes). 
- Msg("CRITICAL: Unrecoverable error - single item exceeds DA blob size limit") - return fmt.Errorf("unrecoverable error: %w: single %s item exceeds DA blob size limit", common.ErrOversizedItem, itemType) - } - half := len(items) / 2 - if half == 0 { - half = 1 - } - items = items[:half] - marshaled = marshaled[:half] - s.logger.Debug().Int("newBatchSize", half).Msg("batch too big; halving and retrying") - rs.Next(reasonTooBig, pol) - - case datypes.StatusNotIncludedInBlock: - // Record failure metric - s.recordFailure(common.DASubmitterFailureReasonNotIncludedInBlock) - s.logger.Info().Dur("backoff", pol.MaxBackoff).Msg("retrying due to mempool state") - rs.Next(reasonMempool, pol) - - case datypes.StatusAlreadyInMempool: - // Record failure metric - s.recordFailure(common.DASubmitterFailureReasonAlreadyInMempool) - s.logger.Info().Dur("backoff", pol.MaxBackoff).Msg("retrying due to mempool state") - rs.Next(reasonMempool, pol) - - case datypes.StatusContextCanceled: - // Record failure metric - s.recordFailure(common.DASubmitterFailureReasonContextCanceled) - s.logger.Info().Msg("DA layer submission canceled due to context cancellation") - return context.Canceled - - default: - // Record failure metric - s.recordFailure(common.DASubmitterFailureReasonUnknown) - s.logger.Error().Str("error", res.Message).Int("attempt", rs.Attempt+1).Msg("DA layer submission failed") - rs.Next(reasonFailure, pol) - } - } - - // Final failure after max attempts - s.recordFailure(common.DASubmitterFailureReasonTimeout) - return fmt.Errorf("failed to submit all %s(s) to DA layer after %d attempts", itemType, rs.Attempt) -} - -// limitBatchBySize returns a prefix of items whose total marshaled size does not exceed maxBytes. -// If the first item exceeds maxBytes, it returns ErrOversizedItem which is unrecoverable. 
-func limitBatchBySize[T any](items []T, marshaled [][]byte, maxBytes uint64) ([]T, [][]byte, error) { - total := uint64(0) - count := 0 - for i := range items { - sz := uint64(len(marshaled[i])) - if sz > maxBytes { - if i == 0 { - return nil, nil, fmt.Errorf("%w: item size %d exceeds max %d", common.ErrOversizedItem, sz, maxBytes) - } - break - } - if total+sz > maxBytes { - break - } - total += sz - count++ - } - if count == 0 { - return nil, nil, fmt.Errorf("no items fit within %d bytes", maxBytes) - } - return items[:count], marshaled[:count], nil -} - func waitForBackoffOrContext(ctx context.Context, backoff time.Duration) error { if backoff <= 0 { select { diff --git a/block/internal/submitting/da_submitter_integration_test.go b/block/internal/submitting/da_submitter_integration_test.go index b2c4efcd20..1de6cc2559 100644 --- a/block/internal/submitting/da_submitter_integration_test.go +++ b/block/internal/submitting/da_submitter_integration_test.go @@ -101,11 +101,13 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( // Submit headers and data - cache returns both items and marshalled bytes headers, marshalledHeaders, err := cm.GetPendingHeaders(context.Background()) require.NoError(t, err) - require.NoError(t, daSubmitter.SubmitHeaders(context.Background(), headers, marshalledHeaders, cm, n)) + require.NoError(t, daSubmitter.SubmitHeaders(context.Background(), headers, marshalledHeaders, cm, n, nil)) dataList, marshalledData, err := cm.GetPendingData(context.Background()) require.NoError(t, err) - require.NoError(t, daSubmitter.SubmitData(context.Background(), dataList, marshalledData, cm, n, gen)) + require.NoError(t, daSubmitter.SubmitData(context.Background(), dataList, marshalledData, cm, n, gen, nil)) + + daSubmitter.Close() // After submission, inclusion markers should be set _, ok := cm.GetHeaderDAIncludedByHeight(1) diff --git a/block/internal/submitting/da_submitter_mocks_test.go b/block/internal/submitting/da_submitter_mocks_test.go index 2d79208e92..021e551210 100644 --- a/block/internal/submitting/da_submitter_mocks_test.go +++ b/block/internal/submitting/da_submitter_mocks_test.go @@ -1,13 +1,12 @@ package submitting import ( - "context" "testing" "time" "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/evstack/ev-node/block/internal/common" "github.com/evstack/ev-node/pkg/config" @@ -16,10 +15,9 @@ import ( "github.com/evstack/ev-node/test/mocks" ) -// helper to build a basic submitter with provided DA mock client and config overrides -func newTestSubmitter(t *testing.T, mockClient *mocks.MockClient, override func(*config.Config)) *DASubmitter { +func newTestBatchSubmitter(t *testing.T, mockClient *mocks.MockClient, override func(*config.Config)) *DASubmitter { + t.Helper() cfg := config.Config{} - // Keep retries small and backoffs minimal cfg.DA.BlockTime.Duration = 1 * time.Millisecond cfg.DA.MaxSubmitAttempts = 3 cfg.DA.SubmitOptions = "opts" @@ -35,115 +33,69 @@ func newTestSubmitter(t *testing.T, mockClient *mocks.MockClient, override func( mockClient.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() mockClient.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() mockClient.On("HasForcedInclusionNamespace").Return(false).Maybe() - return NewDASubmitter(mockClient, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop(), nil, nil) + return NewDASubmitter(mockClient, 
cfg, genesis.Genesis{}, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop(), nil, nil) } -func TestSubmitToDA_MempoolRetry_IncreasesGasAndSucceeds(t *testing.T) { +func TestSubmitWithRetry_MempoolRetry_Succeeds(t *testing.T) { t.Parallel() client := mocks.NewMockClient(t) - nsBz := datypes.NamespaceFromString("ns").Bytes() - opts := []byte("opts") - var usedGas []float64 - client.On("Submit", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). - Run(func(args mock.Arguments) { - usedGas = append(usedGas, args.Get(2).(float64)) - }). - Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusNotIncludedInBlock, SubmittedCount: 0}}). + client.On("Submit", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, mock.Anything). + Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusNotIncludedInBlock}}). Once() - ids := [][]byte{[]byte("id1"), []byte("id2"), []byte("id3")} - client.On("Submit", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). - Run(func(args mock.Arguments) { - usedGas = append(usedGas, args.Get(2).(float64)) - }). - Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: ids, SubmittedCount: uint64(len(ids))}}). + client.On("Submit", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, mock.Anything). + Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: 2}}). Once() - s := newTestSubmitter(t, client, nil) + s := newTestBatchSubmitter(t, client, nil) + defer s.Close() - items := []string{"a", "b", "c"} - marshalledItems := make([][]byte, len(items)) - for idx, item := range items { - marshalledItems[idx] = []byte(item) - } + var submittedCount int + s.submitWithRetry(t.Context(), [][]byte{[]byte("a"), []byte("b")}, nsBz, func(count int, _ uint64) { + submittedCount = count + }, nil, "item") - ctx := context.Background() - err := submitToDA[string]( - s, - ctx, - items, - marshalledItems, - func(_ []string, _ *datypes.ResultSubmit) {}, - "item", - nsBz, - opts, - ) - assert.NoError(t, err) - - // Sentinel value is preserved on retry - assert.Equal(t, []float64{-1, -1}, usedGas) + require.Equal(t, 2, submittedCount) + client.AssertExpectations(t) } -func TestSubmitToDA_UnknownError_RetriesSameGasThenSucceeds(t *testing.T) { +func TestSubmitWithRetry_UnknownError_RetriesThenSucceeds(t *testing.T) { t.Parallel() client := mocks.NewMockClient(t) - nsBz := datypes.NamespaceFromString("ns").Bytes() - opts := []byte("opts") - var usedGas []float64 - - // First attempt: unknown failure -> reasonFailure, gas unchanged for next attempt - client.On("Submit", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). - Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). + client.On("Submit", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, mock.Anything). Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusError, Message: "boom"}}). Once() - // Second attempt: same gas, success - ids := [][]byte{[]byte("id1")} - client.On("Submit", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). - Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: ids, SubmittedCount: uint64(len(ids))}}). 
+ client.On("Submit", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, mock.Anything). + Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: 1}}). Once() - s := newTestSubmitter(t, client, nil) + s := newTestBatchSubmitter(t, client, nil) + defer s.Close() - items := []string{"x"} - marshalledItems := make([][]byte, len(items)) - for idx, item := range items { - marshalledItems[idx] = []byte(item) - } + var submittedCount int + s.submitWithRetry(t.Context(), [][]byte{[]byte("x")}, nsBz, func(count int, _ uint64) { + submittedCount = count + }, nil, "item") - ctx := context.Background() - err := submitToDA[string]( - s, - ctx, - items, - marshalledItems, - func(_ []string, _ *datypes.ResultSubmit) {}, - "item", - nsBz, - opts, - ) - assert.NoError(t, err) - assert.Equal(t, []float64{-1, -1}, usedGas) + require.Equal(t, 1, submittedCount) + client.AssertExpectations(t) } -func TestSubmitToDA_TooBig_HalvesBatch(t *testing.T) { +func TestSubmitWithRetry_TooBig_HalvesBatch(t *testing.T) { t.Parallel() client := mocks.NewMockClient(t) - nsBz := datypes.NamespaceFromString("ns").Bytes() - - opts := []byte("opts") var batchSizes []int - client.On("Submit", mock.Anything, mock.Anything, mock.Anything, nsBz, opts). + client.On("Submit", mock.Anything, mock.Anything, mock.Anything, nsBz, mock.Anything). Run(func(args mock.Arguments) { blobs := args.Get(1).([][]byte) batchSizes = append(batchSizes, len(blobs)) @@ -151,121 +103,152 @@ func TestSubmitToDA_TooBig_HalvesBatch(t *testing.T) { Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusTooBig}}). Once() - ids := [][]byte{[]byte("id1"), []byte("id2")} - client.On("Submit", mock.Anything, mock.Anything, mock.Anything, nsBz, opts). + client.On("Submit", mock.Anything, mock.Anything, mock.Anything, nsBz, mock.Anything). Run(func(args mock.Arguments) { blobs := args.Get(1).([][]byte) batchSizes = append(batchSizes, len(blobs)) }). - Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: ids, SubmittedCount: uint64(len(ids))}}). + Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: 2}}). Once() - s := newTestSubmitter(t, client, nil) + s := newTestBatchSubmitter(t, client, nil) + defer s.Close() - items := []string{"a", "b", "c", "d"} - marshalledItems := make([][]byte, len(items)) - for idx, item := range items { - marshalledItems[idx] = []byte(item) - } + s.submitWithRetry(t.Context(), [][]byte{[]byte("a"), []byte("b"), []byte("c"), []byte("d")}, nsBz, nil, nil, "item") - ctx := context.Background() - err := submitToDA[string]( - s, - ctx, - items, - marshalledItems, - func(_ []string, _ *datypes.ResultSubmit) {}, - "item", - nsBz, - opts, - ) - assert.NoError(t, err) - assert.Equal(t, []int{4, 2}, batchSizes) + require.Equal(t, []int{4, 2}, batchSizes) + client.AssertExpectations(t) } -func TestSubmitToDA_SentinelNoGas_PreservesGasAcrossRetries(t *testing.T) { +func TestSubmitWithRetry_PartialSuccess_Advances(t *testing.T) { t.Parallel() client := mocks.NewMockClient(t) + nsBz := datypes.NamespaceFromString("ns").Bytes() + client.On("Submit", mock.Anything, mock.Anything, mock.Anything, nsBz, mock.Anything). + Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: 2}}). + Once() + + client.On("Submit", mock.Anything, mock.Anything, mock.Anything, nsBz, mock.Anything). 
+ Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: 1}}). + Once() + + s := newTestBatchSubmitter(t, client, nil) + defer s.Close() + + var totalSubmitted int + s.submitWithRetry(t.Context(), [][]byte{[]byte("a"), []byte("b"), []byte("c")}, nsBz, func(count int, _ uint64) { + totalSubmitted += count + }, nil, "item") + + require.Equal(t, 3, totalSubmitted) + client.AssertExpectations(t) +} + +func TestSubmitWithRetry_MaxAttempts_Exhausted(t *testing.T) { + t.Parallel() + + client := mocks.NewMockClient(t) nsBz := datypes.NamespaceFromString("ns").Bytes() - opts := []byte("opts") - var usedGas []float64 + client.On("Submit", mock.Anything, mock.Anything, mock.Anything, nsBz, mock.Anything). + Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusError, Message: "fail"}}). + Times(3) + + s := newTestBatchSubmitter(t, client, nil) + defer s.Close() + + var errReceived error + s.submitWithRetry(t.Context(), [][]byte{[]byte("a")}, nsBz, nil, func(err error) { + errReceived = err + }, "item") - client.On("Submit", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). - Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). + require.Error(t, errReceived) + client.AssertExpectations(t) +} + +func TestSubmitWithRetry_AlreadyInMempool_Retries(t *testing.T) { + t.Parallel() + + client := mocks.NewMockClient(t) + nsBz := datypes.NamespaceFromString("ns").Bytes() + + client.On("Submit", mock.Anything, mock.Anything, mock.Anything, nsBz, mock.Anything). Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusAlreadyInMempool}}). Once() - ids := [][]byte{[]byte("id1")} - client.On("Submit", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). - Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: ids, SubmittedCount: uint64(len(ids))}}). + client.On("Submit", mock.Anything, mock.Anything, mock.Anything, nsBz, mock.Anything). + Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: 1}}). 
Once() - s := newTestSubmitter(t, client, nil) + s := newTestBatchSubmitter(t, client, nil) + defer s.Close() - items := []string{"only"} - marshalledItems := make([][]byte, len(items)) - for idx, item := range items { - marshalledItems[idx] = []byte(item) - } + var submittedCount int + s.submitWithRetry(t.Context(), [][]byte{[]byte("a")}, nsBz, func(count int, _ uint64) { + submittedCount = count + }, nil, "item") - ctx := context.Background() - err := submitToDA[string]( - s, - ctx, - items, - marshalledItems, - func(_ []string, _ *datypes.ResultSubmit) {}, - "item", - nsBz, - opts, - ) - assert.NoError(t, err) - assert.Equal(t, []float64{-1, -1}, usedGas) + require.Equal(t, 1, submittedCount) + client.AssertExpectations(t) } -func TestSubmitToDA_PartialSuccess_AdvancesWindow(t *testing.T) { +func TestSubmitWithRetry_EmptyBatch_Noop(t *testing.T) { t.Parallel() client := mocks.NewMockClient(t) + s := newTestBatchSubmitter(t, client, nil) + defer s.Close() - nsBz := datypes.NamespaceFromString("ns").Bytes() + var errReceived error + s.submitWithRetry(t.Context(), nil, nil, nil, func(err error) { + errReceived = err + }, "item") - opts := []byte("opts") - var totalSubmitted int + require.NoError(t, errReceived) +} - firstIDs := [][]byte{[]byte("id1"), []byte("id2")} - client.On("Submit", mock.Anything, mock.Anything, mock.Anything, nsBz, opts). - Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: firstIDs, SubmittedCount: uint64(len(firstIDs))}}). +func TestSubmitWithRetry_ContextCanceled_Stops(t *testing.T) { + t.Parallel() + + client := mocks.NewMockClient(t) + nsBz := datypes.NamespaceFromString("ns").Bytes() + + client.On("Submit", mock.Anything, mock.Anything, mock.Anything, nsBz, mock.Anything). + Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusContextCanceled}}). Once() - secondIDs := [][]byte{[]byte("id3")} - client.On("Submit", mock.Anything, mock.Anything, mock.Anything, nsBz, opts). - Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: secondIDs, SubmittedCount: uint64(len(secondIDs))}}). + s := newTestBatchSubmitter(t, client, nil) + defer s.Close() + + var errReceived error + s.submitWithRetry(t.Context(), [][]byte{[]byte("a")}, nsBz, nil, func(err error) { + errReceived = err + }, "item") + + require.NoError(t, errReceived) + client.AssertExpectations(t) +} + +func TestSubmitWithRetry_SingleItemTooBig_Fails(t *testing.T) { + t.Parallel() + + client := mocks.NewMockClient(t) + nsBz := datypes.NamespaceFromString("ns").Bytes() + + client.On("Submit", mock.Anything, mock.Anything, mock.Anything, nsBz, mock.Anything). + Return(datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusTooBig}}). 
Once() - s := newTestSubmitter(t, client, nil) + s := newTestBatchSubmitter(t, client, nil) + defer s.Close() - items := []string{"a", "b", "c"} - marshalledItems := make([][]byte, len(items)) - for idx, item := range items { - marshalledItems[idx] = []byte(item) - } + var errReceived error + s.submitWithRetry(t.Context(), [][]byte{[]byte("a")}, nsBz, nil, func(err error) { + errReceived = err + }, "item") - ctx := context.Background() - err := submitToDA[string]( - s, - ctx, - items, - marshalledItems, - func(submitted []string, _ *datypes.ResultSubmit) { totalSubmitted += len(submitted) }, - "item", - nsBz, - opts, - ) - assert.NoError(t, err) - assert.Equal(t, 3, totalSubmitted) + require.ErrorIs(t, errReceived, common.ErrOversizedItem) + client.AssertExpectations(t) } diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index d25786018b..7397f30445 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -35,18 +35,15 @@ const ( func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manager, *mocks.MockClient, genesis.Genesis) { t.Helper() - // Create store and cache ds := sync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) require.NoError(t, err) - // Create config cfg := config.DefaultConfig() cfg.DA.Namespace = testHeaderNamespace cfg.DA.DataNamespace = testDataNamespace - // Mock DA client mockDA := mocks.NewMockClient(t) headerNamespace := datypes.NamespaceFromString(cfg.DA.Namespace).Bytes() dataNamespace := datypes.NamespaceFromString(cfg.DA.DataNamespace).Bytes() @@ -55,7 +52,6 @@ func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manage mockDA.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() mockDA.On("HasForcedInclusionNamespace").Return(false).Maybe() - // Create genesis gen := genesis.Genesis{ ChainID: "test-chain", InitialHeight: 1, @@ -63,7 +59,6 @@ func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manage ProposerAddress: []byte("test-proposer"), } - // Create DA submitter daSubmitter := NewDASubmitter( mockDA, cfg, @@ -218,10 +213,10 @@ func TestDASubmitter_SubmitHeaders_Success(t *testing.T) { // Get headers from cache and submit headers, marshalledHeaders, err := cm.GetPendingHeaders(ctx) require.NoError(t, err) - err = submitter.SubmitHeaders(ctx, headers, marshalledHeaders, cm, signer) + err = submitter.SubmitHeaders(ctx, headers, marshalledHeaders, cm, signer, nil) require.NoError(t, err) + submitter.Close() - // Verify headers are marked as DA included _, ok1 := cm.GetHeaderDAIncludedByHeight(1) assert.True(t, ok1) _, ok2 := cm.GetHeaderDAIncludedByHeight(2) @@ -238,7 +233,7 @@ func TestDASubmitter_SubmitHeaders_NoPendingHeaders(t *testing.T) { // Get headers from cache (should be empty) and submit headers, marshalledHeaders, err := cm.GetPendingHeaders(ctx) require.NoError(t, err) - err = submitter.SubmitHeaders(ctx, headers, marshalledHeaders, cm, signer) + err = submitter.SubmitHeaders(ctx, headers, marshalledHeaders, cm, signer, nil) require.NoError(t, err) // Should succeed with no action mockDA.AssertNotCalled(t, "Submit", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) } @@ -333,8 +328,9 @@ func TestDASubmitter_SubmitData_Success(t *testing.T) { // Get data from cache and submit signedDataList, marshalledData, err := cm.GetPendingData(ctx) require.NoError(t, 
err) - err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, signer, gen) + err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, signer, gen, nil) require.NoError(t, err) + submitter.Close() // Verify data is marked as DA included _, ok := cm.GetDataDAIncludedByHeight(1) @@ -387,7 +383,7 @@ func TestDASubmitter_SubmitData_SkipsEmptyData(t *testing.T) { // Get data from cache and submit signedDataList, marshalledData, err := cm.GetPendingData(ctx) require.NoError(t, err) - err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, signer, gen) + err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, signer, gen, nil) require.NoError(t, err) mockDA.AssertNotCalled(t, "Submit", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) @@ -406,7 +402,7 @@ func TestDASubmitter_SubmitData_NoPendingData(t *testing.T) { // Get data from cache (should be empty) and submit dataList, marshalledData, err := cm.GetPendingData(ctx) require.NoError(t, err) - err = submitter.SubmitData(ctx, dataList, marshalledData, cm, signer, gen) + err = submitter.SubmitData(ctx, dataList, marshalledData, cm, signer, gen, nil) require.NoError(t, err) // Should succeed with no action mockDA.AssertNotCalled(t, "Submit", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) } @@ -447,7 +443,7 @@ func TestDASubmitter_SubmitData_NilSigner(t *testing.T) { // Get data from cache and submit with nil signer - should fail signedDataList, marshalledData, err := cm.GetPendingData(ctx) require.NoError(t, err) - err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, nil, gen) + err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, nil, gen, nil) require.Error(t, err) assert.Contains(t, err.Error(), "signer is nil") mockDA.AssertNotCalled(t, "Submit", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) diff --git a/block/internal/submitting/da_submitter_tracing.go b/block/internal/submitting/da_submitter_tracing.go index e3c531fcf8..155dd92dff 100644 --- a/block/internal/submitting/da_submitter_tracing.go +++ b/block/internal/submitting/da_submitter_tracing.go @@ -30,22 +30,19 @@ func WithTracingDASubmitter(inner DASubmitterAPI) DASubmitterAPI { } } -func (t *tracedDASubmitter) SubmitHeaders(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer) error { +func (t *tracedDASubmitter) SubmitHeaders(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer, onSubmitError func(error)) error { ctx, span := t.tracer.Start(ctx, "DASubmitter.SubmitHeaders", trace.WithAttributes( attribute.Int("header.count", len(headers)), ), ) - defer span.End() - // calculate total size var totalBytes int for _, h := range marshalledHeaders { totalBytes += len(h) } span.SetAttributes(attribute.Int("header.total_bytes", totalBytes)) - // add height range if headers present if len(headers) > 0 { span.SetAttributes( attribute.Int64("header.start_height", int64(headers[0].Height())), @@ -53,32 +50,46 @@ func (t *tracedDASubmitter) SubmitHeaders(ctx context.Context, headers []*types. 
) } - err := t.inner.SubmitHeaders(ctx, headers, marshalledHeaders, cache, signer) + var wrappedOnError func(error) + if onSubmitError != nil { + wrappedOnError = func(err error) { + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } + span.End() + onSubmitError(err) + } + } + + err := t.inner.SubmitHeaders(ctx, headers, marshalledHeaders, cache, signer, wrappedOnError) if err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) + span.End() return err } + if onSubmitError == nil { + span.End() + } + return nil } -func (t *tracedDASubmitter) SubmitData(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error { +func (t *tracedDASubmitter) SubmitData(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis, onSubmitError func(error)) error { ctx, span := t.tracer.Start(ctx, "DASubmitter.SubmitData", trace.WithAttributes( attribute.Int("data.count", len(signedDataList)), ), ) - defer span.End() - // calculate total size var totalBytes int for _, d := range marshalledData { totalBytes += len(d) } span.SetAttributes(attribute.Int("data.total_bytes", totalBytes)) - // add height range if data present if len(signedDataList) > 0 { span.SetAttributes( attribute.Int64("data.start_height", int64(signedDataList[0].Height())), @@ -86,12 +97,33 @@ func (t *tracedDASubmitter) SubmitData(ctx context.Context, signedDataList []*ty ) } - err := t.inner.SubmitData(ctx, signedDataList, marshalledData, cache, signer, genesis) + var wrappedOnError func(error) + if onSubmitError != nil { + wrappedOnError = func(err error) { + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } + span.End() + onSubmitError(err) + } + } + + err := t.inner.SubmitData(ctx, signedDataList, marshalledData, cache, signer, genesis, wrappedOnError) if err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) + span.End() return err } + if onSubmitError == nil { + span.End() + } + return nil } + +func (t *tracedDASubmitter) Close() { + t.inner.Close() +} diff --git a/block/internal/submitting/da_submitter_tracing_test.go b/block/internal/submitting/da_submitter_tracing_test.go index 6edc5c5ec1..36326dc575 100644 --- a/block/internal/submitting/da_submitter_tracing_test.go +++ b/block/internal/submitting/da_submitter_tracing_test.go @@ -19,24 +19,26 @@ import ( ) type mockDASubmitterAPI struct { - submitHeadersFn func(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer) error - submitDataFn func(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error + submitHeadersFn func(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer, onSubmitError func(error)) error + submitDataFn func(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis, onSubmitError func(error)) error } -func (m *mockDASubmitterAPI) SubmitHeaders(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer) error { +func (m *mockDASubmitterAPI) SubmitHeaders(ctx context.Context, headers 
[]*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer, onSubmitError func(error)) error { if m.submitHeadersFn != nil { - return m.submitHeadersFn(ctx, headers, marshalledHeaders, cache, signer) + return m.submitHeadersFn(ctx, headers, marshalledHeaders, cache, signer, onSubmitError) } return nil } -func (m *mockDASubmitterAPI) SubmitData(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error { +func (m *mockDASubmitterAPI) SubmitData(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis, onSubmitError func(error)) error { if m.submitDataFn != nil { - return m.submitDataFn(ctx, signedDataList, marshalledData, cache, signer, genesis) + return m.submitDataFn(ctx, signedDataList, marshalledData, cache, signer, genesis, onSubmitError) } return nil } +func (m *mockDASubmitterAPI) Close() {} + func setupDASubmitterTrace(t *testing.T, inner DASubmitterAPI) (DASubmitterAPI, *tracetest.SpanRecorder) { t.Helper() sr := tracetest.NewSpanRecorder() @@ -48,7 +50,7 @@ func setupDASubmitterTrace(t *testing.T, inner DASubmitterAPI) (DASubmitterAPI, func TestTracedDASubmitter_SubmitHeaders_Success(t *testing.T) { mock := &mockDASubmitterAPI{ - submitHeadersFn: func(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer) error { + submitHeadersFn: func(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer, onSubmitError func(error)) error { return nil }, } @@ -66,7 +68,7 @@ func TestTracedDASubmitter_SubmitHeaders_Success(t *testing.T) { []byte("header3"), } - err := submitter.SubmitHeaders(ctx, headers, marshalledHeaders, nil, nil) + err := submitter.SubmitHeaders(ctx, headers, marshalledHeaders, nil, nil, nil) require.NoError(t, err) spans := sr.Ended() @@ -77,7 +79,7 @@ func TestTracedDASubmitter_SubmitHeaders_Success(t *testing.T) { attrs := span.Attributes() testutil.RequireAttribute(t, attrs, "header.count", 3) - testutil.RequireAttribute(t, attrs, "header.total_bytes", 21) // 7+7+7 + testutil.RequireAttribute(t, attrs, "header.total_bytes", 21) testutil.RequireAttribute(t, attrs, "header.start_height", int64(100)) testutil.RequireAttribute(t, attrs, "header.end_height", int64(102)) } @@ -85,7 +87,7 @@ func TestTracedDASubmitter_SubmitHeaders_Success(t *testing.T) { func TestTracedDASubmitter_SubmitHeaders_Error(t *testing.T) { expectedErr := errors.New("DA submission failed") mock := &mockDASubmitterAPI{ - submitHeadersFn: func(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer) error { + submitHeadersFn: func(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer, onSubmitError func(error)) error { return expectedErr }, } @@ -97,7 +99,7 @@ func TestTracedDASubmitter_SubmitHeaders_Error(t *testing.T) { } marshalledHeaders := [][]byte{[]byte("header1")} - err := submitter.SubmitHeaders(ctx, headers, marshalledHeaders, nil, nil) + err := submitter.SubmitHeaders(ctx, headers, marshalledHeaders, nil, nil, nil) require.Error(t, err) require.Equal(t, expectedErr, err) @@ -110,14 +112,14 @@ func TestTracedDASubmitter_SubmitHeaders_Error(t *testing.T) { func TestTracedDASubmitter_SubmitHeaders_Empty(t 
*testing.T) { mock := &mockDASubmitterAPI{ - submitHeadersFn: func(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer) error { + submitHeadersFn: func(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer, onSubmitError func(error)) error { return nil }, } submitter, sr := setupDASubmitterTrace(t, mock) ctx := context.Background() - err := submitter.SubmitHeaders(ctx, []*types.SignedHeader{}, [][]byte{}, nil, nil) + err := submitter.SubmitHeaders(ctx, []*types.SignedHeader{}, [][]byte{}, nil, nil, nil) require.NoError(t, err) spans := sr.Ended() @@ -131,7 +133,7 @@ func TestTracedDASubmitter_SubmitHeaders_Empty(t *testing.T) { func TestTracedDASubmitter_SubmitData_Success(t *testing.T) { mock := &mockDASubmitterAPI{ - submitDataFn: func(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error { + submitDataFn: func(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis, onSubmitError func(error)) error { return nil }, } @@ -147,7 +149,7 @@ func TestTracedDASubmitter_SubmitData_Success(t *testing.T) { []byte("data2data2"), } - err := submitter.SubmitData(ctx, signedDataList, marshalledData, nil, nil, genesis.Genesis{}) + err := submitter.SubmitData(ctx, signedDataList, marshalledData, nil, nil, genesis.Genesis{}, nil) require.NoError(t, err) spans := sr.Ended() @@ -158,7 +160,7 @@ func TestTracedDASubmitter_SubmitData_Success(t *testing.T) { attrs := span.Attributes() testutil.RequireAttribute(t, attrs, "data.count", 2) - testutil.RequireAttribute(t, attrs, "data.total_bytes", 20) // 10+10 + testutil.RequireAttribute(t, attrs, "data.total_bytes", 20) testutil.RequireAttribute(t, attrs, "data.start_height", int64(100)) testutil.RequireAttribute(t, attrs, "data.end_height", int64(101)) } @@ -166,7 +168,7 @@ func TestTracedDASubmitter_SubmitData_Success(t *testing.T) { func TestTracedDASubmitter_SubmitData_Error(t *testing.T) { expectedErr := errors.New("data submission failed") mock := &mockDASubmitterAPI{ - submitDataFn: func(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error { + submitDataFn: func(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis, onSubmitError func(error)) error { return expectedErr }, } @@ -178,7 +180,7 @@ func TestTracedDASubmitter_SubmitData_Error(t *testing.T) { } marshalledData := [][]byte{[]byte("data1")} - err := submitter.SubmitData(ctx, signedDataList, marshalledData, nil, nil, genesis.Genesis{}) + err := submitter.SubmitData(ctx, signedDataList, marshalledData, nil, nil, genesis.Genesis{}, nil) require.Error(t, err) require.Equal(t, expectedErr, err) diff --git a/block/internal/submitting/submitter.go b/block/internal/submitting/submitter.go index 25dcd781a1..2c613327f7 100644 --- a/block/internal/submitting/submitter.go +++ b/block/internal/submitting/submitter.go @@ -25,8 +25,9 @@ import ( // DASubmitterAPI defines minimal methods needed by Submitter for DA submissions. 
type DASubmitterAPI interface { - SubmitHeaders(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer) error - SubmitData(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis) error + SubmitHeaders(ctx context.Context, headers []*types.SignedHeader, marshalledHeaders [][]byte, cache cache.Manager, signer signer.Signer, onSubmitError func(error)) error + SubmitData(ctx context.Context, signedDataList []*types.SignedData, marshalledData [][]byte, cache cache.Manager, signer signer.Signer, genesis genesis.Genesis, onSubmitError func(error)) error + Close() } // Submitter handles DA submission and inclusion processing for both sync and aggregator nodes @@ -153,17 +154,17 @@ func (s *Submitter) Stop() error { if s.cancel != nil { s.cancel() } - // Wait for goroutines to finish with a timeout to prevent hanging done := make(chan struct{}) go func() { s.wg.Wait() + s.daSubmitter.Close() close(done) }() select { case <-done: - // All goroutines finished cleanly case <-time.After(5 * time.Second): s.logger.Warn().Msg("submitter shutdown timed out waiting for goroutines, proceeding anyway") + s.daSubmitter.Close() } s.logger.Info().Msg("submitter stopped") return nil @@ -206,9 +207,15 @@ func (s *Submitter) daSubmissionLoop() { // Get headers with marshalled bytes from cache headers, marshalledHeaders, err := s.cache.GetPendingHeaders(s.ctx) if err != nil { + if len(headers) > 0 { + s.cache.ResetInFlightHeaderRange(headers[0].Height(), headers[len(headers)-1].Height()) + } s.logger.Error().Err(err).Msg("failed to get pending headers for batching decision") return } + if len(headers) == 0 { + return + } // Calculate total size (excluding signature) totalSize := uint64(0) @@ -223,27 +230,41 @@ func (s *Submitter) daSubmissionLoop() { timeSinceLastSubmit, ) - if shouldSubmit { - s.logger.Debug(). - Time("t", time.Now()). - Uint64("headers", headersNb). - Uint64("total_size_kb", totalSize/1024). - Dur("time_since_last", timeSinceLastSubmit). - Msg("batching strategy triggered header submission") - - if err := s.daSubmitter.SubmitHeaders(s.ctx, headers, marshalledHeaders, s.cache, s.signer); err != nil { - // Check for unrecoverable errors that indicate a critical issue - if errors.Is(err, common.ErrOversizedItem) { - s.logger.Error().Err(err). - Msg("CRITICAL: Header exceeds DA blob size limit - halting to prevent live lock") - s.sendCriticalError(fmt.Errorf("unrecoverable DA submission error: %w", err)) - return - } + if !shouldSubmit { + if len(headers) > 0 { + s.cache.ResetInFlightHeaderRange(headers[0].Height(), headers[len(headers)-1].Height()) + } + return + } + + s.logger.Debug(). + Time("t", time.Now()). + Uint64("headers", headersNb). + Uint64("total_size_kb", totalSize/1024). + Dur("time_since_last", timeSinceLastSubmit). + Msg("batching strategy triggered header submission") + + s.lastHeaderSubmit.Store(time.Now().UnixNano()) + onError := func(err error) { + if errors.Is(err, common.ErrOversizedItem) { + s.logger.Error().Err(err). 
+ Msg("CRITICAL: Header exceeds DA blob size limit - halting to prevent live lock") + s.sendCriticalError(fmt.Errorf("unrecoverable DA submission error: %w", err)) + return + } + if len(headers) > 0 { + s.cache.ResetInFlightHeaderRange(headers[0].Height(), headers[len(headers)-1].Height()) + } + if err != nil { s.logger.Error().Err(err).Msg("failed to submit headers") - } else { - s.lastHeaderSubmit.Store(time.Now().UnixNano()) } } + if err := s.daSubmitter.SubmitHeaders(s.ctx, headers, marshalledHeaders, s.cache, s.signer, onError); err != nil { + if len(headers) > 0 { + s.cache.ResetInFlightHeaderRange(headers[0].Height(), headers[len(headers)-1].Height()) + } + s.logger.Error().Err(err).Msg("failed to enqueue header submission") + } }() } } @@ -266,9 +287,15 @@ func (s *Submitter) daSubmissionLoop() { // Get data with marshalled bytes from cache signedDataList, marshalledData, err := s.cache.GetPendingData(s.ctx) if err != nil { + if len(signedDataList) > 0 { + s.cache.ResetInFlightDataRange(signedDataList[0].Height(), signedDataList[len(signedDataList)-1].Height()) + } s.logger.Error().Err(err).Msg("failed to get pending data for batching decision") return } + if len(signedDataList) == 0 { + return + } // Calculate total size (excluding signature) totalSize := uint64(0) @@ -283,27 +310,41 @@ func (s *Submitter) daSubmissionLoop() { timeSinceLastSubmit, ) - if shouldSubmit { - s.logger.Debug(). - Time("t", time.Now()). - Uint64("data", dataNb). - Uint64("total_size_kb", totalSize/1024). - Dur("time_since_last", timeSinceLastSubmit). - Msg("batching strategy triggered data submission") - - if err := s.daSubmitter.SubmitData(s.ctx, signedDataList, marshalledData, s.cache, s.signer, s.genesis); err != nil { - // Check for unrecoverable errors that indicate a critical issue - if errors.Is(err, common.ErrOversizedItem) { - s.logger.Error().Err(err). - Msg("CRITICAL: Data exceeds DA blob size limit - halting to prevent live lock") - s.sendCriticalError(fmt.Errorf("unrecoverable DA submission error: %w", err)) - return - } + if !shouldSubmit { + if len(signedDataList) > 0 { + s.cache.ResetInFlightDataRange(signedDataList[0].Height(), signedDataList[len(signedDataList)-1].Height()) + } + return + } + + s.logger.Debug(). + Time("t", time.Now()). + Uint64("data", dataNb). + Uint64("total_size_kb", totalSize/1024). + Dur("time_since_last", timeSinceLastSubmit). + Msg("batching strategy triggered data submission") + + s.lastDataSubmit.Store(time.Now().UnixNano()) + onError := func(err error) { + if errors.Is(err, common.ErrOversizedItem) { + s.logger.Error().Err(err). 
+ Msg("CRITICAL: Data exceeds DA blob size limit - halting to prevent live lock") + s.sendCriticalError(fmt.Errorf("unrecoverable DA submission error: %w", err)) + return + } + if len(signedDataList) > 0 { + s.cache.ResetInFlightDataRange(signedDataList[0].Height(), signedDataList[len(signedDataList)-1].Height()) + } + if err != nil { s.logger.Error().Err(err).Msg("failed to submit data") - } else { - s.lastDataSubmit.Store(time.Now().UnixNano()) } } + if err := s.daSubmitter.SubmitData(s.ctx, signedDataList, marshalledData, s.cache, s.signer, s.genesis, onError); err != nil { + if len(signedDataList) > 0 { + s.cache.ResetInFlightDataRange(signedDataList[0].Height(), signedDataList[len(signedDataList)-1].Height()) + } + s.logger.Error().Err(err).Msg("failed to enqueue data submission") + } }() } } @@ -433,6 +474,9 @@ func putUint64Metadata(ctx context.Context, st store.Store, key string, val uint // sendCriticalError sends a critical error to the error channel without blocking func (s *Submitter) sendCriticalError(err error) { + if s.cancel != nil { + s.cancel() + } if s.errorCh != nil { select { case s.errorCh <- err: diff --git a/block/internal/submitting/submitter_test.go b/block/internal/submitting/submitter_test.go index b1e2d2e988..6e684176be 100644 --- a/block/internal/submitting/submitter_test.go +++ b/block/internal/submitting/submitter_test.go @@ -421,7 +421,7 @@ type fakeDASubmitter struct { chData chan struct{} } -func (f *fakeDASubmitter) SubmitHeaders(ctx context.Context, _ []*types.SignedHeader, _ [][]byte, _ cache.Manager, _ signer.Signer) error { +func (f *fakeDASubmitter) SubmitHeaders(ctx context.Context, _ []*types.SignedHeader, _ [][]byte, _ cache.Manager, _ signer.Signer, _ func(error)) error { select { case f.chHdr <- struct{}{}: default: @@ -429,7 +429,7 @@ func (f *fakeDASubmitter) SubmitHeaders(ctx context.Context, _ []*types.SignedHe return nil } -func (f *fakeDASubmitter) SubmitData(ctx context.Context, _ []*types.SignedData, _ [][]byte, _ cache.Manager, _ signer.Signer, _ genesis.Genesis) error { +func (f *fakeDASubmitter) SubmitData(ctx context.Context, _ []*types.SignedData, _ [][]byte, _ cache.Manager, _ signer.Signer, _ genesis.Genesis, _ func(error)) error { select { case f.chData <- struct{}{}: default: @@ -437,6 +437,8 @@ func (f *fakeDASubmitter) SubmitData(ctx context.Context, _ []*types.SignedData, return nil } +func (f *fakeDASubmitter) Close() {} + // fakeSigner implements signer.Signer with deterministic behavior for tests. type fakeSigner struct{} diff --git a/block/public.go b/block/public.go index cc7691c299..695983a3ad 100644 --- a/block/public.go +++ b/block/public.go @@ -19,6 +19,14 @@ func DefaultBlockOptions() BlockOptions { return common.DefaultBlockOptions() } +// SetMaxBlobSize overrides the per-blob byte cap used by the executor +// and DA submitter when sizing batches and validating individual blobs. +// Intended for one-shot startup wiring (e.g. to lift Celestia's 5 MiB +// default to Fibre's 120 MiB headroom). +func SetMaxBlobSize(n uint64) { + common.DefaultMaxBlobSize = n +} + // Expose Metrics for constructor type Metrics = common.Metrics @@ -42,6 +50,17 @@ type DAVerifier = da.Verifier // This is the complete interface implemented by the concrete DA client. type FullDAClient = da.FullClient +// FiberClient is the interface for Fiber DA backends. Implementations +// handle upload, download and listen operations against a Fiber network. +type FiberClient = da.FiberClient + +// Fiber types exposed for external adapters (e.g. 
tools/local-fiber). +type ( + FiberBlobID = da.BlobID + FiberUploadResult = da.UploadResult + FiberBlobEvent = da.BlobEvent +) + // NewDAClient creates a new DA client backed by the blob JSON-RPC API. // The returned client implements both DAClient and DAVerifier interfaces. func NewDAClient( @@ -63,6 +82,34 @@ func NewDAClient( return base } +// NewFiberDAClient creates a new DA client backed by the Fiber protocol. +// The fiberClient parameter must implement the da.FiberClient interface. +// The returned client implements both DAClient and DAVerifier interfaces. +func NewFiberDAClient( + fiberClient da.FiberClient, + config config.Config, + logger zerolog.Logger, + lastKnownDaHeight uint64, +) FullDAClient { + base, err := da.NewFiberClient(da.FiberConfig{ + Client: fiberClient, + Logger: logger, + DefaultTimeout: config.DA.RequestTimeout.Duration, + Namespace: config.DA.GetNamespace(), + DataNamespace: config.DA.GetDataNamespace(), + LastKnownDAHeight: lastKnownDaHeight, + }) + if err != nil { + panic(err) + } + + if config.Instrumentation.IsTracingEnabled() { + return da.WithTracingClient(base) + } + + return base +} + // Exported errors used by the sequencers var ( // ErrForceInclusionNotConfigured is returned when force inclusion is not configured. diff --git a/pkg/cmd/run_node.go b/pkg/cmd/run_node.go index 8b241c98db..39da4cae05 100644 --- a/pkg/cmd/run_node.go +++ b/pkg/cmd/run_node.go @@ -15,7 +15,6 @@ import ( "github.com/rs/zerolog" "github.com/spf13/cobra" - "github.com/evstack/ev-node/block" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/node" @@ -25,7 +24,10 @@ import ( "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" pkgsigner "github.com/evstack/ev-node/pkg/signer" + "github.com/evstack/ev-node/pkg/store" "github.com/evstack/ev-node/pkg/telemetry" + + "github.com/evstack/ev-node/block" ) // ParseConfig is an helpers that loads the node configuration and validates it. 
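Taken together, the two public hooks added above (SetMaxBlobSize and NewFiberDAClient) are meant to be called once during startup wiring. Below is a minimal sketch of such a caller; the helper name is hypothetical, the 120 MiB figure is taken from the SetMaxBlobSize doc comment, and only block-package APIs introduced in this diff are used:

    import (
        "github.com/rs/zerolog"

        "github.com/evstack/ev-node/block"
        "github.com/evstack/ev-node/pkg/config"
    )

    // buildFiberDA is a hypothetical helper: one-shot startup wiring of a
    // Fiber-backed DA client from an adapter that satisfies block.FiberClient
    // (for example the tools/celestia-node-fiber Adapter added later in this diff).
    func buildFiberDA(fiberClient block.FiberClient, cfg config.Config, logger zerolog.Logger, lastDAHeight uint64) block.FullDAClient {
        // Lift the per-blob cap before any batching or blob-size validation runs.
        block.SetMaxBlobSize(120 << 20)

        // The returned client implements both DAClient and DAVerifier.
        return block.NewFiberDAClient(fiberClient, cfg, logger, lastDAHeight)
    }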
@@ -86,6 +88,7 @@ func StartNode( nodeConfig rollconf.Config, genesis genesispkg.Genesis, nodeOptions node.NodeOptions, + fiberClient block.FiberClient, ) error { ctx, cancel := context.WithCancel(cmd.Context()) defer cancel() @@ -149,12 +152,32 @@ func StartNode( } } - blobClient, err := blobrpc.NewWSClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, "") - if err != nil { - return fmt.Errorf("failed to create blob client: %w", err) + var daClient block.FullDAClient + if nodeConfig.DA.IsFiberEnabled() { + if fiberClient == nil { + return fmt.Errorf("fiber DA is enabled but no fiber client was provided") + } + + mainKV := store.NewEvNodeKVStore(datastore) + baseStore := store.New(mainKV) + + var latestDAHeight uint64 + latestState, err := baseStore.GetState(cmd.Context()) + if err != nil { + latestDAHeight = genesis.DAStartHeight + } else { + latestDAHeight = latestState.DAHeight + } + + daClient = block.NewFiberDAClient(fiberClient, nodeConfig, logger, latestDAHeight) + } else { + blobClient, err := blobrpc.NewWSClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, "") + if err != nil { + return fmt.Errorf("failed to create blob client: %w", err) + } + defer blobClient.Close() + daClient = block.NewDAClient(blobClient, nodeConfig, logger) } - defer blobClient.Close() - daClient := block.NewDAClient(blobClient, nodeConfig, logger) // sanity check for based sequencer if nodeConfig.Node.BasedSequencer && genesis.DAStartHeight == 0 { diff --git a/pkg/cmd/run_node_test.go b/pkg/cmd/run_node_test.go index 1ed1a7e189..c41d7a2a3e 100644 --- a/pkg/cmd/run_node_test.go +++ b/pkg/cmd/run_node_test.go @@ -651,7 +651,7 @@ func TestStartNodeErrors(t *testing.T) { runFunc := func(ctx context.Context) { currentTestLogger := zerolog.Nop() cmd.SetContext(ctx) - err := StartNode(currentTestLogger, cmd, executor, sequencer, nodeKey, ds, nodeConfig, testGenesis, node.NodeOptions{}) //nolint:contextcheck // test invokes command entrypoint directly + err := StartNode(currentTestLogger, cmd, executor, sequencer, nodeKey, ds, nodeConfig, testGenesis, node.NodeOptions{}, nil) //nolint:contextcheck // test invokes command entrypoint directly if tc.expectedError != "" { assert.ErrorContains(t, err, tc.expectedError) } else { @@ -668,7 +668,7 @@ func TestStartNodeErrors(t *testing.T) { } else { assert.NotPanics(t, func() { runFunc(baseCtx) }) checkLogger := zerolog.Nop() - err := StartNode(checkLogger, cmd, executor, sequencer, nodeKey, ds, nodeConfig, testGenesis, node.NodeOptions{}) //nolint:contextcheck // test invokes command entrypoint directly + err := StartNode(checkLogger, cmd, executor, sequencer, nodeKey, ds, nodeConfig, testGenesis, node.NodeOptions{}, nil) //nolint:contextcheck // test invokes command entrypoint directly if tc.expectedError != "" { assert.ErrorContains(t, err, tc.expectedError) } @@ -702,7 +702,7 @@ func newRunNodeCmd( Aliases: []string{"node", "run"}, Short: "Run the rollkit node", RunE: func(cmd *cobra.Command, args []string) error { //nolint:contextcheck // cobra RunE signature is fixed - return StartNode(zerolog.Nop(), cmd, executor, sequencer, nodeKey, datastore, nodeConfig, testGenesis, node.NodeOptions{}) + return StartNode(zerolog.Nop(), cmd, executor, sequencer, nodeKey, datastore, nodeConfig, testGenesis, node.NodeOptions{}, nil) }, } diff --git a/pkg/config/config.go b/pkg/config/config.go index 87e12f5597..7cbb780a21 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -3,6 +3,7 @@ package config import ( "errors" "fmt" + "net/url" "os" 
"path" "path/filepath" @@ -92,6 +93,19 @@ const ( // FlagDAStartHeight is a flag for forcing the DA retrieval height to start from a specific height FlagDAStartHeight = FlagPrefixEvnode + "da.start_height" + // Fiber DA configuration flags + + // FlagDAFiberEnabled enables the Fiber DA client instead of the default JSON-RPC blob client + FlagDAFiberEnabled = FlagPrefixEvnode + "da.fiber.enabled" + // FlagDAFiberConsensusAddress is the gRPC address of the celestia-app node for Fiber state queries + FlagDAFiberConsensusAddress = FlagPrefixEvnode + "da.fiber.consensus_address" + // FlagConsensusChainID is the Chain ID of the celestia app node + FlagDAFiberConsensusChainID = FlagPrefixEvnode + "da.fiber.consensus_chain_id" + // FlagDAFiberBridgeAddress is the gRPC address of the bridge node for Fiber state queries + FlagDAFiberBridgeAddress = FlagPrefixEvnode + "da.fiber.bridge_address" + // FlagDAFiberKeyName is the key name in the keyring to use for signing payment promises + FlagDAFiberKeyName = FlagPrefixEvnode + "da.fiber.key_name" + // P2P configuration flags // FlagP2PListenAddress is a flag for specifying the P2P listen address @@ -266,6 +280,94 @@ type DAConfig struct { BatchSizeThreshold float64 `mapstructure:"batch_size_threshold" yaml:"batch_size_threshold" comment:"Minimum blob size threshold (as fraction of max blob size, 0.0-1.0) before submitting. Only applies to 'size' and 'adaptive' strategies. Example: 0.8 means wait until batch is 80% full. Default: 0.8."` BatchMaxDelay DurationWrapper `mapstructure:"batch_max_delay" yaml:"batch_max_delay" comment:"Maximum time to wait before submitting a batch regardless of size. Applies to 'time' and 'adaptive' strategies. Lower values reduce latency but may increase costs. Examples: \"6s\", \"12s\", \"30s\". Default: DA BlockTime."` BatchMinItems uint64 `mapstructure:"batch_min_items" yaml:"batch_min_items" comment:"Minimum number of items (headers or data) to accumulate before considering submission. Helps avoid submitting single items when more are expected soon. Default: 1."` + + // Fiber DA client configuration + Fiber FiberDAConfig `mapstructure:"fiber" yaml:"fiber"` +} + +// FiberDAConfig contains configuration for the Fiber DA client. +// When Enabled is true, the Fiber client is used instead of the default +// JSON-RPC blob client for DA operations. +type FiberDAConfig struct { + // Enabled switches the DA backend from the default JSON-RPC blob client + // to the Fiber protocol client. + Enabled bool `mapstructure:"enabled" yaml:"enabled" comment:"Enable the Fiber DA client for direct validator communication instead of the default JSON-RPC blob client"` + // ConsensusChainID is the Chain ID of the chain to which data is posted. + ConsensusChainID string `mapstructure:"consensus_chain_id" yaml:"consensus_chain_id" comment:"Chain ID of the chain to which data is posted"` + // ConsensusAddress is the gRPC address of the celestia-app node used for state queries (validator set, chain ID, promise verification). + ConsensusAddress string `mapstructure:"consensus_address" yaml:"consensus_address" comment:"gRPC address of the celestia-app node for Fiber state queries (host:port)"` + // BridgeAddress is the address of the bridge node. + BridgeAddress string `mapstructure:"bridge_address" yaml:"bridge_address" comment:"Bridge Node Address for Fiber"` + // KeyringPath is the directory path containing the keyring for signing + // Fiber payment promises. 
+ KeyringPath string `mapstructure:"keyring_path" yaml:"keyring_path" comment:"Path to the keyring directory for Fiber payment promise signing"` + // KeyName is the name of the key in the keyring to use for signing. + KeyName string `mapstructure:"key_name" yaml:"key_name" comment:"Name of the key in the keyring to use for signing Fiber payment promises"` +} + +// Validate checks that a FiberDAConfig is usable. Only called when enabled; +// a disabled block is always valid because the Fibre client is not built. +func (c *FiberDAConfig) Validate() error { + if !c.Enabled { + return nil + } + if c.ConsensusAddress == "" { + return fmt.Errorf("%s is required when fiber DA is enabled", FlagDAFiberConsensusAddress) + } + + if c.BridgeAddress == "" { + return fmt.Errorf("%s is required when fiber DA is enabled", FlagDAFiberBridgeAddress) + } + + if c.ConsensusChainID == "" { + return fmt.Errorf("%s is required when fiber DA is enabled", FlagDAFiberConsensusChainID) + } + + u, err := url.Parse(c.BridgeAddress) + if err != nil { + return fmt.Errorf("%s: %w", FlagDAFiberBridgeAddress, err) + } + if u.Scheme != "ws" && u.Scheme != "wss" { + return fmt.Errorf( + "%s must use ws:// or wss:// (got %q) — blob.Subscribe requires WebSocket, HTTP JSON-RPC cannot stream channels", + FlagDAFiberBridgeAddress, u.Scheme, + ) + } + + return nil +} + +// IsFiberEnabled returns true if the Fiber DA client is configured and enabled. +func (d *DAConfig) IsFiberEnabled() bool { + return d.Fiber.Enabled +} + +// ApplyFiberDefaults flips the DA client to Fiber-friendly defaults +// when the Fiber profile is enabled — adaptive batching, a 1 s +// DA.BlockTime so inclusion-tracking keeps pace with Fibre's +// settlement, and a bounded pending-cache window so a Fibre stall +// can't grow memory unbounded. Caller-provided non-zero values for +// the tunables (BatchSizeThreshold, BatchMinItems) are preserved. +// +// Intended to be invoked once at runner startup, after parsing the +// usual config but before constructing the DA client. +func (c *Config) ApplyFiberDefaults() { + if !c.DA.IsFiberEnabled() { + return + } + + c.DA.BatchingStrategy = "adaptive" + if c.DA.BatchSizeThreshold <= 0 || c.DA.BatchSizeThreshold > 1 { + c.DA.BatchSizeThreshold = 0.5 + } + c.DA.BatchMaxDelay = DurationWrapper{Duration: 8 * time.Second} + if c.DA.BatchMinItems == 0 { + c.DA.BatchMinItems = 1 + } + + c.DA.BlockTime = DurationWrapper{Duration: 1 * time.Second} + c.Node.MaxPendingHeadersAndData = 50 +} // GetNamespace returns the namespace for header submissions.
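For orientation, a minimal sketch of the intended call order at runner startup, with Validate run before ApplyFiberDefaults; the caller shape, variable names, and error handling are illustrative, not part of this diff:

    // Illustrative startup fragment: nodeConfig is assumed to be a
    // config.Config populated by the usual loader.
    if nodeConfig.DA.IsFiberEnabled() {
        if err := nodeConfig.DA.Fiber.Validate(); err != nil {
            return fmt.Errorf("invalid fiber DA config: %w", err)
        }
    }
    // No-op unless Fiber is enabled; overrides BatchingStrategy, BatchMaxDelay,
    // DA.BlockTime and Node.MaxPendingHeadersAndData, and preserves a valid
    // BatchSizeThreshold and a non-zero BatchMinItems.
    nodeConfig.ApplyFiberDefaults()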
@@ -620,6 +722,13 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().Uint64(FlagDAStartHeight, def.DA.StartHeight, "force DA retrieval to start from a specific height (0 for disabled)") cmd.Flags().MarkHidden(FlagDAStartHeight) + // Fiber DA configuration flags + cmd.Flags().Bool(FlagDAFiberEnabled, def.DA.Fiber.Enabled, "enable the Fiber DA client for direct validator communication") + cmd.Flags().String(FlagDAFiberConsensusAddress, def.DA.Fiber.ConsensusAddress, "gRPC address of the celestia-app node for Fiber state queries (host:port)") + cmd.Flags().String(FlagDAFiberConsensusChainID, def.DA.Fiber.ConsensusChainID, "Chain ID of the celestia app") + cmd.Flags().String(FlagDAFiberBridgeAddress, def.DA.Fiber.BridgeAddress, "JSON RPC of the DA node") + cmd.Flags().String(FlagDAFiberKeyName, def.DA.Fiber.KeyName, "name of the key in the keyring for signing Fiber payment promises") + // P2P configuration flags cmd.Flags().String(FlagP2PListenAddress, def.P2P.ListenAddress, "P2P listen address (host:port)") cmd.Flags().String(FlagP2PPeers, def.P2P.Peers, "Comma separated list of seed nodes to connect to") diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 02040ebba9..e3a69b9be0 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -78,6 +78,16 @@ func TestAddFlags(t *testing.T) { assertFlagValue(t, flags, FlagDAMempoolTTL, DefaultConfig().DA.MempoolTTL) assertFlagValue(t, flags, FlagDAMaxSubmitAttempts, DefaultConfig().DA.MaxSubmitAttempts) assertFlagValue(t, flags, FlagDARequestTimeout, DefaultConfig().DA.RequestTimeout.Duration) + assertFlagValue(t, flags, FlagDABatchingStrategy, DefaultConfig().DA.BatchingStrategy) + assertFlagValue(t, flags, FlagDABatchSizeThreshold, DefaultConfig().DA.BatchSizeThreshold) + assertFlagValue(t, flags, FlagDABatchMaxDelay, DefaultConfig().DA.BatchMaxDelay.Duration) + assertFlagValue(t, flags, FlagDABatchMinItems, DefaultConfig().DA.BatchMinItems) + + // DA Fiber flags + assertFlagValue(t, flags, FlagDAFiberEnabled, DefaultConfig().DA.Fiber.Enabled) + assertFlagValue(t, flags, FlagDAFiberConsensusAddress, DefaultConfig().DA.Fiber.ConsensusAddress) + assertFlagValue(t, flags, FlagDAFiberConsensusChainID, DefaultConfig().DA.Fiber.ConsensusChainID) + assertFlagValue(t, flags, FlagDAFiberKeyName, DefaultConfig().DA.Fiber.KeyName) // P2P flags assertFlagValue(t, flags, FlagP2PListenAddress, DefaultConfig().P2P.ListenAddress) @@ -144,7 +154,7 @@ func TestAddFlags(t *testing.T) { assertFlagValue(t, flags, FlagPruningInterval, DefaultConfig().Pruning.Interval.Duration) // Count the number of flags we're explicitly checking - expectedFlagCount := 82 // Update this number if you add more flag checks above + expectedFlagCount := 84 // Update this number if you add more flag checks above // Get the actual number of flags (both regular and persistent) actualFlagCount := 0 diff --git a/pkg/config/defaults.go b/pkg/config/defaults.go index 233585e0c5..43c40496b2 100644 --- a/pkg/config/defaults.go +++ b/pkg/config/defaults.go @@ -84,6 +84,13 @@ func DefaultConfig() Config { BatchSizeThreshold: 0.8, BatchMaxDelay: DurationWrapper{0}, // 0 means use DA BlockTime BatchMinItems: 1, + Fiber: FiberDAConfig{ + Enabled: false, + ConsensusAddress: "127.0.0.1:9090", + ConsensusChainID: "mocha-4", + BridgeAddress: "ws://127.0.0.1:1234", + KeyName: "default-fibre", + }, }, Instrumentation: DefaultInstrumentationConfig(), Log: LogConfig{ diff --git a/pkg/sequencers/solo/sequencer.go b/pkg/sequencers/solo/sequencer.go index 0fcae9f31c..86fa08d45d 
100644 --- a/pkg/sequencers/solo/sequencer.go +++ b/pkg/sequencers/solo/sequencer.go @@ -16,6 +16,12 @@ import ( var ErrInvalidID = errors.New("invalid chain id") +var ( + emptyBatch = &coresequencer.Batch{} + submitBatchResp = &coresequencer.SubmitBatchTxsResponse{} + verifyBatchOKResp = &coresequencer.VerifyBatchResponse{Status: true} +) + var _ coresequencer.Sequencer = (*SoloSequencer)(nil) // SoloSequencer is a single-leader sequencer without forced inclusion @@ -55,14 +61,14 @@ func (s *SoloSequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.Su } if req.Batch == nil || len(req.Batch.Transactions) == 0 { - return &coresequencer.SubmitBatchTxsResponse{}, nil + return submitBatchResp, nil } s.mu.Lock() defer s.mu.Unlock() s.queue = append(s.queue, req.Batch.Transactions...) - return &coresequencer.SubmitBatchTxsResponse{}, nil + return submitBatchResp, nil } func (s *SoloSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextBatchRequest) (*coresequencer.GetNextBatchResponse, error) { @@ -77,7 +83,7 @@ func (s *SoloSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetN if len(txs) == 0 { return &coresequencer.GetNextBatchResponse{ - Batch: &coresequencer.Batch{}, + Batch: emptyBatch, Timestamp: time.Now().UTC(), BatchData: req.LastBatchData, }, nil @@ -94,21 +100,22 @@ func (s *SoloSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetN filterStatuses, err := s.executor.FilterTxs(ctx, txs, req.MaxBytes, maxGas, false) if err != nil { s.logger.Warn().Err(err).Msg("failed to filter transactions, proceeding with unfiltered") - filterStatuses = make([]execution.FilterStatus, len(txs)) - for i := range filterStatuses { - filterStatuses[i] = execution.FilterOK - } + return &coresequencer.GetNextBatchResponse{ + Batch: &coresequencer.Batch{Transactions: txs}, + Timestamp: time.Now().UTC(), + BatchData: req.LastBatchData, + }, nil } - var validTxs [][]byte + write := 0 var postponedTxs [][]byte for i, status := range filterStatuses { switch status { case execution.FilterOK: - validTxs = append(validTxs, txs[i]) + txs[write] = txs[i] + write++ case execution.FilterPostpone: postponedTxs = append(postponedTxs, txs[i]) - case execution.FilterRemove: } } @@ -119,7 +126,7 @@ func (s *SoloSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetN } return &coresequencer.GetNextBatchResponse{ - Batch: &coresequencer.Batch{Transactions: validTxs}, + Batch: &coresequencer.Batch{Transactions: txs[:write]}, Timestamp: time.Now().UTC(), BatchData: req.LastBatchData, }, nil @@ -130,7 +137,7 @@ func (s *SoloSequencer) VerifyBatch(ctx context.Context, req coresequencer.Verif return nil, ErrInvalidID } - return &coresequencer.VerifyBatchResponse{Status: true}, nil + return verifyBatchOKResp, nil } func (s *SoloSequencer) SetDAHeight(height uint64) { diff --git a/tools/celestia-node-fiber/adapter.go b/tools/celestia-node-fiber/adapter.go new file mode 100644 index 0000000000..db82333937 --- /dev/null +++ b/tools/celestia-node-fiber/adapter.go @@ -0,0 +1,167 @@ +package celestianodefiber + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/cosmos/cosmos-sdk/crypto/keyring" + + appfibre "github.com/celestiaorg/celestia-app/v9/fibre" + libshare "github.com/celestiaorg/go-square/v4/share" + + "github.com/celestiaorg/celestia-node/api/client" + blobapi "github.com/celestiaorg/celestia-node/nodebuilder/blob" + fibreapi "github.com/celestiaorg/celestia-node/nodebuilder/fibre" + headerapi 
"github.com/celestiaorg/celestia-node/nodebuilder/header" + + "github.com/evstack/ev-node/block" +) + +// blobExpiration is the advisory ExpiresAt returned from Upload. celestia-node +// does not yet expose per-blob retention via UploadResult; 24h mirrors the +// placeholder used by tools/local-fiber so ev-node callers do not treat +// freshly-uploaded blobs as already-expired. +// +// TODO: surface actual retention from the x/fibre protocol params once +// celestia-node exposes it. +const blobExpiration = 24 * time.Hour + +// defaultListenChannelSize matches the buffer used by celestia-node's +// blob.Subscribe so the forwarder does not become a tighter bottleneck than +// the upstream stream. +const defaultListenChannelSize = 16 + +// Adapter implements the ev-node fiber.DA interface on top of a +// celestia-node api/client.Client. Upload and Download run locally against +// consensus gRPC + FSPs; Listen forwards share-version-2 blobs from the +// bridge node's subscription stream. +type Adapter struct { + fibre fibreapi.Module + blob blobapi.Module + header headerapi.Module + listenChannelSz int + + // closer, if non-nil, is invoked by Close. Set only when the Adapter + // owns the underlying api/client.Client (via New). + closer func() error +} + +// Compile-time assertion that Adapter satisfies the ev-node Fiber DA contract. +var _ block.FiberClient = (*Adapter)(nil) + +// New constructs a celestia-node api/client.Client from cfg and wraps it as +// an Adapter. The returned Adapter owns the client and must be closed via +// Close to release its gRPC connections. +func New(ctx context.Context, cfg Config, kr keyring.Keyring) (*Adapter, error) { + c, err := client.New(ctx, cfg.Client, kr) + if err != nil { + return nil, fmt.Errorf("constructing celestia-node client: %w", err) + } + return &Adapter{ + fibre: c.Fibre, + blob: c.Blob, + header: c.Header, + listenChannelSz: resolveListenChannelSize(cfg.ListenChannelSize), + closer: c.Close, + }, nil +} + +// FromModules wraps existing Fibre and Blob module implementations. It is +// intended for tests and for callers that already own a *client.Client and +// want to pass its Fibre + Blob fields directly. The caller retains +// responsibility for the underlying client's lifecycle; Close is a no-op. +func FromModules(fibre fibreapi.Module, blob blobapi.Module, listenChannelSize int) *Adapter { + return &Adapter{ + fibre: fibre, + blob: blob, + listenChannelSz: resolveListenChannelSize(listenChannelSize), + } +} + +// Close tears down the underlying client, if the Adapter owns one. +func (a *Adapter) Close() error { + if a.closer == nil { + return nil + } + return a.closer() +} + +// Head returns the bridge node's current local-head height. Returns 0 if +// the underlying client was constructed via FromModules without a Header +// module. +func (a *Adapter) Head(ctx context.Context) (uint64, error) { + if a.header == nil { + return 0, fmt.Errorf("Adapter has no Header module; construct via New") + } + h, err := a.header.LocalHead(ctx) + if err != nil { + return 0, fmt.Errorf("header.LocalHead: %w", err) + } + return h.Height(), nil +} + +// Upload implements fiber.DA.Upload. client.Fibre.Upload does off-chain row +// upload plus validator-sig aggregation and spawns a background +// MsgPayForFibre broadcast; this call returns as soon as the off-chain +// stages finish. 
+func (a *Adapter) Upload( + ctx context.Context, + namespace []byte, + data []byte, +) (block.FiberUploadResult, error) { + ns, err := toV0Namespace(namespace) + if err != nil { + return block.FiberUploadResult{}, fmt.Errorf("namespace: %w", err) + } + up, err := a.fibre.Upload(ctx, ns, data, nil) + if err != nil { + return block.FiberUploadResult{}, fmt.Errorf("fibre upload: %w", err) + } + if up == nil { + return block.FiberUploadResult{}, errors.New("fibre upload returned nil result") + } + // Copy the returned BlobID to decouple the caller from any internal + // reuse of the upstream slice. + id := make(block.FiberBlobID, len(up.BlobID)) + copy(id, up.BlobID) + return block.FiberUploadResult{ + BlobID: id, + ExpiresAt: time.Now().Add(blobExpiration), + }, nil +} + +// Download implements fiber.DA.Download. Reads go directly to FSPs via the +// appfibre client embedded in client.Fibre — no bridge hop. +func (a *Adapter) Download(ctx context.Context, blobID block.FiberBlobID) ([]byte, error) { + res, err := a.fibre.Download(ctx, appfibre.BlobID(blobID)) + if err != nil { + return nil, fmt.Errorf("fibre download: %w", err) + } + if res == nil { + return nil, errors.New("fibre download returned nil result") + } + return res.Data, nil +} + +// toV0Namespace converts an ev-node raw namespace ([]byte) into a libshare +// Namespace. The adapter contract is that callers pass the 10-byte v0 +// namespace ID (matching ev-node's datypes.NamespaceFromString output); we +// surface a clear error on length mismatch rather than silently padding. +func toV0Namespace(id []byte) (libshare.Namespace, error) { + if len(id) != libshare.NamespaceVersionZeroIDSize { + return libshare.Namespace{}, fmt.Errorf( + "expected %d bytes, got %d", + libshare.NamespaceVersionZeroIDSize, len(id), + ) + } + return libshare.NewV0Namespace(id) +} + +func resolveListenChannelSize(size int) int { + if size <= 0 { + return defaultListenChannelSize + } + return size +} \ No newline at end of file diff --git a/tools/celestia-node-fiber/adapter_test.go b/tools/celestia-node-fiber/adapter_test.go new file mode 100644 index 0000000000..b86d2a0386 --- /dev/null +++ b/tools/celestia-node-fiber/adapter_test.go @@ -0,0 +1,278 @@ +package celestianodefiber_test + +import ( + "bytes" + "context" + "errors" + "testing" + "time" + + "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + + appfibre "github.com/celestiaorg/celestia-app/v9/fibre" + libshare "github.com/celestiaorg/go-square/v4/share" + + nodeblob "github.com/celestiaorg/celestia-node/blob" + blobapi "github.com/celestiaorg/celestia-node/nodebuilder/blob" + celfibre "github.com/celestiaorg/celestia-node/fibre" + fibreapi "github.com/celestiaorg/celestia-node/nodebuilder/fibre" + "github.com/celestiaorg/celestia-node/state/txclient" + + "github.com/evstack/ev-node/block" + cnfiber "github.com/evstack/ev-node/tools/celestia-node-fiber" +) + +// namespaceBytes returns a 10-byte v0 namespace ID for test fixtures. +func namespaceBytes() []byte { return bytes.Repeat([]byte{0xab}, libshare.NamespaceVersionZeroIDSize) } + +func namespace(t *testing.T) libshare.Namespace { + t.Helper() + ns, err := libshare.NewV0Namespace(namespaceBytes()) + require.NoError(t, err) + return ns +} + +// fakeFibre is a minimal stand-in for fibreapi.Module. We hand-roll it +// because celestia-node's generated mocks use two different gomock forks +// across the fibre and blob packages, which can't share a Controller. 
+type fakeFibre struct { + uploadFn func(context.Context, libshare.Namespace, []byte, *txclient.TxConfig) (*fibreapi.UploadResult, error) + downloadFn func(context.Context, appfibre.BlobID) (*fibreapi.GetBlobResult, error) +} + +var _ fibreapi.Module = (*fakeFibre)(nil) + +func (f *fakeFibre) Submit(context.Context, libshare.Namespace, []byte, *txclient.TxConfig) (*fibreapi.SubmitResult, error) { + return nil, errors.New("fakeFibre.Submit not implemented") +} + +func (f *fakeFibre) Upload(ctx context.Context, ns libshare.Namespace, data []byte, cfg *txclient.TxConfig) (*fibreapi.UploadResult, error) { + return f.uploadFn(ctx, ns, data, cfg) +} + +func (f *fakeFibre) Download(ctx context.Context, id appfibre.BlobID) (*fibreapi.GetBlobResult, error) { + return f.downloadFn(ctx, id) +} + +func (f *fakeFibre) QueryEscrowAccount(context.Context, string) (*celfibre.EscrowAccount, error) { + return nil, errors.New("fakeFibre.QueryEscrowAccount not implemented") +} + +func (f *fakeFibre) Deposit(context.Context, types.Coin, *txclient.TxConfig) error { + return errors.New("fakeFibre.Deposit not implemented") +} + +func (f *fakeFibre) Withdraw(context.Context, types.Coin, *txclient.TxConfig) error { + return errors.New("fakeFibre.Withdraw not implemented") +} + +func (f *fakeFibre) PendingWithdrawals(context.Context, string) ([]celfibre.PendingWithdrawal, error) { + return nil, errors.New("fakeFibre.PendingWithdrawals not implemented") +} + +// fakeBlob is a minimal stand-in for blobapi.Module. Only Subscribe is +// exercised by the adapter, so the rest return errors if called. +type fakeBlob struct { + subscribeFn func(context.Context, libshare.Namespace, uint64) (<-chan *nodeblob.SubscriptionResponse, error) +} + +var _ blobapi.Module = (*fakeBlob)(nil) + +func (b *fakeBlob) Submit(context.Context, []*nodeblob.Blob, *nodeblob.SubmitOptions) (uint64, error) { + return 0, errors.New("fakeBlob.Submit not implemented") +} + +func (b *fakeBlob) Get(context.Context, uint64, libshare.Namespace, nodeblob.Commitment) (*nodeblob.Blob, error) { + return nil, errors.New("fakeBlob.Get not implemented") +} + +func (b *fakeBlob) GetAll(context.Context, uint64, []libshare.Namespace) ([]*nodeblob.Blob, error) { + return nil, errors.New("fakeBlob.GetAll not implemented") +} + +func (b *fakeBlob) GetProof(context.Context, uint64, libshare.Namespace, nodeblob.Commitment) (*nodeblob.Proof, error) { + return nil, errors.New("fakeBlob.GetProof not implemented") +} + +func (b *fakeBlob) Included(context.Context, uint64, libshare.Namespace, *nodeblob.Proof, nodeblob.Commitment) (bool, error) { + return false, errors.New("fakeBlob.Included not implemented") +} + +func (b *fakeBlob) GetCommitmentProof(context.Context, uint64, libshare.Namespace, []byte) (*nodeblob.CommitmentProof, error) { + return nil, errors.New("fakeBlob.GetCommitmentProof not implemented") +} + +func (b *fakeBlob) Subscribe(ctx context.Context, ns libshare.Namespace, fromHeight uint64) (<-chan *nodeblob.SubscriptionResponse, error) { + return b.subscribeFn(ctx, ns, fromHeight) +} + +// TestAdapterSatisfiesDA is a compile-time assertion that the adapter +// implements the ev-node Fiber DA contract. 
+func TestAdapterSatisfiesDA(t *testing.T) { + var _ block.FiberClient = (*cnfiber.Adapter)(nil) +} + +func TestUpload_ForwardsNamespaceDataAndBlobID(t *testing.T) { + var commit appfibre.Commitment + copy(commit[:], bytes.Repeat([]byte{0x11}, appfibre.CommitmentSize)) + expectedBlobID := appfibre.NewBlobID(0, commit) + + var seenNs libshare.Namespace + var seenData []byte + var seenCfg *txclient.TxConfig + fibre := &fakeFibre{ + uploadFn: func(_ context.Context, ns libshare.Namespace, data []byte, cfg *txclient.TxConfig) (*fibreapi.UploadResult, error) { + seenNs = ns + seenData = data + seenCfg = cfg + return &fibreapi.UploadResult{BlobID: expectedBlobID}, nil + }, + } + a := cnfiber.FromModules(fibre, &fakeBlob{}, 0) + + data := []byte("hello fibre") + before := time.Now() + got, err := a.Upload(context.Background(), namespaceBytes(), data) + require.NoError(t, err) + require.Equal(t, block.FiberBlobID(expectedBlobID), got.BlobID) + require.Equal(t, namespace(t), seenNs) + require.Equal(t, data, seenData) + require.Nil(t, seenCfg, "adapter passes nil TxConfig to honour client defaults") + require.True(t, got.ExpiresAt.After(before.Add(time.Hour))) +} + +func TestUpload_RejectsWrongNamespaceLength(t *testing.T) { + a := cnfiber.FromModules(&fakeFibre{}, &fakeBlob{}, 0) + _, err := a.Upload(context.Background(), []byte{0x01, 0x02}, []byte("x")) + require.Error(t, err) +} + +func TestUpload_PropagatesFibreError(t *testing.T) { + fibreErr := errors.New("boom") + fibre := &fakeFibre{ + uploadFn: func(context.Context, libshare.Namespace, []byte, *txclient.TxConfig) (*fibreapi.UploadResult, error) { + return nil, fibreErr + }, + } + a := cnfiber.FromModules(fibre, &fakeBlob{}, 0) + _, err := a.Upload(context.Background(), namespaceBytes(), []byte("x")) + require.ErrorIs(t, err, fibreErr) +} + +func TestDownload_ReturnsResultData(t *testing.T) { + var commit appfibre.Commitment + copy(commit[:], bytes.Repeat([]byte{0x22}, appfibre.CommitmentSize)) + id := appfibre.NewBlobID(0, commit) + payload := []byte("payload") + + var seenID appfibre.BlobID + fibre := &fakeFibre{ + downloadFn: func(_ context.Context, arg appfibre.BlobID) (*fibreapi.GetBlobResult, error) { + seenID = arg + return &fibreapi.GetBlobResult{Data: payload}, nil + }, + } + + a := cnfiber.FromModules(fibre, &fakeBlob{}, 0) + got, err := a.Download(context.Background(), block.FiberBlobID(id)) + require.NoError(t, err) + require.Equal(t, payload, got) + require.Equal(t, id, seenID) +} + +func TestListen_FiltersFibreOnlyAndEmitsEvent(t *testing.T) { + ns := namespace(t) + + // v0 (non-Fibre) — must be filtered out. + v0Lib, err := libshare.NewV0Blob(ns, []byte("pfb blob")) + require.NoError(t, err) + v0 := &nodeblob.Blob{Blob: v0Lib} + + // v2 (Fibre) — must be forwarded. libshare requires a 20-byte signer + // for v2 blobs; the content of the signer is irrelevant to the filter. 
+ fibreCommit := bytes.Repeat([]byte{0x33}, appfibre.CommitmentSize) + signer := bytes.Repeat([]byte{0x01}, libshare.SignerSize) + v2Lib, err := libshare.NewV2Blob(ns, 0, fibreCommit, signer) + require.NoError(t, err) + v2 := &nodeblob.Blob{Blob: v2Lib} + + ch := make(chan *nodeblob.SubscriptionResponse, 1) + ch <- &nodeblob.SubscriptionResponse{ + Blobs: []*nodeblob.Blob{v0, v2}, + Height: 42, + } + close(ch) + + var seenNs libshare.Namespace + var seenFromHeight uint64 + blob := &fakeBlob{ + subscribeFn: func(_ context.Context, sub libshare.Namespace, fromHeight uint64) (<-chan *nodeblob.SubscriptionResponse, error) { + seenNs = sub + seenFromHeight = fromHeight + return ch, nil + }, + } + + // Listen now issues a Download per v2 blob to recover the original + // payload size for BlobEvent.DataSize (see Listen's doc comment). + // Feed a deterministic payload back; the test asserts DataSize + // matches its length. + originalPayload := []byte("this is the original blob payload") + fibre := &fakeFibre{ + downloadFn: func(_ context.Context, _ appfibre.BlobID) (*fibreapi.GetBlobResult, error) { + return &fibreapi.GetBlobResult{Data: originalPayload}, nil + }, + } + a := cnfiber.FromModules(fibre, blob, 0) + events, err := a.Listen(context.Background(), namespaceBytes(), 0) + require.NoError(t, err) + require.Equal(t, ns, seenNs) + require.Equal(t, uint64(0), seenFromHeight, "fromHeight=0 must be forwarded to blob.Subscribe") + + select { + case ev, ok := <-events: + require.True(t, ok, "expected one event before channel closes") + var expectedCommit appfibre.Commitment + copy(expectedCommit[:], fibreCommit) + expectedID := appfibre.NewBlobID(0, expectedCommit) + require.Equal(t, block.FiberBlobID(expectedID), ev.BlobID) + require.Equal(t, uint64(42), ev.Height) + require.Equal(t, uint64(len(originalPayload)), ev.DataSize, + "DataSize must match the original payload length resolved via Download") + case <-time.After(time.Second): + t.Fatal("timed out waiting for blob event") + } + + select { + case _, ok := <-events: + require.False(t, ok, "expected adapter channel to close after upstream close") + case <-time.After(time.Second): + t.Fatal("timed out waiting for output channel to close") + } +} + +func TestListen_CancelledContextClosesOutput(t *testing.T) { + ns := namespace(t) + upstream := make(chan *nodeblob.SubscriptionResponse) + blob := &fakeBlob{ + subscribeFn: func(_ context.Context, arg libshare.Namespace, _ uint64) (<-chan *nodeblob.SubscriptionResponse, error) { + require.Equal(t, ns, arg) + return upstream, nil + }, + } + + ctx, cancel := context.WithCancel(context.Background()) + a := cnfiber.FromModules(&fakeFibre{}, blob, 0) + events, err := a.Listen(ctx, namespaceBytes(), 0) + require.NoError(t, err) + + cancel() + + select { + case _, ok := <-events: + require.False(t, ok, "expected adapter channel to close on ctx cancel") + case <-time.After(time.Second): + t.Fatal("timed out waiting for ctx-triggered close") + } +} diff --git a/tools/celestia-node-fiber/cmd/evnode-fibre/main.go b/tools/celestia-node-fiber/cmd/evnode-fibre/main.go new file mode 100644 index 0000000000..7b8b73080f --- /dev/null +++ b/tools/celestia-node-fiber/cmd/evnode-fibre/main.go @@ -0,0 +1,553 @@ +// Command evnode-fibre runs a long-lived ev-node aggregator wired to +// a celestia-node-fiber adapter. It is the binary that ships to the +// ev-node instance during a `talis deploy` for the multi-FSP +// throughput experiment. 
+// +// Topology (smallest variant of the experiment): +// +// [ load-gen ] +// │ POST /tx +// ▼ +// [ evnode-fibre (this binary) aggregator + InMem executor ] +// │ block.NewFiberDAClient → cnfiber.New +// ▼ +// [ celestia-node bridge ] +// │ blob.Subscribe / blob.Submit +// ▼ +// [ Fibre Server (per validator) ] + [ celestia-app validators ] +// +// CLI flags map to talis/SSM-friendly env vars; everything that +// changes per-deploy can be set via flag *or* CELES_* env var. +// +// This binary is intentionally not part of the testapp tree — testapp +// is the canonical small-chain example and we don't want to drag the +// celestia-node-fiber adapter (with its celestia-node + celestia-app +// deps) into testapp's go.mod. By living under tools/celestia-node-fiber +// the runner reuses the adapter's existing dep set as-is. +package main + +import ( + "context" + "crypto/rand" + "encoding/binary" + "errors" + "flag" + "fmt" + "io" + "net/http" + _ "net/http/pprof" + "os" + "os/signal" + "path/filepath" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/rs/zerolog" + + "github.com/celestiaorg/celestia-app/v9/app" + "github.com/celestiaorg/celestia-app/v9/app/encoding" + "github.com/celestiaorg/celestia-node/api/client" + cnp2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + + "github.com/evstack/ev-node/block" + coreexecution "github.com/evstack/ev-node/core/execution" + "github.com/evstack/ev-node/node" + "github.com/evstack/ev-node/pkg/config" + genesispkg "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/pkg/p2p" + "github.com/evstack/ev-node/pkg/p2p/key" + "github.com/evstack/ev-node/pkg/sequencers/solo" + pkgsigner "github.com/evstack/ev-node/pkg/signer" + "github.com/evstack/ev-node/pkg/signer/file" + "github.com/evstack/ev-node/pkg/store" + + cnfiber "github.com/evstack/ev-node/tools/celestia-node-fiber" +) + +type cliFlags struct { + homeDir string + chainID string + headerNS string + dataNS string + bridgeAddr string + bridgeTokenFp string + coreGRPCAddr string + coreNetwork string + keyringPath string + keyName string + signerPpFp string + httpListen string + rpcListen string + p2pListen string + pprofListen string + blockTime time.Duration + scrapeInterval time.Duration + logLevel string +} + +func parseFlags() cliFlags { + var c cliFlags + + flag.StringVar(&c.homeDir, "home", envOr("EVNODE_HOME", filepath.Join(os.Getenv("HOME"), ".evnode-fibre")), + "ev-node home directory (datastore, signer, node key live here)") + flag.StringVar(&c.chainID, "chain-id", envOr("CHAIN_ID", ""), + "app chain id (must match validators' chain id)") + flag.StringVar(&c.headerNS, "header-namespace", envOr("HEADER_NS", "ev-fib-ht"), + "DA header namespace string") + flag.StringVar(&c.dataNS, "data-namespace", envOr("DATA_NS", "ev-fib-da"), + "DA data namespace string") + flag.StringVar(&c.bridgeAddr, "bridge-addr", envOr("BRIDGE_ADDR", ""), + "celestia-node bridge RPC address (host:port, no scheme)") + flag.StringVar(&c.bridgeTokenFp, "bridge-token-file", envOr("BRIDGE_TOKEN_FILE", "/root/bridge-jwt.txt"), + "path to a file containing the bridge admin JWT") + flag.StringVar(&c.coreGRPCAddr, "core-grpc-addr", envOr("CORE_GRPC_ADDR", ""), + "celestia-app validator gRPC address (host:port) used by Fibre's submit path for state queries") + flag.StringVar(&c.coreNetwork, "core-network", envOr("CORE_NETWORK", "private"), + "celestia-node Network 
identifier (matches bridge's --p2p.network)") + flag.StringVar(&c.keyringPath, "keyring-path", envOr("KEYRING_PATH", ""), + "directory holding the cosmos-sdk file keyring with the Fibre payment account") + flag.StringVar(&c.keyName, "key-name", envOr("KEY_NAME", "default-fibre"), + "keyring entry name for the Fibre payment account") + flag.StringVar(&c.signerPpFp, "signer-passphrase-file", envOr("SIGNER_PASSPHRASE_FILE", ""), + "path to a file holding the file-backed signer passphrase") + flag.StringVar(&c.httpListen, "http-listen", envOr("HTTP_LISTEN", "0.0.0.0:7777"), + "listen addr for the tx-injection HTTP endpoint (POST /tx)") + flag.StringVar(&c.rpcListen, "rpc-listen", envOr("RPC_LISTEN", "0.0.0.0:7331"), + "ev-node RPC listen addr") + flag.StringVar(&c.p2pListen, "p2p-listen", envOr("P2P_LISTEN", "/ip4/0.0.0.0/tcp/7676"), + "libp2p listen address (kept up for shutdown symmetry; never gossips when Fiber is on)") + flag.StringVar(&c.pprofListen, "pprof-listen", envOr("PPROF_LISTEN", ""), + "if set (e.g. 127.0.0.1:6060), serve net/http/pprof on this addr — heap/goroutine/profile useful for diagnosing memory growth") + flag.DurationVar(&c.blockTime, "block-time", durFromEnv("BLOCK_TIME", 200*time.Millisecond), + "ev-node BlockTime") + flag.DurationVar(&c.scrapeInterval, "scrape-interval", durFromEnv("SCRAPE_INTERVAL", 100*time.Millisecond), + "reaper scrape interval (lower = smaller per-block batches)") + flag.StringVar(&c.logLevel, "log-level", envOr("LOG_LEVEL", "info"), "log level") + + flag.Parse() + return c +} + +func envOr(name, def string) string { + if v := os.Getenv(name); v != "" { + return v + } + return def +} + +func durFromEnv(name string, def time.Duration) time.Duration { + if v := os.Getenv(name); v != "" { + if d, err := time.ParseDuration(v); err == nil { + return d + } + } + return def +} + +func main() { + cfg := parseFlags() + if err := run(cfg); err != nil { + fmt.Fprintln(os.Stderr, "fatal:", err) + os.Exit(1) + } +} + +func run(cli cliFlags) error { + // Validate the inputs that have no sensible default. + if cli.chainID == "" { + return errors.New("--chain-id is required") + } + if cli.bridgeAddr == "" { + return errors.New("--bridge-addr is required") + } + if cli.coreGRPCAddr == "" { + return errors.New("--core-grpc-addr is required (validator-0 gRPC, host:port)") + } + if cli.keyringPath == "" { + return errors.New("--keyring-path is required") + } + + level, err := zerolog.ParseLevel(cli.logLevel) + if err != nil { + level = zerolog.InfoLevel + } + zerolog.SetGlobalLevel(level) + logger := zerolog.New(os.Stderr).With().Timestamp().Str("component", "evnode-fibre").Logger() + + if err := os.MkdirAll(cli.homeDir, 0o755); err != nil { + return fmt.Errorf("create home dir: %w", err) + } + + // Bridge JWT: read once at startup. The bridge_init.sh writes it + // to /root/bridge-jwt.txt on the bridge box; the ev-node init + // script scp's it onto this box at the same path by default. + authBytes, err := os.ReadFile(cli.bridgeTokenFp) + if err != nil { + return fmt.Errorf("read bridge token from %s: %w", cli.bridgeTokenFp, err) + } + authToken := string(authBytes) + for len(authToken) > 0 && (authToken[len(authToken)-1] == '\n' || authToken[len(authToken)-1] == '\r' || authToken[len(authToken)-1] == ' ') { + authToken = authToken[:len(authToken)-1] + } + + // Cosmos-sdk file keyring with the Fibre payment account. + // The deploy step copies this from a validator (or a dedicated + // pre-funded account) so the keyring already contains cli.keyName. 
+ encCfg := encoding.MakeConfig(app.ModuleEncodingRegisters...) + kr, err := keyring.New(app.Name, keyring.BackendTest, cli.keyringPath, nil, encCfg.Codec) + if err != nil { + return fmt.Errorf("open keyring at %s: %w", cli.keyringPath, err) + } + if _, err := kr.Key(cli.keyName); err != nil { + return fmt.Errorf("keyring entry %q not found in %s: %w", cli.keyName, cli.keyringPath, err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Construct the celestia-node-fiber adapter. We don't override + // SubmitConfig.Fibre — the Fibre client defaults (UploadMemoryBudget + // 512 MiB, RPCTimeout 15 s) are sized for the FSP-side concurrency + // the validators can actually absorb. We tried bumping the budget + // to 4 GiB to allow more in-flight blobs; with 16 upload workers + // the FSPs couldn't keep up and the box OOM'd at 63.9 GB. Leaving + // the defaults in place means the upload pipeline self-bounds at + // roughly what the FSPs can sustain. + adapter, err := cnfiber.New(ctx, cnfiber.Config{ + Client: client.Config{ + ReadConfig: client.ReadConfig{ + BridgeDAAddr: cli.bridgeAddr, + DAAuthToken: authToken, + EnableDATLS: false, + }, + SubmitConfig: client.SubmitConfig{ + DefaultKeyName: cli.keyName, + Network: cnp2p.Network(cli.coreNetwork), + CoreGRPCConfig: client.CoreGRPCConfig{ + Addr: cli.coreGRPCAddr, + }, + }, + }, + }, kr) + if err != nil { + return fmt.Errorf("construct fiber adapter: %w", err) + } + defer adapter.Close() + + // File-backed signer for ev-node block-signing key (separate from + // the cosmos-sdk keyring used to sign Fibre payment promises). + signerDir := filepath.Join(cli.homeDir, "signer") + if err := os.MkdirAll(signerDir, 0o755); err != nil { + return fmt.Errorf("create signer dir: %w", err) + } + passphrase, err := readSignerPassphrase(cli.signerPpFp) + if err != nil { + return fmt.Errorf("read signer passphrase: %w", err) + } + if !signerExists(signerDir) { + if _, err := file.CreateFileSystemSigner(signerDir, []byte(passphrase)); err != nil { + return fmt.Errorf("create file signer: %w", err) + } + } + + // Generate a libp2p node key. With the syncer's P2P worker gated + // off in Fiber mode, this key is mostly cosmetic — the host comes + // up but never gossips. Keeping it ephemeral per restart is fine. + nodePrivKey, _, err := crypto.GenerateEd25519Key(rand.Reader) + if err != nil { + return fmt.Errorf("generate libp2p key: %w", err) + } + nodeKey := &key.NodeKey{PrivKey: nodePrivKey} + + // File signer factory needs the address before genesis is built, + // so construct it here and read the address back. + fs, err := file.LoadFileSystemSigner(signerDir, []byte(passphrase)) + if err != nil { + return fmt.Errorf("load signer: %w", err) + } + signerAddr, err := fs.GetAddress() + if err != nil { + return fmt.Errorf("get signer address: %w", err) + } + + // Fresh genesis per-run is fine: the chain we're talking to via + // Fibre is the celestia-app testnet; ev-node's own genesis is + // self-consistent and never gossiped. 
+ genesis := genesispkg.NewGenesis(cli.chainID, 1, time.Now(), signerAddr) + if err := genesis.Validate(); err != nil { + return fmt.Errorf("validate genesis: %w", err) + } + + cfg := config.DefaultConfig() + cfg.RootDir = cli.homeDir + cfg.DBPath = "data" + cfg.Node.Aggregator = true + cfg.Node.BlockTime = config.DurationWrapper{Duration: cli.blockTime} + cfg.Node.LazyMode = false + cfg.Node.ScrapeInterval = config.DurationWrapper{Duration: cli.scrapeInterval} + cfg.DA.Namespace = cli.headerNS + cfg.DA.DataNamespace = cli.dataNS + cfg.DA.Fiber.Enabled = true + cfg.DA.Fiber.ConsensusAddress = cli.coreGRPCAddr + cfg.DA.Fiber.ConsensusChainID = cli.chainID + cfg.DA.Fiber.BridgeAddress = cli.bridgeAddr + cfg.DA.Fiber.KeyringPath = cli.keyringPath + cfg.DA.Fiber.KeyName = cli.keyName + cfg.DA.RequestTimeout = config.DurationWrapper{Duration: 60 * time.Second} + // Fiber-tuned profile: BatchingStrategy=adaptive, BatchMaxDelay=1.5s, + // DA.BlockTime=1s, MaxPendingHeadersAndData=0, plus 120 MiB blob cap. + cfg.ApplyFiberDefaults() + block.SetMaxBlobSize(120 * 1024 * 1024) + cfg.P2P.ListenAddress = cli.p2pListen + cfg.P2P.DisableConnectionGater = true + cfg.RPC.Address = cli.rpcListen + cfg.Log.Level = cli.logLevel + cfg.Signer.SignerType = "file" + cfg.Signer.SignerPath = signerDir + + signer, err := pkgsigner.NewSigner(ctx, &cfg, passphrase) + if err != nil { + return fmt.Errorf("construct signer via factory: %w", err) + } + + ds, err := store.NewDefaultKVStore(cli.homeDir, cfg.DBPath, "evnode-fibre") + if err != nil { + return fmt.Errorf("create datastore: %w", err) + } + + executor := newInMemExecutor() + sequencer := solo.NewSoloSequencer(logger, []byte(genesis.ChainID), executor) + daClient := block.NewFiberDAClient(adapter, cfg, logger, 0) + p2pClient, err := p2p.NewClient(cfg.P2P, nodeKey.PrivKey, datastore.NewMapDatastore(), genesis.ChainID, logger, nil) + if err != nil { + return fmt.Errorf("create p2p client: %w", err) + } + + rollnode, err := node.NewNode( + cfg, + executor, + sequencer, + daClient, + signer, + p2pClient, + genesis, + ds, + node.DefaultMetricsProvider(cfg.Instrumentation), + logger, + node.NodeOptions{}, + ) + if err != nil { + return fmt.Errorf("create ev-node: %w", err) + } + + // pprof on a separate listener (off by default). The `_ "net/http/pprof"` + // import registers handlers on http.DefaultServeMux; we serve that + // mux on cli.pprofListen so heap / goroutine / profile dumps don't + // share a port with the tx-ingress mux. Used to diagnose where the + // daemon's RSS goes — the AWS run held ~49 GiB at steady state and + // we don't yet have a breakdown. + if cli.pprofListen != "" { + pprofSrv := &http.Server{Addr: cli.pprofListen, ReadHeaderTimeout: 5 * time.Second} + go func() { + logger.Info().Str("addr", cli.pprofListen).Msg("starting pprof HTTP server") + if err := pprofSrv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + logger.Warn().Err(err).Msg("pprof server exited") + } + }() + } + + // HTTP tx ingestion endpoint. + httpServer := &http.Server{ + Addr: cli.httpListen, + Handler: txIngressHandler(executor, logger), + } + httpDone := make(chan error, 1) + go func() { + logger.Info().Str("addr", cli.httpListen).Msg("starting tx-ingress HTTP server") + err := httpServer.ListenAndServe() + if err != nil && !errors.Is(err, http.ErrServerClosed) { + httpDone <- err + } else { + httpDone <- nil + } + }() + + // Run the node and trap signals. 
+ nodeDone := make(chan error, 1) + go func() { + nodeDone <- rollnode.Run(ctx) + }() + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + + select { + case <-sigCh: + logger.Info().Msg("signal received, shutting down") + case err := <-nodeDone: + if err != nil { + logger.Error().Err(err).Msg("ev-node exited with error") + } + case err := <-httpDone: + logger.Error().Err(err).Msg("HTTP server exited") + } + cancel() + + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) + defer shutdownCancel() + _ = httpServer.Shutdown(shutdownCtx) + + return nil +} + +func readSignerPassphrase(path string) (string, error) { + if path == "" { + // Default: deterministic-but-non-empty passphrase. This is a + // long-lived testnet daemon, not a custodial wallet. + return "evnode-fibre-passphrase", nil + } + b, err := os.ReadFile(path) + if err != nil { + return "", err + } + s := string(b) + for len(s) > 0 && (s[len(s)-1] == '\n' || s[len(s)-1] == '\r' || s[len(s)-1] == ' ') { + s = s[:len(s)-1] + } + if s == "" { + return "", errors.New("passphrase file is empty") + } + return s, nil +} + +func signerExists(dir string) bool { + _, err := os.Stat(filepath.Join(dir, "signer.json")) + return err == nil +} + +// ─────────────────────────── tx ingress ──────────────────────────────── + +func txIngressHandler(exec *inMemExecutor, logger zerolog.Logger) http.Handler { + mux := http.NewServeMux() + mux.HandleFunc("/tx", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + w.Header().Set("Allow", "POST") + http.Error(w, "POST only", http.StatusMethodNotAllowed) + return + } + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if len(body) == 0 { + http.Error(w, "empty body", http.StatusBadRequest) + return + } + exec.InjectTx(body) + w.WriteHeader(http.StatusAccepted) + }) + mux.HandleFunc("/stats", func(w http.ResponseWriter, _ *http.Request) { + s := exec.Stats() + fmt.Fprintf(w, "blocks=%d txs=%d\n", s.BlocksProduced, s.TotalExecutedTxs) + }) + return mux +} + +// ─────────────────────── in-memory executor ──────────────────────────── +// +// Mirrors the test executor in tools/celestia-node-fiber/testing — accepts +// arbitrary tx bytes, drains a buffered channel into blocks, and tracks +// counts for /stats. Not a real chain; just a generic blob factory for +// the experiment. + +type inMemExecutor struct { + mu sync.Mutex + txChan chan []byte + maxBlockTxs int + blocks atomic.Uint64 + totalTxs atomic.Uint64 +} + +// txChan capacity caps in-flight memory: at 10 KB tx and 500 slots +// we hold ≤ 5 MB queued before /tx blocks the ingress goroutine — +// which is exactly the backpressure we want against a hot loadgen. +// Reaper drains every 100 ms into the solo sequencer, which then +// accumulates batches between block-production ticks; without a tight +// cap a single block can balloon past the 120 MiB DA blob limit and +// the rest of the daemon's per-block allocations push the box past +// its RAM budget within seconds. +// +// maxBlockTxs caps GetTxs's per-call return so reaper-cycle batches +// are bounded too. With 500 ≤ 5 MB per block at 10 KB tx-size, we +// stay an order of magnitude under the DA cap so headers/data signing +// + envelope cache + retry buffers all fit. 
+func newInMemExecutor() *inMemExecutor { + return &inMemExecutor{ + txChan: make(chan []byte, 500), + maxBlockTxs: 500, + } +} + +func (e *inMemExecutor) InjectTx(tx []byte) { + e.txChan <- tx +} + +type execStats struct { + BlocksProduced uint64 + TotalExecutedTxs uint64 +} + +func (e *inMemExecutor) Stats() execStats { + return execStats{ + BlocksProduced: e.blocks.Load(), + TotalExecutedTxs: e.totalTxs.Load(), + } +} + +func (e *inMemExecutor) InitChain(_ context.Context, _ time.Time, _ uint64, _ string) ([]byte, error) { + return []byte("evnode-fibre-genesis-root"), nil +} + +func (e *inMemExecutor) GetTxs(_ context.Context) ([][]byte, error) { + var txs [][]byte + for len(txs) < e.maxBlockTxs { + select { + case tx := <-e.txChan: + txs = append(txs, tx) + default: + return txs, nil + } + } + return txs, nil +} + +func (e *inMemExecutor) ExecuteTxs(_ context.Context, txs [][]byte, height uint64, _ time.Time, _ []byte) ([]byte, error) { + e.blocks.Add(1) + e.totalTxs.Add(uint64(len(txs))) + root := make([]byte, 32) + binary.BigEndian.PutUint64(root, height) + binary.BigEndian.PutUint64(root[8:], uint64(len(txs))) + return root, nil +} + +func (e *inMemExecutor) SetFinal(_ context.Context, _ uint64) error { return nil } +func (e *inMemExecutor) Rollback(_ context.Context, _ uint64) error { return nil } +func (e *inMemExecutor) GetExecutionInfo(_ context.Context) (coreexecution.ExecutionInfo, error) { + return coreexecution.ExecutionInfo{MaxGas: 0}, nil +} + +func (e *inMemExecutor) FilterTxs(_ context.Context, txs [][]byte, _, _ uint64, _ bool) ([]coreexecution.FilterStatus, error) { + st := make([]coreexecution.FilterStatus, len(txs)) + for i := range st { + st[i] = coreexecution.FilterOK + } + return st, nil +} + +var _ coreexecution.Executor = (*inMemExecutor)(nil) diff --git a/tools/celestia-node-fiber/cmd/fiber-bench/README.md b/tools/celestia-node-fiber/cmd/fiber-bench/README.md new file mode 100644 index 0000000000..d99419f6e8 --- /dev/null +++ b/tools/celestia-node-fiber/cmd/fiber-bench/README.md @@ -0,0 +1,187 @@ +# fiber-bench + +Single-sequencer ev-node throughput bench against a remote Fibre network. + +## What it is + +A self-contained binary that spins up an ev-node aggregator wired to a +remote Fibre network (no bridge node, no P2P, no syncer, no +state-machine cost) and pumps transactions into its mempool as fast as +the configured backpressure allows. + +The intent is a fail-fast baseline so we can isolate ev-node's +batching + DA-submit pipeline from everything else when chasing the +1k tps regression. + +### What's stripped out (and why) + +| Stripped | Why | +|----------------|------------------------------------------------------------| +| Bridge node | Upload only needs consensus gRPC + FSPs. | +| Syncer | Aggregator-only single-node setup. | +| P2P outbound | ev-node already disables it when `da.fiber.enabled=true`. | +| Forced incl. | Solo sequencer. | +| Real state machine | Constant state root — measure ev-node, not state cost. | +| HTTP tx ingress | Direct `InjectTx`. Removes HTTP from the hot path. 
|
+
+## Layout
+
+```
+tools/celestia-node-fiber/cmd/fiber-bench/
+  main.go       cobra root
+  keys.go       cosmos keyring management (test backend)
+  escrow.go     Fibre escrow deposit/query
+  run.go        the bench
+  executor.go   in-mem core.Executor with constant state root
+  loader.go     internal tx pump
+  stats.go      periodic stats line + final baseline summary
+  fibre.go      bridge-bypass cnfiber.Adapter constructor
+  run-bench.sh  convenience wrapper
+```
+
+## Quick start
+
+```sh
+cd tools/celestia-node-fiber
+
+# 1. Build — the `fibre` build tag is REQUIRED so celestia-app's
+#    x/fibre messages (MsgPayForFibre, MsgDepositToEscrow) are registered
+#    in the codec. Without it the async PFF settlement fails with
+#    "unable to resolve type URL /celestia.fibre.v1.MsgPayForFibre".
+go build -tags fibre -o bin/fiber-bench ./cmd/fiber-bench/
+
+# 2. Create the bench key (cosmos keyring, test backend = unencrypted on disk)
+./bin/fiber-bench keys add bench
+#    prints:  address: celestia1...
+#             mnemonic: ...
+
+# 3. Top up the printed address with utia on the chain (out of band).
+
+# 4. Deposit into the Fibre escrow
+./bin/fiber-bench escrow deposit \
+  --consensus-grpc 139.59.229.101:9091 \
+  --key-name bench \
+  --amount 50000000   # 50 TIA
+
+# 5. Sanity check
+./bin/fiber-bench escrow query \
+  --consensus-grpc 139.59.229.101:9091 \
+  --key-name bench
+
+# 6. Run the bench
+./bin/fiber-bench run \
+  --evnode.da.fiber.consensus_address 139.59.229.101:9091 \
+  --evnode.da.fiber.consensus_chain_id <chain-id> \
+  --evnode.da.fiber.key_name bench \
+  --duration 2m \
+  --workers 32 \
+  --tx-size 200 \
+  --evnode.node.block_time 1s \
+  --evnode.da.batching_strategy immediate
+```
+
+The bench reuses canonical ev-node flags (`--evnode.*`) registered by
+`pkg/config.AddFlags` rather than defining bench-specific aliases. See
+`fiber-bench run --help` for the full list — anything you'd configure on
+testapp/evm/grpc apps works here too.
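+
+For example, a run that probes per-block overhead by mixing bench flags
+with canonical overrides (values are illustrative, taken from the knobs
+table below):
+
+```sh
+./bin/fiber-bench run \
+  --evnode.da.fiber.consensus_address 139.59.229.101:9091 \
+  --evnode.da.fiber.consensus_chain_id <chain-id> \
+  --evnode.da.fiber.key_name bench \
+  --evnode.node.block_time 100ms \
+  --evnode.da.batching_strategy adaptive \
+  --evnode.log.level debug
+```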
+ +Or use the convenience wrapper: + +```sh +CONSENSUS_GRPC=139.59.229.101:9091 \ +CHAIN_ID=talis-slab-diag \ + ./cmd/fiber-bench/run-bench.sh 2m 32 +``` + +## What the run prints + +A header, then one line per `--stats-interval` (default 1s): + +``` +elapsed injected inj/s exec/s blocks/s committed_h txs/blk blob_bytes pending drops +------------------------------------------------------------------------------------------------------ +1s 1452609 1452212 0 0.00 0 0.0 0 0 293116 +2s 1544094 91444 0 0.00 0 0.0 0 0 1007270 +``` + +Columns: + +- `injected` — total txs the load generator has called `InjectTx` for +- `inj/s` — injection rate over the last interval +- `exec/s` — txs included in produced blocks (rate) +- `blocks/s` — block production rate +- `committed_h` — last block height confirmed by DA (0 until first + Upload settles) +- `txs/blk` — running average over all blocks +- `blob_bytes` — last block's data size in bytes +- `pending` — `evnode_da_submitter_pending_blobs` gauge +- `drops` — txs the load generator could not enqueue because the + in-mem mempool channel was full (this is the backpressure signal) + +At the end: + +``` +============================================================ + BASELINE SUMMARY +============================================================ +Duration: 2m0s +Injected: XXX (avg N tx/s, peak N tx/s) +Dropped (mempool full): XXX +Mempool high-water: XXX +Blocks produced: XXX (committed_h=YYY) +Txs executed: XXX (avg N tx/s, peak N tx/s, T tx/blk) +============================================================ +``` + +## Knobs worth flipping while debugging + +Bench-local flags: + +| Flag | Default | Why | +|-------------------------|--------------|---------------------------------------------------| +| `--workers` | `32` | Tx-injection concurrency | +| `--tx-size` | `200` | Bytes per tx (matches user-reported regression) | +| `--mempool-size` | `1_000_000` | Bench's bounded backpressure boundary | +| `--keep-home` | `false` | Resume from prior state (defaults to wipe) | +| `--duration` | `1m` | How long to run (0 = until SIGINT) | +| `--stats-interval` | `1s` | Stats line cadence | +| `--keyring-dir` | `~/.fiber-bench/keyring` | Cosmos keyring (Fibre payment promises) | +| `--signer-passphrase` | `fiber-bench-passphrase` | ev-node block-signing key passphrase | + +Canonical ev-node flags worth flipping (full list: `run --help`): + +| Flag | Bench default | Why | +|-------------------------------------|---------------|--------------------------------------| +| `--evnode.node.block_time` | `1s` | Drop to `100ms` to expose per-block overhead | +| `--evnode.da.batching_strategy` | `immediate` | Try `time` / `size` / `adaptive` | +| `--evnode.node.scrape_interval` | `100ms` | How often the mempool drain runs | +| `--evnode.node.max_pending_headers_and_data` | `0` | Cap pending DA blobs to test backpressure | +| `--evnode.log.level` | `info` | `debug` to see ev-node block production logs | + +## ev-node Prometheus + +When `--prometheus=true` (default), ev-node exposes metrics at +`http://127.0.0.1:26660/metrics`. The bench scrapes a handful of them +for its stats line, but you can hit the endpoint directly for the full +picture: `evnode_block_production_duration_seconds`, +`evnode_da_submitter_failures_total`, etc. + +## Operational notes + +- **Test-backend keyring**: keys live unencrypted on disk under + `~/.fiber-bench/keyring`. Fine for a bench account funded with a + small amount of utia. Don't use for anything else. 
+- **The bench wipes its ev-node home (`~/.fiber-bench/node`) on every + run** unless `--keep-home` is passed. Block-signing key, store, and + any in-flight pending blocks all reset. The cosmos keyring is + separate and is preserved. +- **Bridge bypass**: the bench builds the `cnfiber.Adapter` via + `cnfiber.FromModules` with a stub Blob module that errors on every + call. The aggregator-only setup never invokes Listen/Subscribe, so + this is safe; if the assumption breaks, you'll see a clear + `fiber-bench: blob module not supported` error rather than a nil + panic. +- **Chain ID** is what the consensus node reports; the bench logs it + on startup. Pass the same value via `--chain-id` for config + validation; mismatch is logged but tx submission proceeds against + the chain's actual ID. diff --git a/tools/celestia-node-fiber/cmd/fiber-bench/escrow.go b/tools/celestia-node-fiber/cmd/fiber-bench/escrow.go new file mode 100644 index 0000000000..89e55e25c9 --- /dev/null +++ b/tools/celestia-node-fiber/cmd/fiber-bench/escrow.go @@ -0,0 +1,165 @@ +package main + +import ( + "context" + "fmt" + "time" + + sdkmath "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/spf13/cobra" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/celestiaorg/celestia-app/v9/app" + "github.com/celestiaorg/celestia-app/v9/app/encoding" + "github.com/celestiaorg/celestia-app/v9/pkg/appconsts" + "github.com/celestiaorg/celestia-app/v9/pkg/user" + fibretypes "github.com/celestiaorg/celestia-app/v9/x/fibre/types" +) + +// escrowCmd groups Fibre-escrow operations. Uploads consume utia from +// the signer's escrow account; without a funded escrow, every Upload on +// the bench will fail at the chain. +func escrowCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "escrow", + Short: "Manage Fibre escrow for the bench account", + } + cmd.AddCommand(escrowDepositCmd(), escrowQueryCmd()) + return cmd +} + +func escrowDepositCmd() *cobra.Command { + var ( + consensusGRPC string + keyringDir string + keyName string + amountUtia int64 + gasLimit uint64 + feeUtia uint64 + ) + cmd := &cobra.Command{ + Use: "deposit", + Short: "Deposit utia into the bench account's Fibre escrow", + RunE: func(cmd *cobra.Command, args []string) error { + ctx, cancel := context.WithTimeout(cmd.Context(), 60*time.Second) + defer cancel() + + kr, err := openKeyring(keyringDir) + if err != nil { + return fmt.Errorf("open keyring: %w", err) + } + rec, err := kr.Key(keyName) + if err != nil { + return fmt.Errorf("key %q not found in %s: %w", keyName, keyringDir, err) + } + addr, err := rec.GetAddress() + if err != nil { + return fmt.Errorf("get address: %w", err) + } + + conn, err := grpc.NewClient(consensusGRPC, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return fmt.Errorf("dial grpc: %w", err) + } + defer conn.Close() + + ecfg := encoding.MakeConfig(app.ModuleEncodingRegisters...) 
+ tc, err := user.SetupTxClient(ctx, kr, conn, ecfg, user.WithDefaultAccount(keyName)) + if err != nil { + return fmt.Errorf("setup tx client: %w", err) + } + + amount := sdk.NewCoin(appconsts.BondDenom, sdkmath.NewInt(amountUtia)) + msg := &fibretypes.MsgDepositToEscrow{ + Signer: addr.String(), + Amount: amount, + } + fmt.Printf("submitting MsgDepositToEscrow: signer=%s amount=%s\n", addr.String(), amount.String()) + resp, err := tc.SubmitTx(ctx, []sdk.Msg{msg}, user.SetGasLimit(gasLimit), user.SetFee(feeUtia)) + if err != nil { + return fmt.Errorf("submit tx: %w", err) + } + if resp.Code != 0 { + return fmt.Errorf("deposit tx failed: code=%d codespace=%s", resp.Code, resp.Codespace) + } + fmt.Printf("deposit included: height=%d txhash=%s\n", resp.Height, resp.TxHash) + + // Sanity: read the escrow back so the operator sees the + // new balance immediately. + qc := fibretypes.NewQueryClient(conn) + res, err := qc.EscrowAccount(ctx, &fibretypes.QueryEscrowAccountRequest{Signer: addr.String()}) + if err != nil { + fmt.Printf("(could not query escrow back: %v)\n", err) + return nil + } + if !res.Found { + fmt.Println("(escrow not found after deposit — chain may need another block)") + return nil + } + fmt.Printf("escrow balance: %s\n", res.EscrowAccount.Balance.String()) + return nil + }, + } + cmd.Flags().StringVar(&consensusGRPC, "consensus-grpc", "", "celestia-app gRPC address (host:port). Required.") + cmd.Flags().StringVar(&keyringDir, "keyring-dir", defaultKeyringDir(), "directory holding the bench keyring") + cmd.Flags().StringVar(&keyName, "key-name", "default", "key in the keyring to deposit from") + cmd.Flags().Int64Var(&amountUtia, "amount", 50_000_000, "amount in utia to deposit (default 50 TIA)") + cmd.Flags().Uint64Var(&gasLimit, "gas-limit", 200_000, "tx gas limit") + cmd.Flags().Uint64Var(&feeUtia, "fee", 5_000, "fee in utia") + _ = cobra.MarkFlagRequired(cmd.Flags(), "consensus-grpc") + return cmd +} + +func escrowQueryCmd() *cobra.Command { + var ( + consensusGRPC string + keyringDir string + keyName string + ) + cmd := &cobra.Command{ + Use: "query", + Short: "Print the current Fibre escrow balance for the bench account", + RunE: func(cmd *cobra.Command, args []string) error { + ctx, cancel := context.WithTimeout(cmd.Context(), 30*time.Second) + defer cancel() + + kr, err := openKeyring(keyringDir) + if err != nil { + return err + } + rec, err := kr.Key(keyName) + if err != nil { + return fmt.Errorf("key %q not found: %w", keyName, err) + } + addr, err := rec.GetAddress() + if err != nil { + return err + } + + conn, err := grpc.NewClient(consensusGRPC, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return err + } + defer conn.Close() + + qc := fibretypes.NewQueryClient(conn) + res, err := qc.EscrowAccount(ctx, &fibretypes.QueryEscrowAccountRequest{Signer: addr.String()}) + if err != nil { + return err + } + if !res.Found { + fmt.Printf("address: %s\nescrow: not found (deposit first)\n", addr.String()) + return nil + } + fmt.Printf("address: %s\nescrow: %s\n", addr.String(), res.EscrowAccount.Balance.String()) + return nil + }, + } + cmd.Flags().StringVar(&consensusGRPC, "consensus-grpc", "", "celestia-app gRPC address. 
Required.") + cmd.Flags().StringVar(&keyringDir, "keyring-dir", defaultKeyringDir(), "keyring directory") + cmd.Flags().StringVar(&keyName, "key-name", "default", "key name") + _ = cobra.MarkFlagRequired(cmd.Flags(), "consensus-grpc") + return cmd +} diff --git a/tools/celestia-node-fiber/cmd/fiber-bench/executor.go b/tools/celestia-node-fiber/cmd/fiber-bench/executor.go new file mode 100644 index 0000000000..088d21ffef --- /dev/null +++ b/tools/celestia-node-fiber/cmd/fiber-bench/executor.go @@ -0,0 +1,156 @@ +package main + +import ( + "context" + "sync/atomic" + "time" + + coreexecution "github.com/evstack/ev-node/core/execution" +) + +// inMemExecutor is a minimal core.Executor that: +// - accepts injected txs via a buffered channel (the "mempool") +// - drains them in GetTxs (non-blocking) +// - "executes" by counting (no state machine) +// - returns a constant state root, so we don't pay O(N) state-root cost on +// every block (which would dominate the measurement and tell us nothing +// about ev-node's batching/submitting performance). +// +// Use FilterTxs's size cap to enforce the configured per-block byte budget. +type inMemExecutor struct { + txCh chan []byte + + injected atomic.Uint64 + dropped atomic.Uint64 + blocksProduced atomic.Uint64 + totalExecutedTxs atomic.Uint64 + + // mempoolHigh tracks the maximum mempool depth observed (snapshot). + mempoolHigh atomic.Int64 + + // constStateRoot is what every block reports as its post-state. The + // measurement target is ev-node, not state computation. + constStateRoot []byte +} + +func newInMemExecutor(mempoolSize int) *inMemExecutor { + return &inMemExecutor{ + txCh: make(chan []byte, mempoolSize), + constStateRoot: []byte("fiber-bench-const-state-root"), + } +} + +// InjectTx is the bench's "mempool entry". Backpressures via channel +// capacity: full → drop and increment counter so the operator sees it. +func (e *inMemExecutor) InjectTx(tx []byte) bool { + select { + case e.txCh <- tx: + e.injected.Add(1) + // Loose mempool-depth high-water; not a hot-path concern. + if d := int64(len(e.txCh)); d > e.mempoolHigh.Load() { + e.mempoolHigh.Store(d) + } + return true + default: + e.dropped.Add(1) + return false + } +} + +func (e *inMemExecutor) MempoolDepth() int { return len(e.txCh) } + +func (e *inMemExecutor) Stats() (injected, dropped, blocks, txs uint64, mempoolHigh int64) { + return e.injected.Load(), + e.dropped.Load(), + e.blocksProduced.Load(), + e.totalExecutedTxs.Load(), + e.mempoolHigh.Load() +} + +// InitChain is called once at genesis. +func (e *inMemExecutor) InitChain(_ context.Context, _ time.Time, _ uint64, _ string) ([]byte, error) { + return e.constStateRoot, nil +} + +// GetTxs drains the mempool channel. Non-blocking — returns whatever is +// currently buffered. ev-node's reaper polls this on its own cadence. +func (e *inMemExecutor) GetTxs(ctx context.Context) ([][]byte, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + n := len(e.txCh) + if n == 0 { + return nil, nil + } + txs := make([][]byte, 0, n) + for i := 0; i < n; i++ { + select { + case tx := <-e.txCh: + txs = append(txs, tx) + default: + return txs, nil + } + } + return txs, nil +} + +// ExecuteTxs is intentionally a no-op state transition: count txs, return +// a constant root. The whole point of this executor is to take state +// computation out of the measurement. 
+func (e *inMemExecutor) ExecuteTxs(_ context.Context, txs [][]byte, _ uint64, _ time.Time, _ []byte) ([]byte, error) { + e.blocksProduced.Add(1) + e.totalExecutedTxs.Add(uint64(len(txs))) + return e.constStateRoot, nil +} + +func (e *inMemExecutor) SetFinal(_ context.Context, _ uint64) error { return nil } +func (e *inMemExecutor) Rollback(_ context.Context, _ uint64) error { return nil } + +func (e *inMemExecutor) GetExecutionInfo(_ context.Context) (coreexecution.ExecutionInfo, error) { + // MaxGas=0 means "no gas-based filter"; the size cap (FilterTxs) is what + // bounds per-block bytes. + return coreexecution.ExecutionInfo{MaxGas: 0}, nil +} + +// FilterTxs enforces the configured per-block byte budget. Mirrors the +// existing testapp KV executor's behavior: oversized txs are dropped, the +// rest fill until the budget is hit and overflow is postponed for the +// next block. We don't validate tx content — txs from the load generator +// are well-formed by construction. +// +// We honor maxBytes as-is. Per-block proto/Metadata overhead is the +// responsibility of the block-size cap (now anchored to Fibre's actual +// MaxPayload in block/internal/common/consts.go), not the executor. +func (e *inMemExecutor) FilterTxs(_ context.Context, txs [][]byte, maxBytes, _ uint64, _ bool) ([]coreexecution.FilterStatus, error) { + out := make([]coreexecution.FilterStatus, len(txs)) + var used uint64 + limitReached := false + for i, tx := range txs { + size := uint64(len(tx)) + if size == 0 { + out[i] = coreexecution.FilterRemove + continue + } + if maxBytes > 0 && size > maxBytes { + out[i] = coreexecution.FilterRemove + continue + } + if limitReached { + out[i] = coreexecution.FilterPostpone + continue + } + if maxBytes > 0 && used+size > maxBytes { + limitReached = true + out[i] = coreexecution.FilterPostpone + continue + } + used += size + out[i] = coreexecution.FilterOK + } + return out, nil +} + +var _ coreexecution.Executor = (*inMemExecutor)(nil) diff --git a/tools/celestia-node-fiber/cmd/fiber-bench/fibre.go b/tools/celestia-node-fiber/cmd/fiber-bench/fibre.go new file mode 100644 index 0000000000..3a366d0824 --- /dev/null +++ b/tools/celestia-node-fiber/cmd/fiber-bench/fibre.go @@ -0,0 +1,139 @@ +package main + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + appfibre "github.com/celestiaorg/celestia-app/v9/fibre" + libshare "github.com/celestiaorg/go-square/v4/share" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/fibre" + blobapi "github.com/celestiaorg/celestia-node/nodebuilder/blob" + nodebuilderfibre "github.com/celestiaorg/celestia-node/nodebuilder/fibre" + "github.com/celestiaorg/celestia-node/state/txclient" + + "github.com/evstack/ev-node/block" + cnfiber "github.com/evstack/ev-node/tools/celestia-node-fiber" +) + +// buildFibreAdapter constructs a celestia-node-fiber Adapter that talks +// directly to consensus gRPC + FSPs — no bridge node hop. We do this by +// rebuilding only the submit-side wiring of celestia-node's api/client +// (which is otherwise eager about dialing BridgeDAAddr in NewReadClient). +// +// The returned adapter only supports Upload (and Download via FSPs). +// Listen would invoke a stub blob.Subscribe that returns an error; +// ev-node's aggregator-only setup never calls it (no syncer, no based +// sequencer), so this is fine. 
+// +// The returned closer releases the gRPC connection and stops the +// underlying app-level fibre client. +func buildFibreAdapter( + ctx context.Context, + consensusGRPC string, + keyName string, + kr keyring.Keyring, +) (block.FiberClient, func() error, error) { + if consensusGRPC == "" { + return nil, nil, errors.New("consensus gRPC address is required") + } + if keyName == "" { + return nil, nil, errors.New("key name is required") + } + + conn, err := grpc.NewClient( + consensusGRPC, + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + return nil, nil, fmt.Errorf("dial consensus grpc %q: %w", consensusGRPC, err) + } + + tc, err := txclient.NewTxClient(kr, keyName, conn) + if err != nil { + _ = conn.Close() + return nil, nil, fmt.Errorf("new tx client: %w", err) + } + if err := tc.Start(ctx); err != nil { + _ = conn.Close() + return nil, nil, fmt.Errorf("start tx client: %w", err) + } + + appCfg := appfibre.DefaultClientConfig() + appCfg.DefaultKeyName = keyName + appCfg.StateAddress = conn.Target() + appClient, err := appfibre.NewClient(kr, appCfg) + if err != nil { + _ = tc.Stop(ctx) + _ = conn.Close() + return nil, nil, fmt.Errorf("new app fibre client: %w", err) + } + if err := appClient.Start(ctx); err != nil { + _ = tc.Stop(ctx) + _ = conn.Close() + return nil, nil, fmt.Errorf("start app fibre client: %w", err) + } + + accClient := fibre.NewAccountClient(tc, conn) + svc := fibre.NewService(appClient, tc, accClient) + module := nodebuilderfibre.NewModule(svc) + + adapter := cnfiber.FromModules(module, noBridgeBlob{}, 0) + + closer := func() error { + stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + var errs error + if err := appClient.Stop(stopCtx); err != nil { + errs = errors.Join(errs, err) + } + if err := tc.Stop(stopCtx); err != nil { + errs = errors.Join(errs, err) + } + if err := conn.Close(); err != nil { + errs = errors.Join(errs, err) + } + return errs + } + + return adapter, closer, nil +} + +// noBridgeBlob errors on every call. The only path that would invoke it +// is Listen→Subscribe, which our aggregator-only single-sequencer node +// never reaches. A clear error here surfaces an assumption break instead +// of a nil panic. 
+type noBridgeBlob struct{} + +var _ blobapi.Module = noBridgeBlob{} + +var errNoBridge = errors.New("fiber-bench: blob module not supported (running without a bridge node)") + +func (noBridgeBlob) Submit(context.Context, []*blob.Blob, *blob.SubmitOptions) (uint64, error) { + return 0, errNoBridge +} +func (noBridgeBlob) Get(context.Context, uint64, libshare.Namespace, blob.Commitment) (*blob.Blob, error) { + return nil, errNoBridge +} +func (noBridgeBlob) GetAll(context.Context, uint64, []libshare.Namespace) ([]*blob.Blob, error) { + return nil, errNoBridge +} +func (noBridgeBlob) GetProof(context.Context, uint64, libshare.Namespace, blob.Commitment) (*blob.Proof, error) { + return nil, errNoBridge +} +func (noBridgeBlob) Included(context.Context, uint64, libshare.Namespace, *blob.Proof, blob.Commitment) (bool, error) { + return false, errNoBridge +} +func (noBridgeBlob) GetCommitmentProof(context.Context, uint64, libshare.Namespace, []byte) (*blob.CommitmentProof, error) { + return nil, errNoBridge +} +func (noBridgeBlob) Subscribe(context.Context, libshare.Namespace, uint64) (<-chan *blob.SubscriptionResponse, error) { + return nil, errNoBridge +} diff --git a/tools/celestia-node-fiber/cmd/fiber-bench/instrumented.go b/tools/celestia-node-fiber/cmd/fiber-bench/instrumented.go new file mode 100644 index 0000000000..a53d0eddcf --- /dev/null +++ b/tools/celestia-node-fiber/cmd/fiber-bench/instrumented.go @@ -0,0 +1,142 @@ +package main + +import ( + "context" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/evstack/ev-node/block" +) + +// instrumentedAdapter wraps a block.FiberClient and records latency +// per Upload (and per Download) call. The bench's stats printer +// reads percentiles from here so we can answer "is the bottleneck +// ev-node's submitter serialization, or actual Fibre Upload time?". +// +// We keep the last N samples in a ring buffer rather than an +// unbounded slice so a long run does not grow memory; N is sized for +// a 30-minute run at peak block rate. 
+type instrumentedAdapter struct { + inner block.FiberClient + + uploadCount atomic.Uint64 + uploadFailures atomic.Uint64 + uploadBytesSent atomic.Uint64 + + mu sync.Mutex + samples []time.Duration // ring buffer of recent durations + idx int // next slot to write + full bool // ring buffer has wrapped at least once +} + +const uploadSampleCapacity = 4096 + +func newInstrumentedAdapter(inner block.FiberClient) *instrumentedAdapter { + return &instrumentedAdapter{ + inner: inner, + samples: make([]time.Duration, uploadSampleCapacity), + } +} + +func (m *instrumentedAdapter) Head(ctx context.Context) (uint64, error) { + return 0, nil +} + +func (a *instrumentedAdapter) Upload(ctx context.Context, namespace []byte, data []byte) (block.FiberUploadResult, error) { + start := time.Now() + res, err := a.inner.Upload(ctx, namespace, data) + elapsed := time.Since(start) + + a.uploadCount.Add(1) + if err != nil { + a.uploadFailures.Add(1) + } else { + a.uploadBytesSent.Add(uint64(len(data))) + } + + a.mu.Lock() + a.samples[a.idx] = elapsed + a.idx = (a.idx + 1) % len(a.samples) + if a.idx == 0 { + a.full = true + } + a.mu.Unlock() + + return res, err +} + +func (a *instrumentedAdapter) Download(ctx context.Context, blobID block.FiberBlobID) ([]byte, error) { + return a.inner.Download(ctx, blobID) +} + +func (a *instrumentedAdapter) Listen(ctx context.Context, namespace []byte, fromHeight uint64) (<-chan block.FiberBlobEvent, error) { + return a.inner.Listen(ctx, namespace, fromHeight) +} + +// uploadStats returns snapshot p50, p99, mean of recent Upload +// durations plus cumulative counters. Returns zero durations when +// no samples have been recorded yet. +type uploadStats struct { + Count uint64 + Failures uint64 + BytesOK uint64 + P50 time.Duration + P99 time.Duration + Mean time.Duration + Max time.Duration +} + +func (a *instrumentedAdapter) uploadStats() uploadStats { + a.mu.Lock() + var n int + if a.full { + n = len(a.samples) + } else { + n = a.idx + } + if n == 0 { + a.mu.Unlock() + return uploadStats{ + Count: a.uploadCount.Load(), + Failures: a.uploadFailures.Load(), + BytesOK: a.uploadBytesSent.Load(), + } + } + // Copy under lock so we can sort outside it. + cp := make([]time.Duration, n) + copy(cp, a.samples[:n]) + a.mu.Unlock() + + sort.Slice(cp, func(i, j int) bool { return cp[i] < cp[j] }) + + var sum time.Duration + for _, d := range cp { + sum += d + } + + pct := func(p float64) time.Duration { + idx := int(float64(n-1) * p) + if idx < 0 { + idx = 0 + } + if idx >= n { + idx = n - 1 + } + return cp[idx] + } + + return uploadStats{ + Count: a.uploadCount.Load(), + Failures: a.uploadFailures.Load(), + BytesOK: a.uploadBytesSent.Load(), + P50: pct(0.50), + P99: pct(0.99), + Mean: sum / time.Duration(n), + Max: cp[n-1], + } +} + +// Compile-time guard: must satisfy the same interface ev-node consumes. 
+var _ block.FiberClient = (*instrumentedAdapter)(nil) diff --git a/tools/celestia-node-fiber/cmd/fiber-bench/keys.go b/tools/celestia-node-fiber/cmd/fiber-bench/keys.go new file mode 100644 index 0000000000..7b1e83ce93 --- /dev/null +++ b/tools/celestia-node-fiber/cmd/fiber-bench/keys.go @@ -0,0 +1,153 @@ +package main + +import ( + "fmt" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/spf13/cobra" +) + +// openKeyring opens (or creates if missing) a test-backend keyring at +// keyringDir. The "test" backend is unencrypted on disk — fine for a +// bench account, not fine for anything mainnet. +func openKeyring(keyringDir string) (keyring.Keyring, error) { + interfaceRegistry := types.NewInterfaceRegistry() + cryptocodec.RegisterInterfaces(interfaceRegistry) + cdc := codec.NewProtoCodec(interfaceRegistry) + return keyring.New( + "fiber-bench", + keyring.BackendTest, + keyringDir, + nil, + cdc, + ) +} + +func keysCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "keys", + Short: "Manage the cosmos keyring used to sign Fibre payment promises", + } + cmd.AddCommand(keysAddCmd(), keysShowCmd(), keysListCmd()) + return cmd +} + +func keysAddCmd() *cobra.Command { + var keyringDir string + cmd := &cobra.Command{ + Use: "add ", + Short: "Create a new key in the bench keyring (test backend, unencrypted)", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + name := args[0] + kr, err := openKeyring(keyringDir) + if err != nil { + return fmt.Errorf("open keyring: %w", err) + } + + if rec, _ := kr.Key(name); rec != nil { + return fmt.Errorf("key %q already exists in keyring %s", name, keyringDir) + } + + rec, mnemonic, err := kr.NewMnemonic( + name, + keyring.English, + sdk.FullFundraiserPath, + keyring.DefaultBIP39Passphrase, + hd.Secp256k1, + ) + if err != nil { + return fmt.Errorf("create key: %w", err) + } + + addr, err := rec.GetAddress() + if err != nil { + return fmt.Errorf("get address: %w", err) + } + + fmt.Printf("name: %s\n", name) + fmt.Printf("address: %s\n", addr.String()) + fmt.Printf("keyring: %s (backend=test)\n", keyringDir) + fmt.Printf("\nmnemonic (back this up — printed once, never stored elsewhere):\n%s\n", mnemonic) + fmt.Printf("\nNext steps:\n") + fmt.Printf(" 1. Top up the address above with utia on the chain.\n") + fmt.Printf(" 2. 
Deposit into the Fibre escrow with celestia-appd or your tooling, e.g.\n")
+			fmt.Printf("     celestia-appd tx fibre deposit-escrow --from %s --keyring-backend test --keyring-dir %s --chain-id <chain-id> --node tcp://<rpc-host>:26657\n", name, keyringDir)
+			return nil
+		},
+	}
+	cmd.Flags().StringVar(&keyringDir, "keyring-dir", defaultKeyringDir(), "directory to store keyring files (test backend)")
+	return cmd
+}
+
+func keysShowCmd() *cobra.Command {
+	var keyringDir string
+	cmd := &cobra.Command{
+		Use:   "show <name>",
+		Short: "Print the address of an existing key",
+		Args:  cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			kr, err := openKeyring(keyringDir)
+			if err != nil {
+				return fmt.Errorf("open keyring: %w", err)
+			}
+			rec, err := kr.Key(args[0])
+			if err != nil {
+				return fmt.Errorf("get key: %w", err)
+			}
+			addr, err := rec.GetAddress()
+			if err != nil {
+				return fmt.Errorf("get address: %w", err)
+			}
+			fmt.Printf("name: %s\n", args[0])
+			fmt.Printf("address: %s\n", addr.String())
+			fmt.Printf("keyring: %s (backend=test)\n", keyringDir)
+			return nil
+		},
+	}
+	cmd.Flags().StringVar(&keyringDir, "keyring-dir", defaultKeyringDir(), "directory holding keyring files (test backend)")
+	return cmd
+}
+
+func keysListCmd() *cobra.Command {
+	var keyringDir string
+	cmd := &cobra.Command{
+		Use:   "list",
+		Short: "List all keys in the bench keyring",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			kr, err := openKeyring(keyringDir)
+			if err != nil {
+				return fmt.Errorf("open keyring: %w", err)
+			}
+			records, err := kr.List()
+			if err != nil {
+				return fmt.Errorf("list keys: %w", err)
+			}
+			if len(records) == 0 {
+				fmt.Printf("(empty — keyring at %s)\n", keyringDir)
+				return nil
+			}
+			for _, rec := range records {
+				addr, err := rec.GetAddress()
+				if err != nil {
+					return err
+				}
+				fmt.Printf("%-20s %s\n", rec.Name, addr.String())
+			}
+			return nil
+		},
+	}
+	cmd.Flags().StringVar(&keyringDir, "keyring-dir", defaultKeyringDir(), "directory holding keyring files (test backend)")
+	return cmd
+}
+
+// silenceUnusedClient keeps the SDK client package referenced even if a
+// future refactor stops using it directly — convenient when wiring a
+// proper send/escrow command.
+var _ = client.Context{}
diff --git a/tools/celestia-node-fiber/cmd/fiber-bench/loader.go b/tools/celestia-node-fiber/cmd/fiber-bench/loader.go
new file mode 100644
index 0000000000..4f527e8dc8
--- /dev/null
+++ b/tools/celestia-node-fiber/cmd/fiber-bench/loader.go
@@ -0,0 +1,89 @@
+package main
+
+import (
+	"context"
+	"encoding/binary"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// loaderBackoff is what each worker waits when InjectTx returns false
+// because the mempool channel is full. A real sleep (rather than
+// runtime.Gosched) caps the per-worker drop rate so allocation
+// pressure scales with actual drain throughput; without it, full-
+// mempool workers spin a tight allocate-then-drop loop at ~200k
+// iter/s/worker — millions of short-lived slices per second across the
+// pool, which drove the OOM kills we hit early in the investigation.
+// 100 µs caps each worker at ~10k drops/s when the mempool is
+// permanently full.
+const loaderBackoff = 100 * time.Microsecond
+
+// loader pumps fixed-size payloads into the in-mem executor as fast as it
+// can. Backpressure comes from the executor's bounded mempool channel:
+// when full, InjectTx returns false and we count it as dropped.
+//
+// Each payload is `txSize` bytes: a tx-id (uint64) prefix + zero filler.
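+// With the default --tx-size of 200 that is an 8-byte big-endian id
+// followed by 192 zero bytes.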
+// Non-deterministic content isn't important — ev-node hashes them for
+// the seen-tx cache, so any unique-per-tx prefix is enough to avoid
+// dedup hits.
+type loader struct {
+	exec    *inMemExecutor
+	workers int
+	txSize  int
+
+	// counter monotonically increments per generated tx so the
+	// SHA-256-based seen cache never falsely dedups.
+	counter atomic.Uint64
+}
+
+func newLoader(exec *inMemExecutor, workers, txSize int) *loader {
+	if workers < 1 {
+		workers = 1
+	}
+	if txSize < 8 {
+		txSize = 8
+	}
+	return &loader{
+		exec:    exec,
+		workers: workers,
+		txSize:  txSize,
+	}
+}
+
+// run blocks until ctx is done. Each worker spins on InjectTx; when the
+// mempool is full it sleeps for loaderBackoff before generating the next
+// payload (the drop has already been counted), which keeps the mempool
+// pressed against its bound without the tight allocate-then-drop loop
+// the backoff exists to prevent.
+func (l *loader) run(ctx context.Context) {
+	var wg sync.WaitGroup
+	for i := 0; i < l.workers; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			buf := make([]byte, l.txSize)
+			for {
+				if ctx.Err() != nil {
+					return
+				}
+				id := l.counter.Add(1)
+				binary.BigEndian.PutUint64(buf, id)
+				// Copy on each Inject — the executor's mempool is a
+				// channel of []byte, and the consumer keeps a
+				// reference. Reusing the same buffer would corrupt
+				// in-flight items.
+				tx := make([]byte, l.txSize)
+				copy(tx, buf)
+				if !l.exec.InjectTx(tx) {
+					// Mempool full (already counted as dropped) — back
+					// off briefly before generating the next payload.
+					select {
+					case <-ctx.Done():
+						return
+					case <-time.After(loaderBackoff):
+					}
+				}
+			}
+		}()
+	}
+	wg.Wait()
+}
diff --git a/tools/celestia-node-fiber/cmd/fiber-bench/main.go b/tools/celestia-node-fiber/cmd/fiber-bench/main.go
new file mode 100644
index 0000000000..1671e4a784
--- /dev/null
+++ b/tools/celestia-node-fiber/cmd/fiber-bench/main.go
@@ -0,0 +1,60 @@
+// Package main is the fiber-bench tool: a single-sequencer ev-node wired
+// to a remote Fibre network for throughput measurement.
+//
+// It deliberately runs in the simplest possible configuration:
+//
+// - Solo sequencer (no based / no forced inclusion)
+// - Aggregator-only (no syncer, no P2P)
+// - In-memory executor with constant state root (no state computation
+//   cost in the measurement)
+// - Bridge-bypass Fibre adapter (Upload directly via consensus gRPC + FSPs)
+//
+// The intent is a fail-fast baseline so we can isolate ev-node's batching
+// + DA-submit pipeline from everything else.
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+
+	// Pull celestia-app params for its init() that sets the global SDK
+	// bech32 prefix to "celestia" — must run before any keyring operation
+	// that prints addresses.
+	_ "github.com/celestiaorg/celestia-app/v9/app/params"
+
+	rollconf "github.com/evstack/ev-node/pkg/config"
+)
+
+// AppName names the binary. The home dir intentionally lives one level
+// deeper at ~/.fiber-bench/node so the bench's --keep-home=false default
+// (which calls os.RemoveAll on cfg.RootDir) cannot wipe the cosmos keyring
+// at ~/.fiber-bench/keyring.
+const (
+	AppName            = "fiber-bench"
+	defaultHomeAppName = AppName + "/node"
+)
+
+func main() {
+	root := &cobra.Command{
+		Use:   AppName,
+		Short: "Single-sequencer ev-node throughput bench against a remote Fibre network",
+	}
+
+	// Register --home, --evnode.log.level, --evnode.log.format,
+	// --evnode.log.trace on the root so every subcommand inherits them
+	// (matches apps/testapp).
+ rollconf.AddGlobalFlags(root, defaultHomeAppName) + + root.AddCommand( + keysCmd(), + escrowCmd(), + runCmd(), + ) + + if err := root.Execute(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} diff --git a/tools/celestia-node-fiber/cmd/fiber-bench/run-bench.sh b/tools/celestia-node-fiber/cmd/fiber-bench/run-bench.sh new file mode 100755 index 0000000000..43ea31d0be --- /dev/null +++ b/tools/celestia-node-fiber/cmd/fiber-bench/run-bench.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash +# run-bench.sh — convenience wrapper around `fiber-bench` for the +# common case: build the binary if missing, ensure a key exists, +# print the address, then start a run. +# +# Usage: +# CONSENSUS_GRPC=139.59.229.101:9091 \ +# CHAIN_ID=talis-evnode \ +# ./run-bench.sh [duration] [workers] +# +# All optional flags pass through via FIBER_BENCH_ARGS. +set -euo pipefail + +cd "$(dirname "$0")/../../.." + +CONSENSUS_GRPC="${CONSENSUS_GRPC:-}" +CHAIN_ID="${CHAIN_ID:-}" +KEYRING_DIR="${KEYRING_DIR:-$HOME/.fiber-bench/keyring}" +KEY_NAME="${KEY_NAME:-bench}" +DURATION="${1:-${DURATION:-2m}}" +WORKERS="${2:-${WORKERS:-32}}" +TX_SIZE="${TX_SIZE:-200}" +BLOCK_TIME="${BLOCK_TIME:-1s}" +BATCHING="${BATCHING:-immediate}" +HOME_DIR="${HOME_DIR:-$HOME/.fiber-bench/node}" + +if [[ -z "$CONSENSUS_GRPC" || -z "$CHAIN_ID" ]]; then + echo "ERROR: CONSENSUS_GRPC and CHAIN_ID must be set" >&2 + echo " example: CONSENSUS_GRPC=host:9091 CHAIN_ID=talis-evnode $0" >&2 + exit 1 +fi + +BIN="$(pwd)/bin/fiber-bench" +mkdir -p "$(dirname "$BIN")" + +if [[ ! -x "$BIN" || -n "${REBUILD:-}" ]]; then + echo "==> building fiber-bench (-tags fibre)" + go build -tags fibre -o "$BIN" ./cmd/fiber-bench/ +fi + +# Create the bench key if missing — idempotent: `keys add` errors if the +# key exists, so we only run it on a fresh keyring. +if ! "$BIN" keys show "$KEY_NAME" --keyring-dir "$KEYRING_DIR" >/dev/null 2>&1; then + echo "==> creating bench key '$KEY_NAME' at $KEYRING_DIR" + "$BIN" keys add "$KEY_NAME" --keyring-dir "$KEYRING_DIR" + echo + echo "Top up the address above and run:" + echo " $BIN escrow deposit --consensus-grpc $CONSENSUS_GRPC \\" + echo " --keyring-dir $KEYRING_DIR --key-name $KEY_NAME --amount 50000000" + echo + echo "Then re-run this script." 
+ exit 0 +fi + +echo "==> bench account:" +"$BIN" keys show "$KEY_NAME" --keyring-dir "$KEYRING_DIR" + +echo "==> escrow:" +"$BIN" escrow query --consensus-grpc "$CONSENSUS_GRPC" \ + --keyring-dir "$KEYRING_DIR" --key-name "$KEY_NAME" || true + +echo "==> starting bench: duration=$DURATION workers=$WORKERS tx_size=$TX_SIZE block_time=$BLOCK_TIME batching=$BATCHING" +exec "$BIN" run \ + --evnode.da.fiber.consensus_address "$CONSENSUS_GRPC" \ + --evnode.da.fiber.consensus_chain_id "$CHAIN_ID" \ + --evnode.da.fiber.key_name "$KEY_NAME" \ + --keyring-dir "$KEYRING_DIR" \ + --home "$HOME_DIR" \ + --duration "$DURATION" \ + --workers "$WORKERS" \ + --tx-size "$TX_SIZE" \ + --evnode.node.block_time "$BLOCK_TIME" \ + --evnode.da.batching_strategy "$BATCHING" \ + ${FIBER_BENCH_ARGS:-} diff --git a/tools/celestia-node-fiber/cmd/fiber-bench/run.go b/tools/celestia-node-fiber/cmd/fiber-bench/run.go new file mode 100644 index 0000000000..0666790c26 --- /dev/null +++ b/tools/celestia-node-fiber/cmd/fiber-bench/run.go @@ -0,0 +1,306 @@ +package main + +import ( + "context" + "errors" + "fmt" + "os" + "os/signal" + "path/filepath" + "sync" + "syscall" + "time" + + "github.com/spf13/cobra" + + "github.com/evstack/ev-node/node" + rollcmd "github.com/evstack/ev-node/pkg/cmd" + rollconf "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/pkg/p2p/key" + "github.com/evstack/ev-node/pkg/sequencers/solo" + "github.com/evstack/ev-node/pkg/signer/file" + "github.com/evstack/ev-node/pkg/store" +) + +// Bench-local flag names. The rest come from rollconf.AddFlags +// (--evnode.da.fiber.consensus_address, --evnode.da.batching_strategy, …) +// and rollconf.AddGlobalFlags (--home, --log.level, …). +const ( + flagKeyringDir = "keyring-dir" + flagKeepHome = "keep-home" + flagDuration = "duration" + flagWorkers = "workers" + flagTxSize = "tx-size" + flagMempoolSize = "mempool-size" + flagStatsInterval = "stats-interval" + flagSignerPassphrase = "signer-passphrase" +) + +func runCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "run", + Short: "Run the bench: start a single-sequencer ev-node against a Fibre network and pump load", + RunE: runBench, + } + + // Canonical ev-node flags: --evnode.node.*, --evnode.da.*, + // --evnode.da.fiber.*, --evnode.signer.*, --evnode.instrumentation.*, + // --evnode.p2p.*, --evnode.signer.passphrase_file, etc. The bench + // applies opinionated defaults post-parse for the ones a thoughtful + // operator would otherwise have to flip every run (see runBench). + rollconf.AddFlags(cmd) + + flags := cmd.Flags() + flags.String(flagKeyringDir, defaultKeyringDir(), "directory holding the bench cosmos keyring (test backend) used to sign Fibre payment promises") + flags.Bool(flagKeepHome, false, "do not wipe the ev-node home before starting (resumes prior state)") + flags.Duration(flagDuration, 60*time.Second, "how long to run the bench before stopping (0 = until SIGINT)") + flags.Int(flagWorkers, 32, "number of concurrent tx-injection goroutines") + flags.Int(flagTxSize, 200, "size of each generated tx in bytes") + flags.Int(flagMempoolSize, 1_000_000, "size of the in-mem executor's mempool channel (backpressure boundary)") + flags.Duration(flagStatsInterval, time.Second, "how often to print a stats line") + flags.String(flagSignerPassphrase, "fiber-bench-passphrase", "passphrase for the ev-node file signer (block-signing key, NOT the cosmos one). 
Written to a temp file consumed by --evnode.signer.passphrase_file.") + + // Fibre consensus address/chain ID don't have empty defaults + // (DefaultConfig points at 127.0.0.1:9090 / mocha-4), but those + // values are sentinels — running the bench against them is never + // what the operator wants. Force them through. + _ = cobra.MarkFlagRequired(flags, rollconf.FlagDAFiberConsensusAddress) + _ = cobra.MarkFlagRequired(flags, rollconf.FlagDAFiberConsensusChainID) + + return cmd +} + +func runBench(cobraCmd *cobra.Command, _ []string) error { + cfg, err := rollcmd.ParseConfig(cobraCmd) + if err != nil { + return err + } + applyBenchDefaults(cobraCmd, &cfg) + + // Re-validate after the bench's overrides — ParseConfig already ran + // once on parse, but we mutated Aggregator/Fiber/etc. afterwards. + if err := cfg.Validate(); err != nil { + return fmt.Errorf("config invalid after bench overrides: %w", err) + } + + logger := rollcmd.SetupLogger(cfg.Log) + + keyringDir, _ := cobraCmd.Flags().GetString(flagKeyringDir) + keepHome, _ := cobraCmd.Flags().GetBool(flagKeepHome) + duration, _ := cobraCmd.Flags().GetDuration(flagDuration) + workers, _ := cobraCmd.Flags().GetInt(flagWorkers) + txSize, _ := cobraCmd.Flags().GetInt(flagTxSize) + mempoolSize, _ := cobraCmd.Flags().GetInt(flagMempoolSize) + statsInterval, _ := cobraCmd.Flags().GetDuration(flagStatsInterval) + signerPassphrase, _ := cobraCmd.Flags().GetString(flagSignerPassphrase) + + if !keepHome { + _ = os.RemoveAll(cfg.RootDir) + } + if err := os.MkdirAll(cfg.RootDir, 0o755); err != nil { + return fmt.Errorf("create home %s: %w", cfg.RootDir, err) + } + + // 1) Cosmos keyring + bridge-bypass Fibre adapter — the two genuinely + // fiber-bench-specific pieces. Neither lives in the production wiring + // path. + kr, err := openKeyring(keyringDir) + if err != nil { + return fmt.Errorf("open keyring at %s: %w", keyringDir, err) + } + rec, err := kr.Key(cfg.DA.Fiber.KeyName) + if err != nil { + return fmt.Errorf("key %q not found in keyring %s — run `fiber-bench keys add %s` first: %w", + cfg.DA.Fiber.KeyName, keyringDir, cfg.DA.Fiber.KeyName, err) + } + addr, err := rec.GetAddress() + if err != nil { + return fmt.Errorf("derive key address: %w", err) + } + logger.Info().Str("address", addr.String()).Str("key", cfg.DA.Fiber.KeyName).Msg("loaded fibre signing key") + + logger.Info().Str("grpc", cfg.DA.Fiber.ConsensusAddress).Msg("dialing consensus gRPC") + innerFiberClient, fiberClose, err := buildFibreAdapter(cobraCmd.Context(), cfg.DA.Fiber.ConsensusAddress, cfg.DA.Fiber.KeyName, kr) + if err != nil { + return fmt.Errorf("build fibre adapter: %w", err) + } + defer func() { + if err := fiberClose(); err != nil { + logger.Warn().Err(err).Msg("fibre adapter close") + } + }() + // Wrap in a latency-recording proxy so the stats printer can show + // per-Upload p50/p99. + fiberClient := newInstrumentedAdapter(innerFiberClient) + + // 2) ev-node block-signing key. Created in cfg.Signer.SignerPath if + // missing. cmd.StartNode reads the passphrase from the path stored + // in --evnode.signer.passphrase_file; we write a temp file from + // --signer-passphrase and inject the flag value so the canonical + // signer-loading path works without us asking the operator to manage + // a passphrase file by hand. 
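+	// Sketch of the resulting layout under the default home
+	// (~/.fiber-bench/node); exact paths always follow cfg, not this comment:
+	//
+	//	config/signer.json     block-signing key (created just below if missing)
+	//	config/node_key.json   p2p node key (loaded/generated in step 4)
+	//	passphrase.txt         fed to --evnode.signer.passphrase_file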
+ signerDir := cfg.Signer.SignerPath + if signerDir == "" { + signerDir = filepath.Join(cfg.RootDir, "config") + } + if !filepath.IsAbs(signerDir) { + signerDir = filepath.Join(cfg.RootDir, signerDir) + } + cfg.Signer.SignerPath = signerDir + if err := os.MkdirAll(signerDir, 0o750); err != nil { + return fmt.Errorf("create signer dir: %w", err) + } + signerFile := filepath.Join(signerDir, "signer.json") + if _, statErr := os.Stat(signerFile); os.IsNotExist(statErr) { + s, err := file.CreateFileSystemSigner(signerDir, []byte(signerPassphrase)) + if err != nil { + return fmt.Errorf("create file signer: %w", err) + } + if _, err := s.GetAddress(); err != nil { + return fmt.Errorf("signer address: %w", err) + } + } + passphraseFile := filepath.Join(cfg.RootDir, "passphrase.txt") + if err := os.WriteFile(passphraseFile, []byte(signerPassphrase), 0o600); err != nil { + return fmt.Errorf("write passphrase file: %w", err) + } + if err := cobraCmd.Flags().Set(rollconf.FlagSignerPassphraseFile, passphraseFile); err != nil { + return fmt.Errorf("set passphrase flag: %w", err) + } + + // Reload the signer to derive the genesis proposer address. + loaded, err := file.LoadFileSystemSigner(signerDir, []byte(signerPassphrase)) + if err != nil { + return fmt.Errorf("load file signer: %w", err) + } + signerAddr, err := loaded.GetAddress() + if err != nil { + return fmt.Errorf("signer address: %w", err) + } + + // 3) Genesis. Single proposer = our signer. + gen := genesis.NewGenesis(cfg.DA.Fiber.ConsensusChainID, 1, time.Now().UTC(), signerAddr) + if err := gen.Validate(); err != nil { + return fmt.Errorf("invalid genesis: %w", err) + } + + // 4) Datastore + node-key + executor + sequencer. The first three + // look identical to what testapp/cmd/run.go does; the executor + // is the bench-specific in-memory variant (constant state root, + // see executor.go for rationale) and the sequencer is solo (no + // based-sequencer / no forced inclusion machinery). + ds, err := store.NewDefaultKVStore(cfg.RootDir, cfg.DBPath, "fiber-bench") + if err != nil { + return fmt.Errorf("open datastore: %w", err) + } + // Match canonical layout: node_key.json under /config/, the + // same dir testapp/cmd/run.go reads it from. + nodeKey, err := key.LoadOrGenNodeKey(filepath.Dir(cfg.ConfigPath())) + if err != nil { + return fmt.Errorf("node key: %w", err) + } + exec := newInMemExecutor(mempoolSize) + seq := solo.NewSoloSequencer(logger, []byte(gen.ChainID), exec) + + // 5) Spawn loader + stats printer BEFORE cmd.StartNode (which + // blocks). They run for the lifetime of the bench. cmd.StartNode + // owns its own signal-handling goroutine; we send SIGINT to + // ourselves when the duration timer expires so it can exit + // through its normal shutdown path. + bgCtx, bgCancel := signal.NotifyContext(cobraCmd.Context(), os.Interrupt, syscall.SIGTERM) + defer bgCancel() + + var loaderWg sync.WaitGroup + loaderWg.Add(1) + go func() { + defer loaderWg.Done() + newLoader(exec, workers, txSize).run(bgCtx) + }() + + printer := newStatsPrinter(exec, cfg.Instrumentation.PrometheusListenAddr, txSize, fiberClient) + printer.start(bgCtx, statsInterval) + + logger.Info(). + Dur("duration", duration). + Int("workers", workers). + Int("tx_size", txSize). + Int("mempool", mempoolSize). + Dur("block_time", cfg.Node.BlockTime.Duration). + Str("batching", cfg.DA.BatchingStrategy). 
+ Msg("bench started") + + if duration > 0 { + go func() { + select { + case <-time.After(duration): + logger.Info().Msg("duration elapsed, sending SIGINT to trigger shutdown") + _ = syscall.Kill(syscall.Getpid(), syscall.SIGINT) + case <-bgCtx.Done(): + } + }() + } + + // 6) The actual node — let cmd.StartNode do all the wiring (signer + // load, DA client, p2p, node.NewNode, run loop with shutdown). Same + // call testapp/evm/grpc apps make. + startErr := rollcmd.StartNode( + logger, cobraCmd, exec, seq, nodeKey, ds, cfg, gen, + node.NodeOptions{}, fiberClient, + ) + + bgCancel() + loaderWg.Wait() + printer.printFinalSummary() + + if startErr != nil && !errors.Is(startErr, context.Canceled) { + return startErr + } + return nil +} + +// applyBenchDefaults overrides config fields that the bench needs forced +// (Aggregator, Fiber.Enabled) and the canonical defaults that are wrong +// for a throughput bench (DA block time, batching strategy, scrape +// interval, namespaces). Anything the operator passed on the command line +// is left untouched — we only override where the flag value still equals +// its canonical default. +func applyBenchDefaults(cmd *cobra.Command, cfg *rollconf.Config) { + // Forced for the bench: aggregator-only, Fibre DA, no P2P. + cfg.Node.Aggregator = true + cfg.Node.BasedSequencer = false + cfg.DA.Fiber.Enabled = true + if cfg.DA.Fiber.BridgeAddress == "" { + // FiberDAConfig.Validate requires a ws:// or wss:// address. + // Bench never dials it (see fibre.go: noBridgeBlob). + cfg.DA.Fiber.BridgeAddress = "ws://127.0.0.1:0" + } + cfg.P2P.ListenAddress = "/ip4/127.0.0.1/tcp/0" + cfg.P2P.DisableConnectionGater = true + cfg.RPC.Address = "127.0.0.1:0" + cfg.Signer.SignerType = "file" + cfg.Instrumentation.Pprof = false + // The stats printer scrapes /metrics every tick — keep Prometheus on + // even if the operator didn't pass --evnode.instrumentation.prometheus. + cfg.Instrumentation.Prometheus = true + + // Operator-overridable bench defaults — applied only if the canonical + // flag wasn't passed on the command line. + overrideIfUnchanged := func(name string, set func()) { + if !cmd.Flags().Changed(name) { + set() + } + } + overrideIfUnchanged(rollconf.FlagDABlockTime, func() { + cfg.DA.BlockTime = rollconf.DurationWrapper{Duration: time.Second} + }) + overrideIfUnchanged(rollconf.FlagDABatchingStrategy, func() { cfg.DA.BatchingStrategy = "immediate" }) + overrideIfUnchanged(rollconf.FlagScrapeInterval, func() { + cfg.Node.ScrapeInterval = rollconf.DurationWrapper{Duration: 100 * time.Millisecond} + }) + overrideIfUnchanged(rollconf.FlagDARequestTimeout, func() { + cfg.DA.RequestTimeout = rollconf.DurationWrapper{Duration: 60 * time.Second} + }) + overrideIfUnchanged(rollconf.FlagDANamespace, func() { cfg.DA.Namespace = "fb-bench-h" }) + overrideIfUnchanged(rollconf.FlagDADataNamespace, func() { cfg.DA.DataNamespace = "fb-bench-d" }) +} diff --git a/tools/celestia-node-fiber/cmd/fiber-bench/stats.go b/tools/celestia-node-fiber/cmd/fiber-bench/stats.go new file mode 100644 index 0000000000..f45fb91c06 --- /dev/null +++ b/tools/celestia-node-fiber/cmd/fiber-bench/stats.go @@ -0,0 +1,380 @@ +package main + +import ( + "bufio" + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "sync" + "time" +) + +// statsPrinter periodically prints a one-line summary combining counters +// from the in-mem executor and selected Prometheus metrics scraped from +// ev-node's instrumentation endpoint. +// +// Why scrape Prometheus instead of reaching into ev-node? 
Because the +// metrics ev-node already exports give us the answers we want +// (committed height, txs-per-block, pending blobs, block-production +// duration histogram) and scraping is zero source diff. It also makes +// the same numbers available to a real Prometheus once we move past the +// fail-fast baseline. +type statsPrinter struct { + exec *inMemExecutor + promURL string + httpClient *http.Client + txSize int + adapter *instrumentedAdapter + + mu sync.Mutex + startedAt time.Time + lastTick time.Time + lastInject uint64 + lastTxs float64 + lastBlocks float64 + lastDaInc float64 + peakInjRPS float64 + peakTxRPS float64 + peakDaRPS float64 + + // lastSnapshot caches the last successful Prometheus scrape so + // the final summary still has values after the node has shut + // down (its /metrics endpoint goes away with it). + lastSnapshot map[string]float64 +} + +func newStatsPrinter(exec *inMemExecutor, promListenAddr string, txSize int, adapter *instrumentedAdapter) *statsPrinter { + url := "" + if promListenAddr != "" { + // PrometheusListenAddr can be ":26660" or "127.0.0.1:26660"; + // normalize to a fetchable URL. + host := promListenAddr + if strings.HasPrefix(host, ":") { + host = "127.0.0.1" + host + } + url = "http://" + host + "/metrics" + } + return &statsPrinter{ + exec: exec, + promURL: url, + httpClient: &http.Client{Timeout: 500 * time.Millisecond}, + txSize: txSize, + adapter: adapter, + } +} + +// start prints a header then ticks every interval until ctx is done. +func (p *statsPrinter) start(ctx context.Context, interval time.Duration) { + if interval <= 0 { + interval = time.Second + } + now := time.Now() + p.mu.Lock() + p.startedAt = now + p.lastTick = now + p.mu.Unlock() + + fmt.Println() + // Each rate column shows " / " so tps and bandwidth + // land side by side without doubling the column count. The blob + // size at the latest block stays as an absolute (blob_KB) since + // it's a level, not a rate. + fmt.Printf("%-9s %-15s %-15s %-15s %-7s %-9s %-7s %-8s %-7s %-10s %s\n", + "elapsed", "inj tps/MBs", "exec tps/MBs", "da tps/MBs", + "prod_h", "da_inc_h", "txs/blk", "blob_KB", "pending", "drops", "upload latency") + fmt.Println(strings.Repeat("-", 140)) + + go func() { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + p.tick() + } + } + }() +} + +func (p *statsPrinter) tick() { + now := time.Now() + + injected, dropped, blocks, txs, _ := p.exec.Stats() + mempool := p.exec.MempoolDepth() + + prom := p.scrapePrometheus() + if len(prom) > 0 { + p.mu.Lock() + p.lastSnapshot = prom + p.mu.Unlock() + } + // ev-node prefixes its metrics with the namespace from the metrics + // provider — for the aggregator path this is "evnode_sequencer". 
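+	// Illustrative scrape (values made up) feeding the reads below:
+	//
+	//	evnode_sequencer_height 142
+	//	evnode_sequencer_da_inclusion_height 137
+	//	evnode_sequencer_total_txs 710000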
+ producedHeight := prom["evnode_sequencer_height"] + daInclusionHeight := prom["evnode_sequencer_da_inclusion_height"] + totalTxs := prom["evnode_sequencer_total_txs"] + if totalTxs == 0 { + totalTxs = float64(txs) + } + blockBytes := prom["evnode_sequencer_block_size_bytes"] + pending := prom["evnode_sequencer_da_submitter_pending_blobs"] + blocksGauge := float64(blocks) + if producedHeight > blocksGauge { + blocksGauge = producedHeight + } + txsPerBlock := txsPerBlockMetric(blocksGauge, totalTxs) + + p.mu.Lock() + dt := now.Sub(p.lastTick).Seconds() + if dt < 0.001 { + p.mu.Unlock() + return + } + injRPS := float64(injected-p.lastInject) / dt + txRPS := (totalTxs - p.lastTxs) / dt + daSettledRPS := (daInclusionHeight - p.lastDaInc) * txsPerBlock / dt + if injRPS > p.peakInjRPS { + p.peakInjRPS = injRPS + } + if txRPS > p.peakTxRPS { + p.peakTxRPS = txRPS + } + if daSettledRPS > p.peakDaRPS { + p.peakDaRPS = daSettledRPS + } + elapsed := now.Sub(p.startedAt).Truncate(time.Millisecond) + p.lastTick = now + p.lastInject = injected + p.lastTxs = totalTxs + p.lastBlocks = blocksGauge + p.lastDaInc = daInclusionHeight + p.mu.Unlock() + + txSizeBytes := float64(p.txSize) + + upStats := p.adapter.uploadStats() + + fmt.Printf("%-9s %-15s %-15s %-15s %-7.0f %-9.0f %-7.0f %-8.0f %-7.0f %-10d %s\n", + elapsed.String(), + formatRate(injRPS, txSizeBytes), + formatRate(txRPS, txSizeBytes), + formatRate(daSettledRPS, txSizeBytes), + producedHeight, daInclusionHeight, txsPerBlock, blockBytes/1024, pending, dropped, + formatUploadLatency(upStats), + ) + + _ = mempool // currently we report drops, not depth — the mempool is large enough that depth isn't the meaningful signal +} + +// formatUploadLatency renders Upload latency stats as a compact suffix +// for the live table. Returns "-" if no samples yet. +func formatUploadLatency(s uploadStats) string { + if s.Count == 0 { + return "upload[-]" + } + failPart := "" + if s.Failures > 0 { + failPart = fmt.Sprintf(",fails=%d", s.Failures) + } + return fmt.Sprintf("upload[n=%d p50=%v p99=%v%s]", + s.Count, s.P50.Truncate(time.Millisecond), s.P99.Truncate(time.Millisecond), failPart) +} + +// formatRate renders " / " compactly, rounding to whole MB/s +// since sub-MB/s precision isn't useful at our throughput levels and a +// short string keeps the table aligned. +func formatRate(rps, txSizeBytes float64) string { + mbps := rps * txSizeBytes / (1024 * 1024) + switch { + case rps >= 1_000_000: + return fmt.Sprintf("%.1fM/%.0fMB", rps/1_000_000, mbps) + case rps >= 1_000: + return fmt.Sprintf("%.0fk/%.0fMB", rps/1_000, mbps) + default: + return fmt.Sprintf("%.0f/%.1fMB", rps, mbps) + } +} + +// txsPerBlockMetric computes the running mean tx/blk over all produced +// blocks. Only meaningful once at least one block has been produced; +// returns 0 otherwise. +func txsPerBlockMetric(blocks, totalTxs float64) float64 { + if blocks <= 0 { + return 0 + } + return totalTxs / blocks +} + +// scrapePrometheus pulls the ev-node /metrics endpoint and parses just +// the gauges/counters we care about. Best effort: returns empty map on +// any error so the bench keeps running even if metrics aren't ready yet. 
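+// A labelled sample such as
+//
+//	evnode_sequencer_block_size_bytes{chain_id="x"} 524288
+//
+// (label purely illustrative) parses to
+// out["evnode_sequencer_block_size_bytes"] = 524288.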
+func (p *statsPrinter) scrapePrometheus() map[string]float64 { + out := map[string]float64{} + if p.promURL == "" { + return out + } + resp, err := p.httpClient.Get(p.promURL) + if err != nil { + return out + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + _, _ = io.Copy(io.Discard, resp.Body) + return out + } + + wanted := map[string]struct{}{ + "evnode_sequencer_height": {}, + "evnode_sequencer_latest_block_height": {}, + "evnode_sequencer_da_inclusion_height": {}, + "evnode_sequencer_total_txs": {}, + "evnode_sequencer_num_txs": {}, + "evnode_sequencer_block_size_bytes": {}, + "evnode_sequencer_da_submitter_pending_blobs": {}, + } + + scanner := bufio.NewScanner(resp.Body) + for scanner.Scan() { + line := scanner.Text() + if line == "" || strings.HasPrefix(line, "#") { + continue + } + // "metric_name{labels...} value [timestamp]" — strip labels and + // trailing timestamp; we don't use them. + nameEnd := strings.IndexAny(line, "{ ") + if nameEnd < 0 { + continue + } + name := line[:nameEnd] + if _, ok := wanted[name]; !ok { + continue + } + // Skip past labels if present. + rest := line[nameEnd:] + if rest[0] == '{' { + closeIdx := strings.Index(rest, "}") + if closeIdx < 0 { + continue + } + rest = rest[closeIdx+1:] + } + rest = strings.TrimSpace(rest) + valEnd := strings.IndexByte(rest, ' ') + valStr := rest + if valEnd >= 0 { + valStr = rest[:valEnd] + } + v, err := strconv.ParseFloat(valStr, 64) + if err != nil { + continue + } + out[name] = v + } + return out +} + +func (p *statsPrinter) printFinalSummary() { + injected, dropped, blocks, txs, mempoolHigh := p.exec.Stats() + // Prefer a fresh scrape, but fall back to the last live snapshot: + // the node's /metrics endpoint goes away as it shuts down, so a + // post-stop scrape returns an empty map and the summary would + // otherwise print zeros. 
+ prom := p.scrapePrometheus() + p.mu.Lock() + if len(prom) == 0 && p.lastSnapshot != nil { + prom = p.lastSnapshot + } + p.mu.Unlock() + producedHeight := uint64(prom["evnode_sequencer_height"]) + daInclusionHeight := uint64(prom["evnode_sequencer_da_inclusion_height"]) + totalTxs := uint64(prom["evnode_sequencer_total_txs"]) + if totalTxs == 0 { + totalTxs = txs + } + + p.mu.Lock() + elapsed := time.Since(p.startedAt) + peakInj := p.peakInjRPS + peakTx := p.peakTxRPS + p.mu.Unlock() + + avgInj := 0.0 + if elapsed.Seconds() > 0 { + avgInj = float64(injected) / elapsed.Seconds() + } + avgTx := 0.0 + if elapsed.Seconds() > 0 { + avgTx = float64(totalTxs) / elapsed.Seconds() + } + txsPerBlock := 0.0 + if blocks > 0 { + txsPerBlock = float64(totalTxs) / float64(blocks) + } + txSize := float64(p.txSize) + mb := func(rps float64) float64 { return rps * txSize / (1024 * 1024) } + + p.mu.Lock() + peakDa := p.peakDaRPS + p.mu.Unlock() + + var avgDaSettled float64 + if daInclusionHeight > 0 && elapsed.Seconds() > 0 { + avgDaSettled = float64(daInclusionHeight) * txsPerBlock / elapsed.Seconds() + } + + fmt.Println() + fmt.Println(strings.Repeat("=", 70)) + fmt.Println(" BASELINE SUMMARY") + fmt.Println(strings.Repeat("=", 70)) + fmt.Printf("Duration: %s\n", elapsed.Truncate(time.Millisecond)) + fmt.Printf("Tx size: %d B\n", p.txSize) + fmt.Println() + fmt.Printf("Injection: avg %.0f tx/s (%.1f MB/s), peak %.0f tx/s (%.0f MB/s)\n", + avgInj, mb(avgInj), peakInj, mb(peakInj)) + fmt.Printf("Block production: avg %.0f tx/s (%.2f MB/s), peak %.0f tx/s (%.1f MB/s)\n", + avgTx, mb(avgTx), peakTx, mb(peakTx)) + fmt.Printf("DA-settled: avg %.0f tx/s (%.2f MB/s), peak %.0f tx/s (%.1f MB/s)\n", + avgDaSettled, mb(avgDaSettled), peakDa, mb(peakDa)) + fmt.Println() + fmt.Printf("Blocks produced: %d (prod_h=%d)\n", blocks, producedHeight) + fmt.Printf("DA-included height: %d (lag = %d blocks behind production)\n", + daInclusionHeight, producedHeight-daInclusionHeight) + fmt.Printf("Txs into blocks: %d (%.1f tx/blk)\n", totalTxs, txsPerBlock) + fmt.Printf("Dropped (mempool full): %d\n", dropped) + fmt.Printf("Mempool high-water: %d\n", mempoolHigh) + + upStats := p.adapter.uploadStats() + if upStats.Count > 0 { + fmt.Println() + fmt.Println("Fibre Upload latency (per call observed at the adapter):") + fmt.Printf(" count: %d (failures: %d)\n", upStats.Count, upStats.Failures) + fmt.Printf(" mean: %s\n", upStats.Mean.Truncate(time.Millisecond)) + fmt.Printf(" p50: %s\n", upStats.P50.Truncate(time.Millisecond)) + fmt.Printf(" p99: %s\n", upStats.P99.Truncate(time.Millisecond)) + fmt.Printf(" max: %s\n", upStats.Max.Truncate(time.Millisecond)) + // ev-node's submitter runs ONE header-Upload goroutine and + // ONE data-Upload goroutine concurrently (each TryLock'd via + // its own mutex in submitter.go). A block settles only when + // BOTH its header and data Uploads have returned, and each + // stream submits at most 1 Upload per mean_latency seconds — + // so the per-stream cap is 1/mean blocks/s, and the block + // settlement cap (min of the two) equals it. We print this + // so the operator can compare it to the observed da_inc_h + // rate and tell apart "Fibre Upload is slow" from "ev-node + // is leaving capacity on the table". 
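+		// Worked example (numbers illustrative): a 400ms mean Upload
+		// latency gives 1/0.4 = 2.5 blocks/s per stream; at 5,000 tx/blk
+		// and 200 B/tx that caps settlement around 12,500 tx/s (~2.4 MB/s).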
+ if upStats.Mean > 0 { + capBlocksPerSec := 1.0 / upStats.Mean.Seconds() + fmt.Printf(" implied cap (1/mean per stream): %.2f blocks/s ≈ %.0f tx/s (%.2f MB/s)\n", + capBlocksPerSec, + capBlocksPerSec*txsPerBlock, + capBlocksPerSec*txsPerBlock*txSize/(1024*1024), + ) + } + } + fmt.Println(strings.Repeat("=", 70)) +} diff --git a/tools/celestia-node-fiber/cmd/fiber-bench/util.go b/tools/celestia-node-fiber/cmd/fiber-bench/util.go new file mode 100644 index 0000000000..666d00f041 --- /dev/null +++ b/tools/celestia-node-fiber/cmd/fiber-bench/util.go @@ -0,0 +1,17 @@ +package main + +import ( + "os" + "path/filepath" +) + +// defaultKeyringDir is where we put the bench's cosmos keyring by default. +// Sibling of the ev-node home (~/.fiber-bench/node) so --keep-home=false +// runs cannot wipe it. +func defaultKeyringDir() string { + home, err := os.UserHomeDir() + if err != nil { + return ".fiber-bench-keyring" + } + return filepath.Join(home, ".fiber-bench", "keyring") +} diff --git a/tools/celestia-node-fiber/config.go b/tools/celestia-node-fiber/config.go new file mode 100644 index 0000000000..784d8a0faf --- /dev/null +++ b/tools/celestia-node-fiber/config.go @@ -0,0 +1,18 @@ +package celestianodefiber + +import ( + "github.com/celestiaorg/celestia-node/api/client" +) + +// Config configures the celestia-node-backed Fibre adapter. +type Config struct { + // Client is the full celestia-node api/client.Config. See that package + // for field semantics (ReadConfig.BridgeDAAddr, SubmitConfig.DefaultKeyName, + // SubmitConfig.CoreGRPCConfig, SubmitConfig.Fibre, etc.). + Client client.Config + + // ListenChannelSize bounds the buffered BlobEvent channel returned by + // Listen. 0 selects the default (16), matching the upstream + // blob.Subscribe buffer so backpressure behaves consistently. + ListenChannelSize int +} diff --git a/tools/celestia-node-fiber/doc.go b/tools/celestia-node-fiber/doc.go new file mode 100644 index 0000000000..1e1d22e394 --- /dev/null +++ b/tools/celestia-node-fiber/doc.go @@ -0,0 +1,14 @@ +// Package celestianodefiber implements the ev-node fiber.DA interface by +// delegating to a celestia-node api/client.Client. +// +// Upload and Download run locally against a Celestia consensus node (over +// gRPC) and Fibre Storage Providers (over Fibre gRPC), without a bridge-node +// hop, using the self-sufficient client introduced in celestia-node #4961. +// Listen subscribes to a bridge node's blob stream (JSON-RPC/HTTP) and +// forwards only share-version-2 blobs — the ones settled on-chain through +// MsgPayForFibre. +// +// This package is a separate Go sub-module from the parent ev-node repository +// so that ev-node core does not pick up the celestia-app / cosmos-sdk +// replace-directive soup that celestia-node requires. +package celestianodefiber diff --git a/tools/celestia-node-fiber/go.mod b/tools/celestia-node-fiber/go.mod new file mode 100644 index 0000000000..569776c998 --- /dev/null +++ b/tools/celestia-node-fiber/go.mod @@ -0,0 +1,418 @@ +module github.com/evstack/ev-node/tools/celestia-node-fiber + +go 1.26.1 + +replace ( + // celestia-node replace directives (copied from its go.mod so we pull the + // same Celestia forks). Keep this block in sync with + // github.com/celestiaorg/celestia-node go.mod. 
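+	// The reference for this sync is the celestia-node version pinned in
+	// the require block below; when bumping it, re-copy its replace block.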
+ cosmossdk.io/api => github.com/celestiaorg/cosmos-sdk/api v0.7.6 + cosmossdk.io/log => github.com/celestiaorg/cosmos-sdk/log v1.1.1-0.20251116153902-f48fea92e627 + cosmossdk.io/x/upgrade => github.com/celestiaorg/cosmos-sdk/x/upgrade v0.2.0 + github.com/bytedance/sonic => github.com/bytedance/sonic v1.15.0 + github.com/bytedance/sonic/loader => github.com/bytedance/sonic/loader v0.5.0 + github.com/cloudwego/base64x => github.com/cloudwego/base64x v0.1.6 + github.com/cometbft/cometbft => github.com/celestiaorg/celestia-core v0.40.1 + github.com/consensys/gnark-crypto => github.com/consensys/gnark-crypto v0.18.0 + github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v0.52.3 + github.com/cosmos/ibc-go/v8 => github.com/celestiaorg/ibc-go/v8 v8.7.2 + github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 + github.com/ipfs/boxo => github.com/celestiaorg/boxo v0.29.0-fork-4 + github.com/ipfs/go-datastore => github.com/celestiaorg/go-datastore v0.0.0-20250801131506-48a63ae531e4 + github.com/moby/term => github.com/moby/term v0.5.2 + github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 + github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.55.0-tm-v0.34.35 + nhooyr.io/websocket => github.com/coder/websocket v1.8.6 +) + +// Use the parent ev-node module for block.* types re-exported from +// block/public.go. +replace github.com/evstack/ev-node => ../../. + +require ( + cosmossdk.io/math v1.5.3 + github.com/celestiaorg/celestia-app/v9 v9.0.0-20260427114616-9ff33a36eb19 + github.com/celestiaorg/celestia-node v0.29.3-mocha.0.20260427115656-23bec13f01a3 + github.com/celestiaorg/go-square/v4 v4.0.0-rc4.0.20260318002530-1ca8ff7b42ea + github.com/cometbft/cometbft v1.0.1 + github.com/cosmos/cosmos-sdk v0.50.13 + github.com/cristalhq/jwt/v5 v5.4.0 + github.com/evstack/ev-node v1.1.0 + github.com/evstack/ev-node/core v1.0.0 + github.com/ipfs/go-datastore v0.9.1 + github.com/libp2p/go-libp2p v0.48.0 + github.com/rs/zerolog v1.35.1 + github.com/spf13/cobra v1.10.2 + github.com/stretchr/testify v1.11.1 + go.uber.org/fx v1.24.0 + google.golang.org/grpc v1.80.0 +) + +require ( + cel.dev/expr v0.25.1 // indirect + cloud.google.com/go v0.123.0 // indirect + cloud.google.com/go/auth v0.20.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect + cloud.google.com/go/iam v1.7.0 // indirect + cloud.google.com/go/kms v1.29.0 // indirect + cloud.google.com/go/longrunning v0.9.0 // indirect + cloud.google.com/go/monitoring v1.24.3 // indirect + cloud.google.com/go/storage v1.61.3 // indirect + connectrpc.com/connect v1.19.2 // indirect + connectrpc.com/grpcreflect v1.3.0 // indirect + cosmossdk.io/api v1.0.0 // indirect + cosmossdk.io/client/v2 v2.0.0-beta.8 // indirect + cosmossdk.io/collections v0.4.0 // indirect + cosmossdk.io/core v1.1.0 // indirect + cosmossdk.io/depinject v1.2.1 // indirect + cosmossdk.io/errors v1.0.2 // indirect + cosmossdk.io/log v1.6.1 // indirect + cosmossdk.io/store v1.1.2 // indirect + cosmossdk.io/x/circuit v0.1.1 // indirect + cosmossdk.io/x/evidence v0.1.1 // indirect + cosmossdk.io/x/feegrant v0.1.1 // indirect + cosmossdk.io/x/tx v0.13.8 // indirect + cosmossdk.io/x/upgrade v0.1.4 // indirect + filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5 // indirect + filippo.io/edwards25519 v1.1.1 // indirect + filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b // indirect + github.com/99designs/go-keychain 
v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.2 // indirect + github.com/BurntSushi/toml v1.6.0 // indirect + github.com/DataDog/datadog-go v4.8.3+incompatible // indirect + github.com/DataDog/zstd v1.5.7 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 // indirect + github.com/Jorropo/jsync v1.0.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 // indirect + github.com/RaduBerinde/axisds v0.1.0 // indirect + github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/aws/aws-sdk-go-v2 v1.41.6 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.9 // indirect + github.com/aws/aws-sdk-go-v2/config v1.32.16 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.19.15 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.14 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.22 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.99.1 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 // indirect + github.com/aws/smithy-go v1.25.1 // indirect + github.com/bcp-innovations/hyperlane-cosmos v1.1.0 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect + github.com/bgentry/speakeasy v0.2.0 // indirect + github.com/bits-and-blooms/bitset v1.24.0 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/boltdb/bolt v1.3.1 // indirect + github.com/celestiaorg/go-header v0.8.5 // indirect + github.com/celestiaorg/go-libp2p-messenger v0.2.2 // indirect + github.com/celestiaorg/go-square/merkle v0.0.0-20240627094109-7d01436067a3 // indirect + github.com/celestiaorg/go-square/v2 v2.3.3 // indirect + github.com/celestiaorg/go-square/v3 v3.0.2 // indirect + github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 // indirect + github.com/celestiaorg/nmt v0.24.3 // indirect + github.com/celestiaorg/rsmt2d v0.15.2 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chzyer/readline v1.5.1 // indirect + github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect + github.com/cockroachdb/apd/v2 v2.0.2 // indirect + github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b 
// indirect + github.com/cockroachdb/errors v1.12.0 // indirect + github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 // indirect + github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect + github.com/cockroachdb/pebble v1.1.5 // indirect + github.com/cockroachdb/pebble/v2 v2.1.4 // indirect + github.com/cockroachdb/redact v1.1.6 // indirect + github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20250429170803-42689b6311bb // indirect + github.com/cometbft/cometbft-db v1.0.4 // indirect + github.com/consensys/gnark v0.14.0 // indirect + github.com/consensys/gnark-crypto v0.19.2 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/cosmos-db v1.1.3 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect + github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/gogogateway v1.2.0 // indirect + github.com/cosmos/gogoproto v1.7.2 // indirect + github.com/cosmos/iavl v1.2.8 // indirect + github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8 v8.2.0 // indirect + github.com/cosmos/ibc-go/modules/capability v1.0.1 // indirect + github.com/cosmos/ibc-go/v8 v8.7.0 // indirect + github.com/cosmos/ics23/go v0.11.0 // indirect + github.com/cosmos/ledger-cosmos-go v0.15.0 // indirect + github.com/cskr/pubsub v1.0.2 // indirect + github.com/danieljoos/wincred v1.2.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1 // indirect + github.com/desertbit/timer v1.0.1 // indirect + github.com/dgraph-io/badger/v4 v4.9.1 // indirect + github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect + github.com/dunglas/httpsfv v1.1.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/dvsekhvalnov/jose2go v1.7.0 // indirect + github.com/emicklei/dot v1.6.2 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect + github.com/ethereum/go-ethereum v1.17.0 // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/filecoin-project/go-clock v0.1.0 // indirect + github.com/filecoin-project/go-jsonrpc v0.10.1 // indirect + github.com/flynn/noise v1.1.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/gammazero/chanqueue v1.1.1 // indirect + github.com/gammazero/deque v1.2.1 // indirect + github.com/gammazero/workerpool v1.2.1 // indirect + github.com/getsentry/sentry-go v0.42.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.4 // indirect + github.com/go-kit/kit v0.13.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.1 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.5.0 // indirect + github.com/goccy/go-yaml v1.19.2 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gofrs/flock v0.13.0 // indirect + github.com/gogo/googleapis v1.4.1 // indirect + github.com/gogo/protobuf v1.3.3 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + 
github.com/golang/snappy v1.0.0 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/flatbuffers v25.2.10+incompatible // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/orderedcode v0.0.1 // indirect + github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.14 // indirect + github.com/googleapis/gax-go/v2 v2.21.0 // indirect + github.com/gorilla/handlers v1.5.2 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect + github.com/grafana/otel-profiling-go v0.5.1 // indirect + github.com/grafana/pyroscope-go v1.2.8 // indirect + github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.72 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-getter v1.8.6 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-metrics v0.5.4 // indirect + github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect + github.com/hashicorp/go-plugin v1.6.3 // indirect + github.com/hashicorp/go-version v1.8.0 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/golang-lru/arc/v2 v2.0.7 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/raft v1.7.3 // indirect + github.com/hashicorp/raft-boltdb v0.0.0-20251103221153-05f9dd7a5148 // indirect + github.com/hashicorp/yamux v0.1.2 // indirect + github.com/hdevalence/ed25519consensus v0.2.0 // indirect + github.com/holiman/uint256 v1.3.2 // indirect + github.com/huandu/skiplist v1.2.1 // indirect + github.com/huin/goupnp v1.3.0 // indirect + github.com/iancoleman/orderedmap v0.3.0 // indirect + github.com/iancoleman/strcase v0.3.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/improbable-eng/grpc-web v0.15.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ingonyama-zk/icicle-gnark/v3 v3.2.2 // indirect + github.com/ipfs/bbloom v0.0.4 // indirect + github.com/ipfs/boxo v0.37.0 // indirect + github.com/ipfs/go-block-format v0.2.3 // indirect + github.com/ipfs/go-cid v0.6.0 // indirect + github.com/ipfs/go-ds-badger4 v0.1.8 // indirect + github.com/ipfs/go-ipfs-delay v0.0.1 // indirect + github.com/ipfs/go-ipfs-pq v0.0.3 // indirect + github.com/ipfs/go-ipld-format v0.6.3 // indirect + github.com/ipfs/go-ipld-legacy v0.2.2 // indirect + github.com/ipfs/go-log/v2 v2.9.1 // indirect + github.com/ipfs/go-metrics-interface v0.3.0 // indirect + github.com/ipfs/go-metrics-prometheus v0.1.0 // indirect + github.com/ipfs/go-peertaskqueue v0.8.2 // indirect + github.com/ipld/go-codec-dagpb v1.7.0 // indirect + github.com/ipld/go-ipld-prime v0.22.0 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/jmhodges/levigo v1.0.0 // indirect + 
github.com/klauspost/compress v1.18.5 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/klauspost/reedsolomon v1.13.4-0.20260420101718-f7e5efe6123a // indirect + github.com/koron/go-ssdp v0.0.6 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/lib/pq v1.12.3 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-flow-metrics v0.3.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.39.1 // indirect + github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect + github.com/libp2p/go-libp2p-pubsub v0.16.0 // indirect + github.com/libp2p/go-libp2p-record v0.3.1 // indirect + github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect + github.com/libp2p/go-msgio v0.3.0 // indirect + github.com/libp2p/go-netroute v0.4.0 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect + github.com/libp2p/go-yamux/v5 v5.0.1 // indirect + github.com/linxGnu/grocksdb v1.9.8 // indirect + github.com/manifoldco/promptui v0.9.0 // indirect + github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect + github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/highwayhash v1.0.4 // indirect + github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multiaddr v0.16.1 // indirect + github.com/multiformats/go-multiaddr-dns v0.5.0 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.10.0 // indirect + github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/multiformats/go-multistream v0.6.1 // indirect + github.com/multiformats/go-varint v0.1.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pelletier/go-toml/v2 v2.3.0 // indirect + github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe // indirect + github.com/pion/datachannel v1.5.10 // indirect + github.com/pion/dtls/v3 v3.1.2 // indirect + github.com/pion/ice/v4 v4.0.10 // indirect + github.com/pion/interceptor v0.1.40 // indirect + github.com/pion/logging v0.2.4 // indirect + github.com/pion/mdns/v2 v2.0.7 // indirect + github.com/pion/randutil v0.1.0 // indirect + github.com/pion/rtcp v1.2.16 // indirect + github.com/pion/rtp v1.8.19 // indirect + github.com/pion/sctp v1.8.39 // indirect + github.com/pion/sdp/v3 v3.0.18 // indirect + github.com/pion/srtp/v3 v3.0.6 // indirect + github.com/pion/stun/v3 v3.1.1 // indirect + github.com/pion/transport/v3 v3.0.7 // indirect + github.com/pion/transport/v4 v4.0.1 // indirect + github.com/pion/turn/v4 v4.0.2 // indirect + 
github.com/pion/webrtc/v4 v4.1.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/polydawn/refmt v0.89.1-0.20231129105047-37766d95467a // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.67.5 // indirect + github.com/prometheus/procfs v0.20.1 // indirect + github.com/quic-go/qpack v0.6.0 // indirect + github.com/quic-go/quic-go v0.59.0 // indirect + github.com/quic-go/webtransport-go v0.10.0 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/rollkit/go-da v0.9.0 // indirect + github.com/ronanh/intcomp v1.1.1 // indirect + github.com/rs/cors v1.11.1 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/sasha-s/go-deadlock v0.3.9 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/spf13/viper v1.21.0 // indirect + github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tendermint/go-amino v0.16.0 // indirect + github.com/tidwall/btree v1.7.0 // indirect + github.com/ulikunitz/xz v0.5.15 // indirect + github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect + github.com/wlynxg/anet v0.0.5 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/zondax/golem v0.27.0 // indirect + github.com/zondax/hid v0.9.2 // indirect + github.com/zondax/ledger-go v1.0.1 // indirect + go.etcd.io/bbolt v1.4.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.67.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.68.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect + go.opentelemetry.io/contrib/instrumentation/runtime v0.68.0 // indirect + go.opentelemetry.io/otel v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 // indirect + go.opentelemetry.io/otel/metric v1.43.0 // indirect + go.opentelemetry.io/otel/sdk v1.43.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect + go.opentelemetry.io/otel/trace v1.43.0 // indirect + go.opentelemetry.io/proto/otlp v1.10.0 // indirect + go.uber.org/dig v1.19.0 // indirect + go.uber.org/mock v0.5.2 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.1 // indirect + go.yaml.in/yaml/v2 v2.4.4 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.50.0 // indirect + golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 
// indirect + golang.org/x/mod v0.34.0 // indirect + golang.org/x/net v0.53.0 // indirect + golang.org/x/oauth2 v0.36.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/sys v0.43.0 // indirect + golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c // indirect + golang.org/x/term v0.42.0 // indirect + golang.org/x/text v0.36.0 // indirect + golang.org/x/time v0.15.0 // indirect + golang.org/x/tools v0.43.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect + gonum.org/v1/gonum v0.17.0 // indirect + google.golang.org/api v0.276.0 // indirect + google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260406210006-6f92a3bedf2d // indirect + google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/v3 v3.5.2 // indirect + lukechampine.com/blake3 v1.4.1 // indirect + nhooyr.io/websocket v1.8.17 // indirect + pgregory.net/rapid v1.2.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/tools/celestia-node-fiber/go.sum b/tools/celestia-node-fiber/go.sum new file mode 100644 index 0000000000..17ac7a1de7 --- /dev/null +++ b/tools/celestia-node-fiber/go.sum @@ -0,0 +1,1899 @@ +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= +cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= +cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= +cloud.google.com/go/auth v0.20.0 h1:kXTssoVb4azsVDoUiF8KvxAqrsQcQtB53DcSgta74CA= +cloud.google.com/go/auth v0.20.0/go.mod h1:942/yi/itH1SsmpyrbnTMDgGfdy2BUqIKyd0cyYLc5Q= 
+cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/iam v1.7.0 h1:JD3zh0C6LHl16aCn5Akff0+GELdp1+4hmh6ndoFLl8U= +cloud.google.com/go/iam v1.7.0/go.mod h1:tetWZW1PD/m6vcuY2Zj/aU0eCHNPuxedbnbRTyKXvdY= +cloud.google.com/go/kms v1.29.0 h1:bAW1C5FQf+6GhPkywQzPlsULALCG7c16qpXLFGV9ivY= +cloud.google.com/go/kms v1.29.0/go.mod h1:YIyXZym11R5uovJJt4oN5eUL3oPmirF3yKeIh6QAf4U= +cloud.google.com/go/logging v1.13.2 h1:qqlHCBvieJT9Cdq4QqYx1KPadCQ2noD4FK02eNqHAjA= +cloud.google.com/go/logging v1.13.2/go.mod h1:zaybliM3yun1J8mU2dVQ1/qDzjbOqEijZCn6hSBtKak= +cloud.google.com/go/longrunning v0.9.0 h1:0EzbDEGsAvOZNbqXopgniY0w0a1phvu5IdUFq8grmqY= +cloud.google.com/go/longrunning v0.9.0/go.mod h1:pkTz846W7bF4o2SzdWJ40Hu0Re+UoNT6Q5t+igIcb8E= +cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE= +cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.61.3 h1:VS//ZfBuPGDvakfD9xyPW1RGF1Vy3BWUoVZXgW1KMOg= +cloud.google.com/go/storage v1.61.3/go.mod h1:JtqK8BBB7TWv0HVGHubtUdzYYrakOQIsMLffZ2Z/HWk= +cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= +cloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s= +connectrpc.com/connect v1.19.2 h1:McQ83FGdzL+t60peksi0gXC7MQ/iLKgLduAnThbM0mo= +connectrpc.com/connect v1.19.2/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w= +connectrpc.com/grpcreflect v1.3.0 h1:Y4V+ACf8/vOb1XOc251Qun7jMB75gCUNw6llvB9csXc= +connectrpc.com/grpcreflect v1.3.0/go.mod 
h1:nfloOtCS8VUQOQ1+GTdFzVg2CJo4ZGaat8JIovCtDYs= +cosmossdk.io/client/v2 v2.0.0-beta.8 h1:RXMJdA4V9H1H3/3BfMD6dAW3lF8W9DpNPPYnKD+ArxY= +cosmossdk.io/client/v2 v2.0.0-beta.8/go.mod h1:x+E2eji+ToMtUIqKzoJ5mJIhat+Zak47xZ8jOYjJQBA= +cosmossdk.io/collections v0.4.0 h1:PFmwj2W8szgpD5nOd8GWH6AbYNi1f2J6akWXJ7P5t9s= +cosmossdk.io/collections v0.4.0/go.mod h1:oa5lUING2dP+gdDquow+QjlF45eL1t4TJDypgGd+tv0= +cosmossdk.io/core v1.1.0 h1:iJ7j2DjNsFzg4/z4ImNQYzy2D4LfMCsaQ8Lrz1KCmxk= +cosmossdk.io/core v1.1.0/go.mod h1:qGmJxBFHobvG1k4bROQnueslotBU5MIKZLC57xVBYYI= +cosmossdk.io/depinject v1.2.1 h1:eD6FxkIjlVaNZT+dXTQuwQTKZrFZ4UrfCq1RKgzyhMw= +cosmossdk.io/depinject v1.2.1/go.mod h1:lqQEycz0H2JXqvOgVwTsjEdMI0plswI7p6KX+MVqFOM= +cosmossdk.io/errors v1.0.2 h1:wcYiJz08HThbWxd/L4jObeLaLySopyyuUFB5w4AGpCo= +cosmossdk.io/errors v1.0.2/go.mod h1:0rjgiHkftRYPj//3DrD6y8hcm40HcPv/dR4R/4efr0k= +cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= +cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= +cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o= +cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A= +cosmossdk.io/x/circuit v0.1.1 h1:KPJCnLChWrxD4jLwUiuQaf5mFD/1m7Omyo7oooefBVQ= +cosmossdk.io/x/circuit v0.1.1/go.mod h1:B6f/urRuQH8gjt4eLIXfZJucrbreuYrKh5CSjaOxr+Q= +cosmossdk.io/x/evidence v0.1.1 h1:Ks+BLTa3uftFpElLTDp9L76t2b58htjVbSZ86aoK/E4= +cosmossdk.io/x/evidence v0.1.1/go.mod h1:OoDsWlbtuyqS70LY51aX8FBTvguQqvFrt78qL7UzeNc= +cosmossdk.io/x/feegrant v0.1.1 h1:EKFWOeo/pup0yF0svDisWWKAA9Zags6Zd0P3nRvVvw8= +cosmossdk.io/x/feegrant v0.1.1/go.mod h1:2GjVVxX6G2fta8LWj7pC/ytHjryA6MHAJroBWHFNiEQ= +cosmossdk.io/x/tx v0.13.8 h1:dQwC8jMe7awx/edi1HPPZ40AjHnsix6KSO/jbKMUYKk= +cosmossdk.io/x/tx v0.13.8/go.mod h1:V6DImnwJMTq5qFjeGWpXNiT/fjgE4HtmclRmTqRVM3w= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5 h1:JA0fFr+kxpqTdxR9LOBiTWpGNchqmkcsgmdeJZRclZ0= +filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5/go.mod h1:OjOXDNlClLblvXdwgFFOQFJEocLhhtai8vGLy0JCZlI= +filippo.io/edwards25519 v1.1.1 h1:YpjwWWlNmGIDyXOn8zLzqiD+9TyIlPhGFG96P39uBpw= +filippo.io/edwards25519 v1.1.1/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b h1:REI1FbdW71yO56Are4XAxD+OS/e+BQsB3gE4mZRQEXY= +filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b/go.mod h1:9nnw1SlYHYuPSo/3wjQzNjSbeHlq2NsKo5iEtfJPWP0= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= +github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk= +github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= +github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= +github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 h1:DHa2U07rk8syqvCge0QIGMCE1WxGj9njT44GH7zNJLQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 h1:UnDZ/zFfG1JhH/DqxIZYU/1CUAlTUScoXD/LcM2Ykk8= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0/go.mod h1:IA1C1U7jO/ENqm/vhi7V9YYpBsp+IMyqNrEN94N7tVc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0 h1:7t/qx5Ost0s0wbA/VDrByOooURhp+ikYwv20i9Y07TQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0/go.mod h1:vB2GH9GAYYJTO3mEn8oYwzEdhlayZIdQz6zdzgUIRvA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 h1:0s6TxfCu2KHkkZPnBfsQ2y5qia0jl3MMrmBhu3nCOYk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc= +github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI= +github.com/RaduBerinde/axisds v0.1.0 h1:YItk/RmU5nvlsv/awo2Fjx97Mfpt4JfgtEVAGPrLdz8= +github.com/RaduBerinde/axisds v0.1.0/go.mod h1:UHGJonU9z4YYGKJxSaC6/TNcLOBptpmM5m2Cksbnw0Y= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 h1:bsU8Tzxr/PNz75ayvCnxKZWEYdLMPDkUgticP4a4Bvk= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54/go.mod h1:0tr7FllbE9gJkHq7CVeeDDFAFKQVy5RnCSSNBOvdqbc= +github.com/Sereal/Sereal/Go/sereal v0.0.0-20231009093132-b9187f1a92c6/go.mod h1:JwrycNnC8+sZPDyzM3MQ86LvaGzSpfxg885KOOwFRW4= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= 
+github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= +github.com/adlio/schema v1.4.0 h1:dekxG6P0my/bPvlyWzMULelR2Xej8RGErlnJcoY5ddw= +github.com/adlio/schema v1.4.0/go.mod h1:3/ojUldWBCWp4e+6VN9ets6unG5WdqbjF7vyzM0zTVQ= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.41.6 h1:1AX0AthnBQzMx1vbmir3Y4WsnJgiydmnJjiLu+LvXOg= +github.com/aws/aws-sdk-go-v2 v1.41.6/go.mod h1:dy0UzBIfwSeot4grGvY1AqFWN5zgziMmWGzysDnHFcQ= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.9 h1:adBsCIIpLbLmYnkQU+nAChU5yhVTvu5PerROm+/Kq2A= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.9/go.mod h1:uOYhgfgThm/ZyAuJGNQ5YgNyOlYfqnGpTHXvk3cpykg= +github.com/aws/aws-sdk-go-v2/config v1.32.16 h1:Q0iQ7quUgJP0F/SCRTieScnaMdXr9h/2+wze1u3cNeM= +github.com/aws/aws-sdk-go-v2/config v1.32.16/go.mod h1:duCCnJEFqpt2RC6no1iK6q+8HpwOAkiUua0pY507dQc= +github.com/aws/aws-sdk-go-v2/credentials v1.19.15 h1:fyvgWTszojq8hEnMi8PPBTvZdTtEVmAVyo+NFLHBhH4= +github.com/aws/aws-sdk-go-v2/credentials v1.19.15/go.mod h1:gJiYyMOjNg8OEdRWOf3CrFQxM2a98qmrtjx1zuiQfB8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.22 h1:IOGsJ1xVWhsi+ZO7/NW8OuZZBtMJLZbk4P5HDjJO0jQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.22/go.mod h1:b+hYdbU+jGKfXE8kKM6g1+h+L/Go3vMvzlxBsiuGsxg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22 h1:GmLa5Kw1ESqtFpXsx5MmC84QWa/ZrLZvlJGa2y+4kcQ= 
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22/go.mod h1:6sW9iWm9DK9YRpRGga/qzrzNLgKpT2cIxb7Vo2eNOp0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22 h1:dY4kWZiSaXIzxnKlj17nHnBcXXBfac6UlsAx2qL6XrU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22/go.mod h1:KIpEUx0JuRZLO7U6cbV204cWAEco2iC3l061IxlwLtI= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23 h1:FPXsW9+gMuIeKmz7j6ENWcWtBGTe1kH8r9thNt5Uxx4= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23/go.mod h1:7J8iGMdRKk6lw2C+cMIphgAnT8uTwBwNOsGkyOCm80U= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 h1:HtOTYcbVcGABLOVuPYaIihj6IlkqubBwFj10K5fxRek= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8/go.mod h1:VsK9abqQeGlzPgUr+isNWzPlK2vKe9INMLWnY65f5Xs= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.14 h1:xnvDEnw+pnj5mctWiYuFbigrEzSm35x7k4KS/ZkCANg= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.14/go.mod h1:yS5rNogD8e0Wu9+l3MUwr6eENBzEeGejvINpN5PAYfY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 h1:PUmZeJU6Y1Lbvt9WFuJ0ugUK2xn6hIWUBBbKuOWF30s= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22/go.mod h1:nO6egFBoAaoXze24a2C0NjQCvdpk8OueRoYimvEB9jo= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.22 h1:SE+aQ4DEqG53RRCAIHlCf//B2ycxGH7jFkpnAh/kKPM= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.22/go.mod h1:ES3ynECd7fYeJIL6+oax+uIEljmfps0S70BaQzbMd/o= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0 h1:696UM+NwOrETBCLQJyCAGtVmmZmziBT59yMwgg6Fvrw= +github.com/aws/aws-sdk-go-v2/service/kms v1.51.0/go.mod h1:GBO/aaEi47QldDVoqw2CsM2UZQDoqDiFIMJD/ztHPs0= +github.com/aws/aws-sdk-go-v2/service/s3 v1.99.1 h1:kU/eBN5+MWNo/LcbNa4hWDdN76hdcd7hocU5kvu7IsU= +github.com/aws/aws-sdk-go-v2/service/s3 v1.99.1/go.mod h1:Fw9aqhJicIVee1VytBBjH+l+5ov6/PhbtIK/u3rt/ls= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.10 h1:a1Fq/KXn75wSzoJaPQTgZO0wHGqE9mjFnylnqEPTchA= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.10/go.mod h1:p6+MXNxW7IA6dMgHfTAzljuwSKD0NCm/4lbS4t6+7vI= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.16 h1:x6bKbmDhsgSZwv6q19wY/u3rLk/3FGjJWyqKcIRufpE= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.16/go.mod h1:CudnEVKRtLn0+3uMV0yEXZ+YZOKnAtUJ5DmDhilVnIw= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20 h1:oK/njaL8GtyEihkWMD4k3VgHCT64RQKkZwh0DG5j8ak= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.20/go.mod h1:JHs8/y1f3zY7U5WcuzoJ/yAYGYtNIVPKLIbp61euvmg= +github.com/aws/aws-sdk-go-v2/service/sts v1.42.0 h1:ks8KBcZPh3PYISr5dAiXCM5/Thcuxk8l+PG4+A0exds= +github.com/aws/aws-sdk-go-v2/service/sts v1.42.0/go.mod h1:pFw33T0WLvXU3rw1WBkpMlkgIn54eCB5FYLhjDc9Foo= +github.com/aws/smithy-go v1.25.1 h1:J8ERsGSU7d+aCmdQur5Txg6bVoYelvQJgtZehD12GkI= +github.com/aws/smithy-go v1.25.1/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/bcp-innovations/hyperlane-cosmos v1.1.0 h1:WXt+WrKv2DG/xVIkLvggDRbi/2law104Vj6AWZGxHNw= +github.com/bcp-innovations/hyperlane-cosmos v1.1.0/go.mod h1:NP59yKAk2qFaT7+FSCh7kkoKKLlTxXNdIlxMstAJ5no= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod 
h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bgentry/speakeasy v0.2.0 h1:tgObeVOf8WAvtuAX6DhJ4xks4CFNwPDZiqzGqIHE51E= +github.com/bgentry/speakeasy v0.2.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.24.0 h1:H4x4TuulnokZKvHLfzVRTHJfFfnHEeSYJizujEZvmAM= +github.com/bits-and-blooms/bitset v1.24.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/btcsuite/btcd/btcec/v2 v2.3.5 h1:dpAlnAwmT1yIBm3exhT1/8iUSD98RDJM5vqJVQDQLiU= +github.com/btcsuite/btcd/btcec/v2 v2.3.5/go.mod h1:m22FrOAiuxl/tht9wIqAoGHcbnCCaPWyauO8y2LGGtQ= +github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c= +github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= +github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= +github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= +github.com/canonical/go-sp800.90a-drbg v0.0.0-20210314144037-6eeb1040d6c3 h1:oe6fCvaEpkhyW3qAicT0TnGtyht/UrgvOwMcEgLb7Aw= +github.com/canonical/go-sp800.90a-drbg v0.0.0-20210314144037-6eeb1040d6c3/go.mod h1:qdP0gaj0QtgX2RUZhnlVrceJ+Qln8aSlDyJwelLLFeM= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/celestiaorg/boxo v0.29.0-fork-4 h1:A202u8w3Iqjw4ZlqSukfiMbefQEN+740GUj1Z3VI960= +github.com/celestiaorg/boxo v0.29.0-fork-4/go.mod h1:rXql6ncaLZZfLqDG3Cuw9ZYQKd3rMU5bk1TGXF0+ZL0= +github.com/celestiaorg/celestia-app/v9 v9.0.0-20260427114616-9ff33a36eb19 h1:7+h4twHnq0oQaj3N73VqBWtmcIMStXJk6Q5XqwiAmzg= +github.com/celestiaorg/celestia-app/v9 v9.0.0-20260427114616-9ff33a36eb19/go.mod h1:HKqFxEeuWopDU87dCOkLktn6P4N+wEeVm1FdSEESvSQ= +github.com/celestiaorg/celestia-core v0.40.1 h1:JF9gyLKLU5oCFIeTAQtiHBZeOhppozEToN+o8bgvxT8= +github.com/celestiaorg/celestia-core v0.40.1/go.mod h1:3Jhugz4ibMVEP2+7+FjLEDsV4TcU2tHgfGSm4zkSNv4= +github.com/celestiaorg/celestia-node v0.29.3-mocha.0.20260427115656-23bec13f01a3 h1:UrzojEvUNgOiCDZB+4ba7oqhx/FtreUE0JYQ5bMxNtY= +github.com/celestiaorg/celestia-node v0.29.3-mocha.0.20260427115656-23bec13f01a3/go.mod h1:LlQzPvGjZIQKJ9XXgR6yMeRIv+WBUDhlcva++NhnMNs= +github.com/celestiaorg/cosmos-sdk v0.52.3 h1:YPMFCycTw77P7tn+HQHTmmdBwXWNMDOrZ6/xVPK9nvM= +github.com/celestiaorg/cosmos-sdk v0.52.3/go.mod h1:2N4NRio08+WQsB7hsKo/ELXCQSWl78GiYdd9M1H6MpQ= +github.com/celestiaorg/cosmos-sdk/api v0.7.6 h1:81in9Zk+noz0ko+hZFSSK8L1aawFN8/CmdcQAUhbiUU= +github.com/celestiaorg/cosmos-sdk/api v0.7.6/go.mod h1:1BgQSufu6ZQkst3YBIHDCo/TPUrhfU4fV7tOI0ftql8= 
+github.com/celestiaorg/cosmos-sdk/log v1.1.1-0.20251116153902-f48fea92e627 h1:qYV81fA5E739ZtdMFCjChx0AMY+qBmMVPfRE3ol+VCE= +github.com/celestiaorg/cosmos-sdk/log v1.1.1-0.20251116153902-f48fea92e627/go.mod h1:lQTBplaW3HQLKQdPaQq+ElW6zASAoo9r3bJ7pOr8SWo= +github.com/celestiaorg/cosmos-sdk/x/upgrade v0.2.0 h1:GyDYfK8dLETlUI7F+w+3QYQgAszUegMXgB6cTbDm7CA= +github.com/celestiaorg/cosmos-sdk/x/upgrade v0.2.0/go.mod h1:T4K9O18zQNKNpt4YvTL3lcUt4aKOEU05ZIFWVdQi3Ak= +github.com/celestiaorg/go-datastore v0.0.0-20250801131506-48a63ae531e4 h1:udw77BU45zmvTV7798FhR1wHFmsFpu4GnA5mubtMcR0= +github.com/celestiaorg/go-datastore v0.0.0-20250801131506-48a63ae531e4/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0= +github.com/celestiaorg/go-header v0.8.5 h1:MkzlioiSeybKVNDa0805fS3mS3NG8ub93Gs2xaKwSZ4= +github.com/celestiaorg/go-header v0.8.5/go.mod h1:DKl6pcKCJ0ehGUgDmfxBNz6Lv0Ky4E1Oyrcx96eQm/4= +github.com/celestiaorg/go-libp2p-messenger v0.2.2 h1:osoUfqjss7vWTIZrrDSy953RjQz+ps/vBFE7bychLEc= +github.com/celestiaorg/go-libp2p-messenger v0.2.2/go.mod h1:oTCRV5TfdO7V/k6nkx7QjQzGrWuJbupv+0o1cgnY2i4= +github.com/celestiaorg/go-square/merkle v0.0.0-20240627094109-7d01436067a3 h1:wP84mtwOCVNOTfS3zErICjxKLnh74Z1uf+tdrlSFjVM= +github.com/celestiaorg/go-square/merkle v0.0.0-20240627094109-7d01436067a3/go.mod h1:86qIYnEhmn/hfW+xvw98NOI3zGaDEB3x8JGjYo2FqLs= +github.com/celestiaorg/go-square/v2 v2.3.3 h1:vhu6Lt39km19Q/Jk4nS3r2cuWJq6jFg+/1+iG8YGftY= +github.com/celestiaorg/go-square/v2 v2.3.3/go.mod h1:vY5RRv+qRmEVjPF6dAdr0dyLwKmTTDHHffENPQw8pUA= +github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= +github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/go-square/v4 v4.0.0-rc4.0.20260318002530-1ca8ff7b42ea h1:gpezhWD8ATU12vl79F0N9PHdZC6CW+5mxFn0gBlUf2Y= +github.com/celestiaorg/go-square/v4 v4.0.0-rc4.0.20260318002530-1ca8ff7b42ea/go.mod h1:UoBkF0lYuU51du9XN6oIJ7nM5Wun+mgbiItQCdxlNJU= +github.com/celestiaorg/ibc-go/v8 v8.7.2 h1:AWae851fdX7pJWlGnUBKlKJzpr4c2t5m4TLs6vDfmAY= +github.com/celestiaorg/ibc-go/v8 v8.7.2/go.mod h1:E3WTax+cfyDIehNRpwEI96/0E8GBtU1g9XWr18qUGZ8= +github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= +github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= +github.com/celestiaorg/nmt v0.24.3 h1:ylQnRlXkVoTtq36CxtCyXYZX4JISBsHgKlAAUAnf7ig= +github.com/celestiaorg/nmt v0.24.3/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= +github.com/celestiaorg/rsmt2d v0.15.2 h1:wHqNqaBboSX5e8Czm4FnBnys4RPp5gSNm4CAcsXAyTU= +github.com/celestiaorg/rsmt2d v0.15.2/go.mod h1:1NyWG9hj7veHbLmpQUKg+77teLuVgq0kpv3FS9nEtL4= +github.com/celestiaorg/utils v0.1.0 h1:WsP3O8jF7jKRgLNFmlDCwdThwOFMFxg0MnqhkLFVxPo= +github.com/celestiaorg/utils v0.1.0/go.mod h1:vQTh7MHnvpIeCQZ2/Ph+w7K1R2UerDheZbgJEJD2hSU= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 
v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= +github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= +github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b h1:SHlYZ/bMx7frnmeqCu+xm0TCxXLzX3jQIVuFbnFGtFU= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5 h1:UycK/E0TkisVrQbSoxvU827FwgBBcZ95nRRmpj/12QI= +github.com/cockroachdb/datadriven 
v1.0.3-0.20250407164829-2945557346d5/go.mod h1:jsaKMvD3RBCATk1/jbUZM8C9idWBJME9+VRZ5+Liq1g= +github.com/cockroachdb/errors v1.12.0 h1:d7oCs6vuIMUQRVbi6jWWWEJZahLCfJpnJSVobd1/sUo= +github.com/cockroachdb/errors v1.12.0/go.mod h1:SvzfYNNBshAVbZ8wzNc/UPK3w1vf0dKDUP41ucAIf7g= +github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 h1:pU88SPhIFid6/k0egdR5V6eALQYq2qbSmukrkgIh/0A= +github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 h1:ASDL+UJcILMqgNeV5jiqR4j+sTuvQNHdf2chuKj1M5k= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506/go.mod h1:Mw7HqKr2kdtu6aYGn3tPmAftiP3QPX63LdK/zcariIo= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= +github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= +github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= +github.com/cockroachdb/pebble/v2 v2.1.4 h1:j9wPgMDbkErFdAKYFGhsoCcvzcjR+6zrJ4jhKtJ6bOk= +github.com/cockroachdb/pebble/v2 v2.1.4/go.mod h1:Reo1RTniv1UjVTAu/Fv74y5i3kJ5gmVrPhO9UtFiKn8= +github.com/cockroachdb/redact v1.1.6 h1:zXJBwDZ84xJNlHl1rMyCojqyIxv+7YUpQiJLQ7n4314= +github.com/cockroachdb/redact v1.1.6/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b h1:VXvSNzmr8hMj8XTuY0PT9Ane9qZGul/p67vGYwl9BFI= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= +github.com/cockroachdb/tokenbucket v0.0.0-20250429170803-42689b6311bb h1:3bCgBvB8PbJVMX1ouCcSIxvsqKPYM7gs72o0zC76n9g= +github.com/cockroachdb/tokenbucket v0.0.0-20250429170803-42689b6311bb/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coder/websocket v1.8.6 h1:OmNKdwUvLj7VvHnl5o8skaVghSPLjWdHGCnFbkWqs9w= +github.com/coder/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +github.com/cometbft/cometbft-db v1.0.4 h1:cezb8yx/ZWcF124wqUtAFjAuDksS1y1yXedvtprUFxs= +github.com/cometbft/cometbft-db v1.0.4/go.mod h1:M+BtHAGU2XLrpUxo3Nn1nOCcnVCiLM9yx5OuT0u5SCA= +github.com/consensys/gnark v0.14.0 h1:RG+8WxRanFSFBSlmCDRJnYMYYKpH3Ncs5SMzg24B5HQ= +github.com/consensys/gnark v0.14.0/go.mod h1:1IBpDPB/Rdyh55bQRR4b0z1WvfHQN1e0020jCvKP2Gk= +github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= +github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= +github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= +github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 
+github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-db v1.1.3 h1:7QNT77+vkefostcKkhrzDK9uoIEryzFrU9eoMeaQOPY= +github.com/cosmos/cosmos-db v1.1.3/go.mod h1:kN+wGsnwUJZYn8Sy5Q2O0vCYA99MJllkKASbs6Unb9U= +github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= +github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= +github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= +github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU= +github.com/cosmos/gogoproto v1.7.2 h1:5G25McIraOC0mRFv9TVO139Uh3OklV2hczr13KKVHCA= +github.com/cosmos/gogoproto v1.7.2/go.mod h1:8S7w53P1Y1cHwND64o0BnArT6RmdgIvsBuco6uTllsk= +github.com/cosmos/iavl v1.2.8 h1:55F96BGUJ7KT7h+Ky/cEqS+pEvhFqsU4O8Th3F0N1js= +github.com/cosmos/iavl v1.2.8/go.mod h1:FRHN4tO+6crf0p2zsqye+nAbsMgiwdkxpWm18DyP6+Y= +github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8 v8.2.0 h1:rM+S14DFiqmu6Rc3PuhvWqwywPsnt/CbIslSnBftPFs= +github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8 v8.2.0/go.mod h1:O5H9Ic3Pe6cmJn1eqlj5N48sLb8WQ1VWmDP4/11g/4E= +github.com/cosmos/ibc-go/modules/capability v1.0.1 h1:ibwhrpJ3SftEEZRxCRkH0fQZ9svjthrX2+oXdZvzgGI= +github.com/cosmos/ibc-go/modules/capability v1.0.1/go.mod h1:rquyOV262nGJplkumH+/LeYs04P3eV8oB7ZM4Ygqk4E= +github.com/cosmos/ics23/go v0.11.0 h1:jk5skjT0TqX5e5QJbEnwXIS2yI2vnmLOgpQPeM5RtnU= +github.com/cosmos/ics23/go v0.11.0/go.mod h1:A8OjxPE67hHST4Icw94hOxxFEJMBG031xIGF/JHNIY0= +github.com/cosmos/ledger-cosmos-go v0.15.0 h1:xmizkkEX19tyFLVL6PPMQNg21Jc9W9/bpbwxMDdtxXg= +github.com/cosmos/ledger-cosmos-go v0.15.0/go.mod h1:KJqW5U4/MMl8ICPO4WPjIAyC4TfYRnr28d9N9bBUKWc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cristalhq/jwt/v5 v5.4.0 h1:Wxi1TocFHaijyV608j7v7B9mPc4ZNjvWT3LKBO0d4QI= +github.com/cristalhq/jwt/v5 v5.4.0/go.mod h1:+b/BzaCWEpFDmXxspJ5h4SdJ1N/45KMjKOetWzmHvDA= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= +github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/dchest/siphash v1.2.3/go.mod h1:0NvQU092bT0ipiFN++/rXm69QG9tVxLAlQHIXMPAkHc= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1 h1:5RVFMOWjMyRy8cARdy79nAmgYw3hK/4HUq48LQ6Wwqo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/desertbit/timer v1.0.1 h1:yRpYNn5Vaaj6QXecdLMPMJsW81JLiI1eokUft5nBmeo= +github.com/desertbit/timer v1.0.1/go.mod h1:htRrYeY5V/t4iu1xCJ5XsQvp4xve8QulXXctAzxqcwE= +github.com/dgraph-io/badger/v4 v4.9.1 h1:DocZXZkg5JJHJPtUErA0ibyHxOVUDVoXLSCV6t8NC8w= +github.com/dgraph-io/badger/v4 v4.9.1/go.mod h1:5/MEx97uzdPUHR4KtkNt8asfI2T4JiEiQlV7kWUo8c0= +github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM= +github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-ddmin v0.0.0-20210904190556-96a6d69f1034/go.mod h1:zz4KxBkcXUWKjIcrc+uphJ1gPh/t18ymGm3PmQ+VGTk= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dunglas/httpsfv v1.1.0 h1:Jw76nAyKWKZKFrpMMcL76y35tOpYHqQPzHQiwDvpe54= +github.com/dunglas/httpsfv v1.1.0/go.mod h1:zID2mqw9mFsnt7YC3vYQ9/cjq30q41W+1AnDwH8TiMg= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.7.0 h1:bnQc8+GMnidJZA8zc6lLEAb4xNrIqHwO+9TzqvtQZPo= +github.com/dvsekhvalnov/jose2go v1.7.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod 
h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA= +github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU= +github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g= +github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= +github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= +github.com/ethereum/go-ethereum v1.17.0 h1:2D+1Fe23CwZ5tQoAS5DfwKFNI1HGcTwi65/kRlAVxes= +github.com/ethereum/go-ethereum v1.17.0/go.mod h1:2W3msvdosS/MCWytpqTcqgFiRYbTH59FxDJzqah120o= +github.com/evstack/ev-node/core v1.0.0 h1:s0Tx0uWHme7SJn/ZNEtee4qNM8UO6PIxXnHhPbbKTz8= +github.com/evstack/ev-node/core v1.0.0/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= +github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= +github.com/filecoin-project/go-jsonrpc v0.10.1 h1:iEhgrjO0+rawwOZWRNgexLrWGLA+IEUyWiRRL134Ob8= +github.com/filecoin-project/go-jsonrpc v0.10.1/go.mod h1:OG7kVBVh/AbDFHIwx7Kw0l9ARmKOS6gGOr0LbdBpbLc= +github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= +github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/fortytw2/leaktest v1.3.0 
h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ= +github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= +github.com/gammazero/deque v1.2.1 h1:9fnQVFCCZ9/NOc7ccTNqzoKd1tCWOqeI05/lPqFPMGQ= +github.com/gammazero/deque v1.2.1/go.mod h1:5nSFkzVm+afG9+gy0VIowlqVAW4N8zNcMne+CMQVD2g= +github.com/gammazero/workerpool v1.2.1 h1:MEDvUJsNYGuCvl1RwIXNKu2YtQtHqCSF9XWF04N7lqs= +github.com/gammazero/workerpool v1.2.1/go.mod h1:E32GVRUanF4d6QtRmdss3AScgaDkIyrvPtgRQUWgmx4= +github.com/getsentry/sentry-go v0.42.0 h1:eeFMACuZTbUQf90RE8dE4tXeSe4CZyfvR1MBL7RLEt8= +github.com/getsentry/sentry-go v0.42.0/go.mod h1:eRXCoh3uvmjQLY6qu63BjUZnaBu5L5WhMV1RwYO8W5s= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.1.4 h1:moDMcTHmvE6Groj34emNPLs/qtYXRVcd6S7NHbHz3kA= +github.com/go-jose/go-jose/v4 v4.1.4/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= +github.com/go-kit/kit v0.13.0/go.mod 
h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.6.1 h1:4hvbpePJKnIzH1B+8OR/JPbTx37NktoI9LE2QZBBkvE= +github.com/go-logfmt/logfmt v0.6.1/go.mod h1:EV2pOAQoZaT1ZXZbqDl5hrymndi4SY9ED9/z6CO0XAk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro= +github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM= +github.com/goccy/go-yaml v1.19.2/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= 
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= +github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1-0.20201022092350-68b0159b7869/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= +github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod 
h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8= +github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.21.0 h1:h45NjjzEO3faG9Lg/cFrBh2PgegVVgzqKzuZl/wMbiI= +github.com/googleapis/gax-go/v2 v2.21.0/go.mod h1:But/NJU6TnZsrLai/xBAQLLz+Hc7fHZJt/hsCz3Fih4= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8= +github.com/grafana/otel-profiling-go v0.5.1/go.mod h1:ftN/t5A/4gQI19/8MoWurBEtC6gFw8Dns1sJZ9W4Tls= +github.com/grafana/pyroscope-go v1.2.8 h1:UvCwIhlx9DeV7F6TW/z8q1Mi4PIm3vuUJ2ZlCEvmA4M= +github.com/grafana/pyroscope-go v1.2.8/go.mod h1:SSi59eQ1/zmKoY/BKwa5rSFsJaq+242Bcrr4wPix1g8= +github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og= +github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.72 h1:vTCWu1wbdYo7PEZFem/rlr01+Un+wwVmI7wiegFdRLk= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.72/go.mod h1:Vn+BBgKQHVQYdVQ4NZDICE1Brb+JfaONyDHr3q07oQc= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk 
v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter v1.8.6 h1:9sQboWULaydVphxc4S64oAI4YqpuCk7nPmvbk131ebY= +github.com/hashicorp/go-getter v1.8.6/go.mod h1:nVH12eOV2P58dIiL3rsU6Fh3wLeJEKBOJzhMmzlSWoo= +github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= +github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= +github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= +github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= +github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= +github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/raft v1.7.3 h1:DxpEqZJysHN0wK+fviai5mFcSYsCkNpFUl1xpAW8Rbo= +github.com/hashicorp/raft v1.7.3/go.mod h1:DfvCGFxpAUPE0L4Uc8JLlTPtc3GzSbdH0MTJCLgnmJQ= +github.com/hashicorp/raft-boltdb v0.0.0-20251103221153-05f9dd7a5148 h1:tjaIHlfKX22DCCPTx2mK+6N/kTP9DV7B3bxEUyQtjKA= +github.com/hashicorp/raft-boltdb v0.0.0-20251103221153-05f9dd7a5148/go.mod h1:sgCxzMuvQ3huVxgmeDdj73YIMmezWZ40HQu2IPmjJWk= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= +github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= +github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= +github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= +github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= +github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= +github.com/huandu/skiplist v1.2.1 h1:dTi93MgjwErA/8idWTzIw4Y1kZsMWx35fmI2c8Rij7w= +github.com/huandu/skiplist v1.2.1/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/iancoleman/orderedmap v0.3.0 h1:5cbR2grmZR/DiVt+VJopEhtVs9YGInGIxAoMJn+Ichc= +github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJDkXXS7VoV7XGE= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= 
+github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/ingonyama-zk/icicle-gnark/v3 v3.2.2 h1:B+aWVgAx+GlFLhtYjIaF0uGjU3rzpl99Wf9wZWt+Mq8= +github.com/ingonyama-zk/icicle-gnark/v3 v3.2.2/go.mod h1:CH/cwcr21pPWH+9GtK/PFaa4OGTv4CtfkCKro6GpbRE= +github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= +github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk= +github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA= +github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30= +github.com/ipfs/go-cid v0.6.0/go.mod h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger4 v0.1.8 h1:frNczf5CjCVm62RJ5mW5tD/oLQY/9IKAUpKviRV9QAI= +github.com/ipfs/go-ds-badger4 v0.1.8/go.mod h1:FdqSLA5TMsyqooENB/Hf4xzYE/iH0z/ErLD6ogtfMrA= +github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= +github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= +github.com/ipfs/go-ipld-format v0.6.3 h1:9/lurLDTotJpZSuL++gh3sTdmcFhVkCwsgx2+rAh4j8= +github.com/ipfs/go-ipld-format v0.6.3/go.mod h1:74ilVN12NXVMIV+SrBAyC05UJRk0jVvGqdmrcYZvCBk= +github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ= +github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU= +github.com/ipfs/go-log/v2 v2.9.1 h1:3JXwHWU31dsCpvQ+7asz6/QsFJHqFr4gLgQ0FWteujk= +github.com/ipfs/go-log/v2 v2.9.1/go.mod h1:evFx7sBiohUN3AG12mXlZBw5hacBQld3ZPHrowlJYoo= +github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU= +github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY= +github.com/ipfs/go-metrics-prometheus v0.1.0 h1:bApWOHkrH3VTBHzTHrZSfq4n4weOZDzZFxUXv+HyKcA= +github.com/ipfs/go-metrics-prometheus v0.1.0/go.mod h1:2GtL525C/4yxtvSXpRJ4dnE45mCX9AS0XRa03vHx7G0= +github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU= +github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA= +github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc= +github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o= +github.com/ipld/go-codec-dagpb v1.7.0 h1:hpuvQjCSVSLnTnHXn+QAMR0mLmb1gA6wl10LExo2Ts0= +github.com/ipld/go-codec-dagpb v1.7.0/go.mod h1:rD3Zg+zub9ZnxcLwfol/OTQRVjaLzXypgy4UqHQvilM= +github.com/ipld/go-ipld-prime v0.22.0 
h1:YJhDhjEOvOYaqshd3b4atIWUoRg/rKrgmwCyUHwlbuY= +github.com/ipld/go-ipld-prime v0.22.0/go.mod h1:ol7vKxOOVgEh0iAPuiDalM+0gScXVMA5ZZa4DVrTnEA= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= +github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= +github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/reedsolomon v1.13.4-0.20260420101718-f7e5efe6123a h1:aP94idRf0yhG07gBSIyW3sy/cd+XNLWnghSp11y0oIc= +github.com/klauspost/reedsolomon v1.13.4-0.20260420101718-f7e5efe6123a/go.mod 
h1:yjqqjgMTQkBUHSG97/rm4zipffCNbCiZcB3kTqr++sQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU= +github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= +github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lib/pq v1.12.3 h1:tTWxr2YLKwIvK90ZXEw8GP7UFHtcbTtty8zsI+YjrfQ= +github.com/lib/pq v1.12.3/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= +github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= +github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= +github.com/libp2p/go-libp2p v0.48.0 h1:h2BrLAgrj7X8bEN05K7qmrjpNHYA+6tnsGRdprjTnvo= +github.com/libp2p/go-libp2p v0.48.0/go.mod h1:Q1fBZNdmC2Hf82husCTfkKJVfHm2we5zk+NWmOGEmWk= +github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= +github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= +github.com/libp2p/go-libp2p-kad-dht v0.39.1 h1:9RzUBc7zywT4ZNamRSgEvPZmVlK3Y6xdlCYfXXvlR/Q= +github.com/libp2p/go-libp2p-kad-dht v0.39.1/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= +github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= +github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= +github.com/libp2p/go-libp2p-pubsub v0.16.0 h1:j7G2C8kJwkcAQqYR7Wmq3d75d3Sgw/N0Hhiv0dVx7OY= +github.com/libp2p/go-libp2p-pubsub v0.16.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= +github.com/libp2p/go-libp2p-record v0.3.1 
h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= +github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= +github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= +github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= +github.com/libp2p/go-netroute v0.4.0 h1:sZZx9hyANYUx9PZyqcgE/E1GUG3iEtTZHUEvdtXT7/Q= +github.com/libp2p/go-netroute v0.4.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= +github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg= +github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linxGnu/grocksdb v1.9.8 h1:vOIKv9/+HKiqJAElJIEYv3ZLcihRxyP7Suu/Mu8Dxjs= +github.com/linxGnu/grocksdb v1.9.8/go.mod h1:C3CNe9UYc9hlEM2pC82AqiGS3LRW537u9LFV4wIZuHk= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM= +github.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/marcopolo/simnet v0.0.4 h1:50Kx4hS9kFGSRIbrt9xUS3NJX33EyPqHVmpXvaKLqrY= +github.com/marcopolo/simnet v0.0.4/go.mod h1:tfQF1u2DmaB6WHODMtQaLtClEf3a296CKQLq5gAsIS0= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod 
h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/highwayhash v1.0.4 h1:asJizugGgchQod2ja9NJlGOWq4s7KsAWr5XUc9Clgl4= +github.com/minio/highwayhash v1.0.4/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 h1:0lgqHvJWHLGW5TuObJrfyEi6+ASTKDBWikGvPqy9Yiw= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 
v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw= +github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr-dns v0.5.0 h1:p/FTyHKX0nl59f+S+dEUe8HRK+i5Ow/QHMw8Nh3gPCo= +github.com/multiformats/go-multiaddr-dns v0.5.0/go.mod h1:yJ349b8TPIAANUyuOzn1oz9o22tV9f+06L+cCeMxC14= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.10.0 h1:UpP223cig/Cx8J76jWt91njpK3GTAO1w02sdcjZDSuc= +github.com/multiformats/go-multicodec v0.10.0/go.mod h1:wg88pM+s2kZJEQfRCKBNU+g32F5aWBEjyFHXvZLTcLI= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ= +github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw= +github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI= +github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
+github.com/mwitkow/grpc-proxy v0.0.0-20181017164139-0f1106ef9c76/go.mod h1:x5OoJHDHqxHS801UIuhqGl6QdSAEJvtausosHSdazIo= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a h1:dlRvE5fWabOchtH7znfiFCcOvmIYgOeAS5ifBXBlh9Q= +github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= +github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= +github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 
h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runc v1.2.8 h1:RnEICeDReapbZ5lZEgHvj7E9Q3Eex9toYmaGBsbvU5Q= +github.com/opencontainers/runc v1.2.8/go.mod h1:cC0YkmZcuvr+rtBZ6T7NBoVbMGNAdLa/21vIElJDOzI= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.3.0 h1:k59bC/lIZREW0/iVaQR8nDHxVq8OVlIzYCOJf421CaM= +github.com/pelletier/go-toml/v2 v2.3.0/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe h1:vHpqOnPlnkba8iSxU4j/CvDSS9J4+F4473esQsYLGoE= +github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= +github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= +github.com/pion/dtls/v3 v3.1.2 h1:gqEdOUXLtCGW+afsBLO0LtDD8GnuBBjEy6HRtyofZTc= +github.com/pion/dtls/v3 v3.1.2/go.mod h1:Hw/igcX4pdY69z1Hgv5x7wJFrUkdgHwAn/Q/uo7YHRo= +github.com/pion/ice/v4 v4.0.10 
h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4= +github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= +github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4= +github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic= +github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8= +github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so= +github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= +github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= +github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= +github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/rtcp v1.2.16 h1:fk1B1dNW4hsI78XUCljZJlC4kZOPk67mNRuQ0fcEkSo= +github.com/pion/rtcp v1.2.16/go.mod h1:/as7VKfYbs5NIb4h6muQ35kQF/J0ZVNz2Z3xKoCBYOo= +github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c= +github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk= +github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE= +github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE= +github.com/pion/sdp/v3 v3.0.18 h1:l0bAXazKHpepazVdp+tPYnrsy9dfh7ZbT8DxesH5ZnI= +github.com/pion/sdp/v3 v3.0.18/go.mod h1:ZREGo6A9ZygQ9XkqAj5xYCQtQpif0i6Pa81HOiAdqQ8= +github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4= +github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY= +github.com/pion/stun/v3 v3.1.1 h1:CkQxveJ4xGQjulGSROXbXq94TAWu8gIX2dT+ePhUkqw= +github.com/pion/stun/v3 v3.1.1/go.mod h1:qC1DfmcCTQjl9PBaMa5wSn3x9IPmKxSdcCsxBcDBndM= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/transport/v4 v4.0.1 h1:sdROELU6BZ63Ab7FrOLn13M6YdJLY20wldXW2Cu2k8o= +github.com/pion/transport/v4 v4.0.1/go.mod h1:nEuEA4AD5lPdcIegQDpVLgNoDGreqM/YqmEx3ovP4jM= +github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps= +github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs= +github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54= +github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 
h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.89.1-0.20231129105047-37766d95467a h1:cgqrm0F3zwf9IPzca7xN4w+Zy6MC9ZkPvAC8QEWa/iQ= +github.com/polydawn/refmt v0.89.1-0.20231129105047-37766d95467a/go.mod h1:ocZfO/tLSHqfScRDNTJbAJR1by4D1lewauX9OwTaPuY= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.20.1 h1:XwbrGOIplXW/AU3YhIhLODXMJYyC1isLFfYCsTEycfc= +github.com/prometheus/procfs v0.20.1/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo= +github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= +github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= +github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw= +github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU= +github.com/quic-go/webtransport-go v0.10.0 h1:LqXXPOXuETY5Xe8ITdGisBzTYmUOy5eSj+9n4hLTjHI= +github.com/quic-go/webtransport-go v0.10.0/go.mod h1:LeGIXr5BQKE3UsynwVBeQrU1TPrbh73MGoC6jd+V7ow= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rollkit/go-da v0.9.0 h1:ECpS7RSLE84w61Y5a93RfaZrAZKeiyfjVwGxzjsW2cU= +github.com/rollkit/go-da v0.9.0/go.mod h1:LQImomegjZ/dEQLKDJA7RdUnOLINSZJwI7q1CvFPWJw= +github.com/ronanh/intcomp v1.1.1 h1:+1bGV/wEBiHI0FvzS7RHgzqOpfbBJzLIxkqMJ9e6yxY= +github.com/ronanh/intcomp v1.1.1/go.mod h1:7FOLy3P3Zj3er/kVrU/pl+Ql7JFZj7bwliMGketo0IU= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/zerolog v1.35.1 h1:m7xQeoiLIiV0BCEY4Hs+j2NG4Gp2o2KPKmhnnLiazKI= +github.com/rs/zerolog v1.35.1/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= 
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sasha-s/go-deadlock v0.3.9 h1:fiaT9rB7g5sr5ddNZvlwheclN9IP86eFW9WgqlEQV+w= +github.com/sasha-s/go-deadlock v0.3.9/go.mod h1:KuZj51ZFmx42q/mPaYbRk0P1xcwe697zsJKE03vD4/Y= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= +github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= +github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/jwalterweatherman 
v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod 
h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= +github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= +github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= +github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zondax/golem v0.27.0 h1:IbBjGIXF3SoGOZHsILJvIM/F/ylwJzMcHAcggiqniPw= +github.com/zondax/golem v0.27.0/go.mod h1:AmorCgJPt00L8xN1VrMBe13PSifoZksnQ1Ge906bu4A= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v1.0.1 h1:Ks/2tz/dOF+dbRynfZ0dEhcdL1lqw43Sa0zMXHpQ3aQ= +github.com/zondax/ledger-go v1.0.1/go.mod h1:j7IgMY39f30apthJYMd1YsHZRqdyu4KbVmUp0nU78X0= +gitlab.com/NebulousLabs/errors v0.0.0-20171229012116-7ead97ef90b8/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975 h1:L/ENs/Ar1bFzUeKx6m3XjlmBgIUlykX9dzvp5k9NGxc= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= +gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40 h1:dizWJqTWjwyD8KGcMOwgrkqu1JIkofYgKkmDeNE7oAs= +gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40/go.mod h1:rOnSnoRyxMI3fe/7KIbVcsHRGxe30OONv8dEgo+vCfA= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk= +go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/bridges/prometheus v0.67.0 h1:dkBzNEAIKADEaFnuESzcXvpd09vxvDZsOjx11gjUqLk= +go.opentelemetry.io/contrib/bridges/prometheus v0.67.0/go.mod h1:Z5RIwRkZgauOIfnG5IpidvLpERjhTninpP1dTG2jTl4= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.68.0 
h1:0Qx7VGBacMm9ZENQ7TnNObTYI4ShC+lHI16seduaxZo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.68.0/go.mod h1:Sje3i3MjSPKTSPvVWCaL8ugBzJwik3u4smCjUeuupqg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= +go.opentelemetry.io/contrib/instrumentation/runtime v0.68.0 h1:jhVIQEprwUTV+KfzzliLidclhoTOoHTgdz96kAyR8mU= +go.opentelemetry.io/contrib/instrumentation/runtime v0.68.0/go.mod h1:4HsdbLUbernaTnA8CNaNE+1g026SciXb3juRYe3l8EY= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 h1:w1K+pCJoPpQifuVpsKamUdn9U0zM3xUziVOqsGksUrY= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0/go.mod h1:HBy4BjzgVE8139ieRI75oXm3EcDN+6GhD88JT1Kjvxg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0/go.mod h1:Vl1/iaggsuRlrHf/hfPJPvVag77kKyvrLeD10kpMl+A= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 h1:3iZJKlCZufyRzPzlQhUIWVmfltrXuGyfjREgGP3UUjc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0/go.mod h1:/G+nUPfhq2e+qiXMGxMwumDrP5jtzU+mWN7/sjT2rak= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 h1:ZrPRak/kS4xI3AVXy8F7pipuDXmDsrO8Lg+yQjBLjw0= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0/go.mod h1:3y6kQCWztq6hyW8Z9YxQDDm0Je9AJoFar2G0yDcmhRk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 h1:mS47AX77OtFfKG4vtp+84kuGSFZHTyxtXIN269vChY0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0/go.mod h1:PJnsC41lAGncJlPUniSwM81gc80GkgWJWr3cu2nKEtU= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg= +go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw= +go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= +go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= +go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg= +go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= +go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ= +go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.3.0/go.mod 
h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= +golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod 
h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c h1:6a8FdnNk6bTXBjR4AGKFgUKuo+7GnR3FX5L7CbveeZc= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c/go.mod h1:TpUTTEp9frx7rTdLpC9gFG9kdI7zVLFTFFlqaH2Cncw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text 
v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U= +golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= +golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= +gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod 
h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.276.0 h1:nVArUtfLEihtW+b0DdcqRGK1xoEm2+ltAihyztq7MKY= +google.golang.org/api v0.276.0/go.mod h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 
h1:XzmzkmB14QhVhgnawEVsOn6OFsnpyxNPRY9QV01dNB0= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:L43LFes82YgSonw6iTXTxXUX1OlULt4AQtkik4ULL/I= +google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA= +google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260406210006-6f92a3bedf2d h1:wT2n40TBqFY6wiwazVK9/iTWbsQrgk5ZfCSVFLO9LQA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260406210006-6f92a3bedf2d/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= +google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/vmihailenco/msgpack.v2 v2.9.2/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn+AEGwNEOatn8= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= +pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= +pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/tools/celestia-node-fiber/listen.go b/tools/celestia-node-fiber/listen.go new file mode 100644 index 0000000000..9415a9c87a --- /dev/null +++ b/tools/celestia-node-fiber/listen.go @@ -0,0 +1,144 @@ +package celestianodefiber + +import ( + "context" + "errors" + "fmt" + + appfibre "github.com/celestiaorg/celestia-app/v9/fibre" + libshare "github.com/celestiaorg/go-square/v4/share" + + "github.com/celestiaorg/celestia-node/blob" + + "github.com/evstack/ev-node/block" +) + +// Listen implements fiber.DA.Listen. It subscribes to blob.Subscribe on the +// bridge node starting at fromHeight and forwards only share-version-2 +// (Fibre) blobs as BlobEvents. PFB blobs (v0/v1) sharing the namespace are +// dropped so consumers see a pure Fibre event stream. +// +// fromHeight == 0 starts the stream at the chain head (live follow). 
+// fromHeight > 0 replays from that block forward via the node's +// WaitForHeight loop so a subscriber can resume after a restart without +// missing blobs. +// +// DataSize on emitted events is the original payload byte length — matching +// the fibermock contract ev-node consumers code against. The v2 share only +// carries (fibre_blob_version + commitment), so the real size isn't derivable +// from the subscription alone; Listen therefore performs a Download per event +// to recover the size before forwarding. This adds one FSP round-trip per +// blob. If that cost becomes material we can expose an opt-out mode, but for +// now correctness over latency. +func (a *Adapter) Listen(ctx context.Context, namespace []byte, fromHeight uint64) (<-chan block.FiberBlobEvent, error) { + ns, err := toV0Namespace(namespace) + if err != nil { + return nil, fmt.Errorf("namespace: %w", err) + } + sub, err := a.blob.Subscribe(ctx, ns, fromHeight) + if err != nil { + return nil, fmt.Errorf("subscribing to blob stream: %w", err) + } + out := make(chan block.FiberBlobEvent, a.listenChannelSz) + go a.forwardFibreBlobs(ctx, sub, out) + return out, nil +} + +// forwardFibreBlobs drains a blob.SubscriptionResponse stream and emits a +// BlobEvent per share-version-2 blob. The output channel is closed when the +// subscription closes or ctx is cancelled. +func (a *Adapter) forwardFibreBlobs( + ctx context.Context, + sub <-chan *blob.SubscriptionResponse, + out chan<- block.FiberBlobEvent, +) { + defer close(out) + for { + select { + case resp, ok := <-sub: + if !ok { + return + } + if resp == nil { + continue + } + height := resolveHeight(resp) + for _, b := range resp.Blobs { + if b == nil || !b.IsFibreBlob() { + continue + } + event, err := a.fibreBlobToEvent(ctx, b.Blob, height) + if err != nil { + // Skip a malformed or un-fetchable v2 blob rather than + // kill the subscription. Most likely causes: the v2 + // payload was garbage-collected from FSPs, or the + // download was cancelled. Either way the consumer has + // no actionable signal for this single blob. + continue + } + select { + case out <- event: + case <-ctx.Done(): + return + } + } + case <-ctx.Done(): + return + } + } +} + +// resolveHeight picks the authoritative height from the subscription +// response. celestia-node flagged resp.Height as deprecated in favour of +// resp.Header.Height(); use the header when present, fall back otherwise. +func resolveHeight(resp *blob.SubscriptionResponse) uint64 { + if resp.Header != nil { + return uint64(resp.Header.Height) + } + return resp.Height +} + +// fibreBlobToEvent reconstructs the Fibre BlobID (version byte + 32-byte +// commitment) from a share-version-2 libshare.Blob, downloads the blob to +// determine the original payload size, and wraps everything as a BlobEvent. +// +// The Download is what makes DataSize accurate. Without it we would have to +// either report the v2 share size (wrong — misleads consumers) or zero +// (lossy). See the Listen doc for the cost / correctness rationale. 
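For a caller-side view of that DataSize contract, here is a minimal sketch (not part of the adapter; it assumes an already-constructed `*cnfiber.Adapter` as in the tests further down) that drains the Listen channel and cross-checks `DataSize` against a Download round-trip:

```go
import (
	"context"
	"fmt"

	cnfiber "github.com/evstack/ev-node/tools/celestia-node-fiber"
)

// consumeFibreEvents is a sketch: it follows the live tip (fromHeight = 0)
// and verifies that each event's DataSize matches the payload length
// recovered via Download.
func consumeFibreEvents(ctx context.Context, adapter *cnfiber.Adapter, namespace []byte) error {
	events, err := adapter.Listen(ctx, namespace, 0)
	if err != nil {
		return fmt.Errorf("starting Listen: %w", err)
	}
	for ev := range events {
		payload, err := adapter.Download(ctx, ev.BlobID)
		if err != nil {
			return fmt.Errorf("downloading blob %x: %w", ev.BlobID, err)
		}
		if uint64(len(payload)) != ev.DataSize {
			return fmt.Errorf("blob %x: DataSize %d does not match payload length %d",
				ev.BlobID, ev.DataSize, len(payload))
		}
	}
	// Channel closed: the subscription ended or ctx was cancelled.
	return nil
}
```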
+func (a *Adapter) fibreBlobToEvent( + ctx context.Context, + b *libshare.Blob, + height uint64, +) (block.FiberBlobEvent, error) { + version, err := b.FibreBlobVersion() + if err != nil { + return block.FiberBlobEvent{}, err + } + commit, err := b.FibreCommitment() + if err != nil { + return block.FiberBlobEvent{}, err + } + if len(commit) != appfibre.CommitmentSize { + return block.FiberBlobEvent{}, fmt.Errorf( + "fibre commitment must be %d bytes, got %d", + appfibre.CommitmentSize, len(commit), + ) + } + var c appfibre.Commitment + copy(c[:], commit) + id := appfibre.NewBlobID(uint8(version), c) + + res, err := a.fibre.Download(ctx, id) + if err != nil { + return block.FiberBlobEvent{}, fmt.Errorf("resolving payload size via Download: %w", err) + } + if res == nil { + return block.FiberBlobEvent{}, errors.New("fibre.Download returned nil result while resolving payload size") + } + + return block.FiberBlobEvent{ + BlobID: block.FiberBlobID(id), + Height: height, + DataSize: uint64(len(res.Data)), + }, nil +} diff --git a/tools/celestia-node-fiber/testing/bridge.go b/tools/celestia-node-fiber/testing/bridge.go new file mode 100644 index 0000000000..a6dcc2ce7e --- /dev/null +++ b/tools/celestia-node-fiber/testing/bridge.go @@ -0,0 +1,135 @@ +//go:build fibre + +package cnfibertest + +import ( + "context" + "crypto/rand" + "net" + "testing" + "time" + + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cristalhq/jwt/v5" + "github.com/stretchr/testify/require" + "go.uber.org/fx" + + appfibre "github.com/celestiaorg/celestia-app/v9/fibre" + + "github.com/celestiaorg/celestia-node/api/client" + "github.com/celestiaorg/celestia-node/api/rpc/perms" + "github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + stateapi "github.com/celestiaorg/celestia-node/nodebuilder/state" +) + +// Bridge bundles an in-process celestia-node bridge node and the admin +// JWT that grants it authenticated RPC access. The adapter's ReadConfig +// needs both the address and the token for Blob.Subscribe to work. +type Bridge struct { + Node *nodebuilder.Node + AdminToken string +} + +// RPCAddr returns a WebSocket URL the adapter uses in +// Config.ReadConfig.BridgeDAAddr. WebSocket (not HTTP) is required +// because Blob.Subscribe returns a channel; go-jsonrpc only supports +// channel-returning methods over a streaming transport. +func (b *Bridge) RPCAddr() string { + return "ws://" + b.Node.RPCServer.ListenAddr() +} + +// StartBridge brings up an in-process celestia-node bridge connected to +// the Network's consensus gRPC endpoint. Mirrors celestia-node's +// api/client test helpers so TestShowcase has a real JSON-RPC server for +// Blob.Subscribe. +func StartBridge(t *testing.T, ctx context.Context, network *Network) *Bridge { + t.Helper() + + cfg := nodebuilder.DefaultConfig(node.Bridge) + + ip, port, err := net.SplitHostPort(network.ConsensusGRPCAddr()) + require.NoError(t, err, "splitting consensus gRPC addr") + cfg.Core.IP = ip + cfg.Core.Port = port + // Pin the bridge RPC to an ephemeral port; the test discovers it via + // Node.RPCServer.ListenAddr() after Start. 
+ cfg.RPC.Port = "0" + + tempDir := t.TempDir() + store := nodebuilder.MockStore(t, cfg) + + auth, adminToken := bridgeAuth(t) + kr := bridgeKeyring(t, tempDir) + + bn, err := nodebuilder.New(node.Bridge, p2p.Private, store, + auth, + stateapi.WithKeyring(kr), + stateapi.WithKeyName(stateapi.AccountName(bridgeSigningKey)), + fx.Replace(node.StorePath(tempDir)), + ) + require.NoError(t, err, "constructing bridge node") + + require.NoError(t, bn.Start(ctx), "starting bridge node") + t.Cleanup(func() { + stopCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + _ = bn.Stop(stopCtx) + }) + + return &Bridge{ + Node: bn, + AdminToken: adminToken, + } +} + +// bridgeSigningKey is the keyring account the bridge uses for its own +// tx submissions. Distinct from the client's account so the two keyrings +// don't collide. +const bridgeSigningKey = "bridge-signer" + +func bridgeKeyring(t *testing.T, tempDir string) keyring.Keyring { + t.Helper() + + kr, err := client.KeyringWithNewKey(client.KeyringConfig{ + KeyName: bridgeSigningKey, + BackendName: keyring.BackendTest, + }, tempDir) + require.NoError(t, err, "creating bridge keyring") + + // The Fibre module on the bridge expects a key under + // appfibre.DefaultKeyName to exist, even though our client never uses + // the bridge's Fibre module for Upload/Download. Without it, + // fx.Start fails during fibre module wiring. + _, _, err = kr.NewMnemonic( + appfibre.DefaultKeyName, + keyring.English, "", "", hd.Secp256k1, + ) + require.NoError(t, err, "provisioning bridge fibre key") + return kr +} + +// bridgeAuth creates an HS256 JWT signer pair and an admin token. The +// returned fx option injects the signer/verifier into the node; the +// token is what the adapter puts in ReadConfig.DAAuthToken. +func bridgeAuth(t *testing.T) (fx.Option, string) { + t.Helper() + + key := make([]byte, 32) + _, err := rand.Read(key) + require.NoError(t, err, "rand.Read jwt key") + + signer, err := jwt.NewSignerHS(jwt.HS256, key) + require.NoError(t, err) + verifier, err := jwt.NewVerifierHS(jwt.HS256, key) + require.NoError(t, err) + + token, err := perms.NewTokenWithPerms(signer, perms.AllPerms) + require.NoError(t, err) + + return fx.Decorate(func() (jwt.Signer, jwt.Verifier, error) { + return signer, verifier, nil + }), string(token) +} diff --git a/tools/celestia-node-fiber/testing/doc.go b/tools/celestia-node-fiber/testing/doc.go new file mode 100644 index 0000000000..639adf723c --- /dev/null +++ b/tools/celestia-node-fiber/testing/doc.go @@ -0,0 +1,16 @@ +//go:build fibre + +// Package cnfibertest wires a single-validator Celestia chain, an in-process +// Fibre server, a celestia-node bridge and the celestia-node-fiber adapter +// together so Upload → Listen → Download can be exercised end-to-end in a +// Go test. +// +// The chain is a celestia-app testnode built with -tags fibre. The Fibre +// server runs in the same process and its FSP endpoint is registered with +// the valaddr module so the client's host registry can find it. The +// underlying adapter talks directly to consensus gRPC and the Fibre server; +// only Listen goes through the bridge node's blob subscription. +// +// This is the "fast sanity" variant. A multi-validator showcase is planned +// as a Docker Compose follow-up that exercises real quorum collection. 
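As a quick orientation before the full tests, the Upload → Listen → Download loop this package doc describes condenses to roughly the following sketch. The adapter config is elided here (TestShowcase below shows the real ReadConfig/SubmitConfig wiring), and the helper reuses the same test utilities defined in this package:

```go
import (
	"bytes"
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	cnfiber "github.com/evstack/ev-node/tools/celestia-node-fiber"
	cnfibertest "github.com/evstack/ev-node/tools/celestia-node-fiber/testing"
)

// exampleRoundTrip is a condensed sketch of what TestShowcase does in full.
func exampleRoundTrip(t *testing.T, ctx context.Context) {
	network := cnfibertest.StartNetwork(t, ctx) // single-validator chain + in-process Fibre server
	bridge := cnfibertest.StartBridge(t, ctx, network)
	_ = bridge // bridge.RPCAddr() and bridge.AdminToken feed the elided config below

	// Config fields elided; see TestShowcase for the bridge + consensus wiring.
	adapter, err := cnfiber.New(ctx, cnfiber.Config{ /* ... */ }, network.Consensus.Keyring)
	require.NoError(t, err)
	t.Cleanup(func() { _ = adapter.Close() })

	ns := bytes.Repeat([]byte{0xab}, 10)      // 10-byte v0 namespace ID
	events, err := adapter.Listen(ctx, ns, 0) // subscribe before uploading
	require.NoError(t, err)

	res, err := adapter.Upload(ctx, ns, []byte("hello fibre"))
	require.NoError(t, err)

	ev := <-events // settlement event for the single upload
	require.Equal(t, res.BlobID, ev.BlobID)

	got, err := adapter.Download(ctx, ev.BlobID)
	require.NoError(t, err)
	require.Equal(t, []byte("hello fibre"), got)
}
```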
+package cnfibertest diff --git a/tools/celestia-node-fiber/testing/evnode_fiber_test.go b/tools/celestia-node-fiber/testing/evnode_fiber_test.go new file mode 100644 index 0000000000..2220f42751 --- /dev/null +++ b/tools/celestia-node-fiber/testing/evnode_fiber_test.go @@ -0,0 +1,309 @@ +//go:build fibre + +package cnfibertest_test + +import ( + "context" + "crypto/rand" + "fmt" + "sync" + "testing" + "time" + + "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/block" + coreexecution "github.com/evstack/ev-node/core/execution" + "github.com/evstack/ev-node/node" + "github.com/evstack/ev-node/pkg/config" + datypes "github.com/evstack/ev-node/pkg/da/types" + genesispkg "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/pkg/p2p" + "github.com/evstack/ev-node/pkg/p2p/key" + "github.com/evstack/ev-node/pkg/sequencers/solo" + pkgsigner "github.com/evstack/ev-node/pkg/signer" + "github.com/evstack/ev-node/pkg/signer/file" + "github.com/evstack/ev-node/pkg/store" + + "github.com/celestiaorg/celestia-node/api/client" + + cnfiber "github.com/evstack/ev-node/tools/celestia-node-fiber" + cnfibertest "github.com/evstack/ev-node/tools/celestia-node-fiber/testing" +) + +const ( + evnodeBlockTime = 200 * time.Millisecond + evnodeDABlockTime = 1 * time.Second + evnodeHeaderNS = "ev-fib-ht" + evnodeDataNS = "ev-fib-da" + evnodeChainID = "ev-fiber-test" + evnodeBlockTimeout = 30 * time.Second + evnodePassphrase = "test-passphrase-evnode" +) + +// TestEvNode_FiberDA_Posting wires a full ev-node in-memory to the +// celestia-node-fiber adapter and verifies that block data is posted +// to the Fibre DA layer. The test: +// - Starts a single-validator Celestia chain + Fibre server + bridge +// - Creates a celestia-node-fiber adapter (block.FiberClient) +// - Constructs an ev-node aggregator node that uses the adapter as DA +// - Subscribes to the data namespace via adapter.Listen before uploading +// - Injects a transaction and waits for block production +// - Confirms the DA submitter pushed blobs to Fiber by receiving events +// on the subscription and round-tripping each through Download +func TestEvNode_FiberDA_Posting(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) + t.Cleanup(cancel) + + network := cnfibertest.StartNetwork(t, ctx) + bridge := cnfibertest.StartBridge(t, ctx, network) + + adapter, err := cnfiber.New(ctx, cnfiber.Config{ + Client: client.Config{ + ReadConfig: client.ReadConfig{ + BridgeDAAddr: bridge.RPCAddr(), + DAAuthToken: bridge.AdminToken, + EnableDATLS: false, + }, + SubmitConfig: client.SubmitConfig{ + DefaultKeyName: network.ClientAccount, + Network: "private", + CoreGRPCConfig: client.CoreGRPCConfig{ + Addr: network.ConsensusGRPCAddr(), + }, + }, + }, + }, network.Consensus.Keyring) + require.NoError(t, err, "constructing adapter") + t.Cleanup(func() { _ = adapter.Close() }) + + // Subscribe to the header namespace BEFORE starting the node so we + // don't race against the first DA submission. fromHeight=0 follows + // the live tip. The adapter expects the 10-byte v0 namespace ID + // (the last 10 bytes of the full 29-byte namespace), matching what + // fiberDAClient.Submit extracts before calling fiber.Upload. 
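A tiny helper capturing that extraction (the name is hypothetical; `datypes` is the `pkg/da/types` alias imported by this test, and the inline slice just below does exactly this):

```go
import datypes "github.com/evstack/ev-node/pkg/da/types"

// nsIDFromString converts a human-readable namespace string into the
// 10-byte v0 namespace ID the adapter expects: the full namespace is
// 29 bytes (1 version byte + 28-byte ID), and the user-chosen part of a
// v0 namespace is its trailing 10 bytes.
func nsIDFromString(s string) []byte {
	full := datypes.NamespaceFromString(s).Bytes()
	return full[len(full)-10:]
}
```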
+ fullHeaderNS := datypes.NamespaceFromString(evnodeHeaderNS).Bytes() + headerNSID := fullHeaderNS[len(fullHeaderNS)-10:] + events, err := adapter.Listen(ctx, headerNSID, 0) + require.NoError(t, err, "starting fiber Listen on header namespace") + + rollnode, exec, nodeCleanup := newFiberEvNode(t, ctx, adapter) + t.Cleanup(nodeCleanup) + + nodeErrCh := make(chan error, 1) + go func() { + defer func() { + if r := recover(); r != nil { + nodeErrCh <- fmt.Errorf("node panicked: %v", r) + } + }() + nodeErrCh <- rollnode.Run(ctx) + }() + + txPayload := fmt.Sprintf("fiber-key=fiber-value-%d", time.Now().UnixNano()) + exec.InjectTx([]byte(txPayload)) + + require.Eventually(t, func() bool { + stats := exec.Stats() + t.Logf("blocks=%d txs=%d", stats.BlocksProduced, stats.TotalExecutedTxs) + return stats.BlocksProduced >= 1 && stats.TotalExecutedTxs >= 1 + }, evnodeBlockTimeout, 200*time.Millisecond, "ev-node should produce at least one block with the transaction") + + // Drain at least one Fiber BlobEvent from the subscription to prove + // the DA submitter pushed data through the fiber adapter's Upload + // path and the settlement landed on-chain. + var seen []block.FiberBlobEvent + require.Eventually(t, func() bool { + select { + case ev, ok := <-events: + if !ok { + return false + } + seen = append(seen, ev) + t.Logf("fiber event: blob_id=%x height=%d data_size=%d", + ev.BlobID, ev.Height, ev.DataSize) + return true + default: + return false + } + }, evnodeBlockTimeout, 500*time.Millisecond, "expected at least one Fiber BlobEvent from DA submission") + + for _, ev := range seen { + got, err := adapter.Download(ctx, ev.BlobID) + require.NoError(t, err, "adapter.Download blob_id=%x", ev.BlobID) + require.NotEmpty(t, got, "downloaded blob must not be empty") + t.Logf("download ok: blob_id=%x bytes=%d", ev.BlobID, len(got)) + } + + select { + case err := <-nodeErrCh: + t.Fatalf("node exited unexpectedly: %v", err) + default: + } +} + +type inMemExecutor struct { + mu sync.Mutex + data map[string]string + + txChan chan []byte + blocksProduced uint64 + totalExecutedTxs uint64 +} + +func newInMemExecutor() *inMemExecutor { + return &inMemExecutor{ + data: make(map[string]string), + txChan: make(chan []byte, 10000), + } +} + +func (e *inMemExecutor) InjectTx(tx []byte) { + select { + case e.txChan <- tx: + default: + } +} + +type execStats struct { + BlocksProduced uint64 + TotalExecutedTxs uint64 +} + +func (e *inMemExecutor) Stats() execStats { + e.mu.Lock() + defer e.mu.Unlock() + return execStats{BlocksProduced: e.blocksProduced, TotalExecutedTxs: e.totalExecutedTxs} +} + +func (e *inMemExecutor) InitChain(_ context.Context, _ time.Time, _ uint64, _ string) ([]byte, error) { + return []byte("inmem-genesis-root"), nil +} + +func (e *inMemExecutor) GetTxs(_ context.Context) ([][]byte, error) { + var txs [][]byte + for { + select { + case tx := <-e.txChan: + txs = append(txs, tx) + default: + return txs, nil + } + } +} + +func (e *inMemExecutor) ExecuteTxs(_ context.Context, txs [][]byte, _ uint64, _ time.Time, _ []byte) ([]byte, error) { + e.mu.Lock() + defer e.mu.Unlock() + for _, tx := range txs { + k, v, ok := parseKV(tx) + if ok { + e.data[k] = v + } + } + e.blocksProduced++ + e.totalExecutedTxs += uint64(len(txs)) + return []byte(fmt.Sprintf("root-%d", e.blocksProduced)), nil +} + +func (e *inMemExecutor) SetFinal(_ context.Context, _ uint64) error { return nil } +func (e *inMemExecutor) Rollback(_ context.Context, _ uint64) error { return nil } +func (e *inMemExecutor) GetExecutionInfo(_ 
context.Context) (coreexecution.ExecutionInfo, error) { + return coreexecution.ExecutionInfo{MaxGas: 0}, nil +} +func (e *inMemExecutor) FilterTxs(_ context.Context, txs [][]byte, _, _ uint64, _ bool) ([]coreexecution.FilterStatus, error) { + st := make([]coreexecution.FilterStatus, len(txs)) + for i := range st { + st[i] = coreexecution.FilterOK + } + return st, nil +} + +func parseKV(tx []byte) (string, string, bool) { + s := string(tx) + for i := 0; i < len(s); i++ { + if s[i] == '=' { + return s[:i], s[i+1:], true + } + } + return "", "", false +} + +func newFiberEvNode(t *testing.T, ctx context.Context, fiberClient block.FiberClient) (node.Node, *inMemExecutor, func()) { + t.Helper() + + tmpDir := t.TempDir() + logger := zerolog.New(zerolog.NewTestWriter(t)).With().Timestamp().Logger() + + // Create a file-backed signer so the executor can sign blocks. + signerDir := tmpDir + fs, err := file.CreateFileSystemSigner(signerDir, []byte(evnodePassphrase)) + require.NoError(t, err, "creating file signer") + signerAddr, err := fs.GetAddress() + require.NoError(t, err, "getting signer address") + + // Generate a separate libp2p node key for P2P networking. + nodePrivKey, _, err := crypto.GenerateEd25519Key(rand.Reader) + require.NoError(t, err, "generating node key") + nodeKey := &key.NodeKey{PrivKey: nodePrivKey} + + genesis := genesispkg.NewGenesis(evnodeChainID, 1, time.Now(), signerAddr) + require.NoError(t, genesis.Validate(), "validating genesis") + + cfg := config.DefaultConfig() + cfg.RootDir = tmpDir + cfg.DBPath = "data" + cfg.Node.Aggregator = true + cfg.Node.BlockTime = config.DurationWrapper{Duration: evnodeBlockTime} + cfg.Node.LazyMode = false + cfg.DA.BlockTime = config.DurationWrapper{Duration: evnodeDABlockTime} + cfg.DA.Namespace = evnodeHeaderNS + cfg.DA.DataNamespace = evnodeDataNS + cfg.DA.BatchingStrategy = "immediate" + cfg.DA.Fiber.Enabled = true + cfg.DA.RequestTimeout = config.DurationWrapper{Duration: 60 * time.Second} + cfg.P2P.ListenAddress = "/ip4/0.0.0.0/tcp/0" + cfg.P2P.DisableConnectionGater = true + cfg.Instrumentation.Prometheus = false + cfg.Instrumentation.Pprof = false + cfg.RPC.Address = "127.0.0.1:0" + cfg.Log.Level = "debug" + cfg.Signer.SignerType = "file" + cfg.Signer.SignerPath = signerDir + + // Build the full signer via the factory (needed for consistency with + // how the real node boots). 
+ signer, err := pkgsigner.NewSigner(ctx, &cfg, evnodePassphrase) + require.NoError(t, err, "creating signer via factory") + + ds, err := store.NewDefaultKVStore(tmpDir, cfg.DBPath, "testdb") + require.NoError(t, err, "creating datastore") + + executor := newInMemExecutor() + sequencer := solo.NewSoloSequencer(logger, []byte(genesis.ChainID), executor) + daClient := block.NewFiberDAClient(fiberClient, cfg, logger, 0) + p2pClient, err := p2p.NewClient(cfg.P2P, nodeKey.PrivKey, datastore.NewMapDatastore(), genesis.ChainID, logger, nil) + require.NoError(t, err, "creating p2p client") + + rollnode, err := node.NewNode( + cfg, + executor, + sequencer, + daClient, + signer, + p2pClient, + genesis, + ds, + node.DefaultMetricsProvider(cfg.Instrumentation), + logger, + node.NodeOptions{}, + ) + require.NoError(t, err, "creating node") + + return rollnode, executor, func() {} +} + +var _ coreexecution.Executor = (*inMemExecutor)(nil) diff --git a/tools/celestia-node-fiber/testing/network.go b/tools/celestia-node-fiber/testing/network.go new file mode 100644 index 0000000000..7d9d2660aa --- /dev/null +++ b/tools/celestia-node-fiber/testing/network.go @@ -0,0 +1,222 @@ +//go:build fibre + +package cnfibertest + +import ( + "context" + "path/filepath" + "testing" + "time" + + sdkmath "cosmossdk.io/math" + "github.com/cometbft/cometbft/privval" + core "github.com/cometbft/cometbft/types" + "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/v9/app" + "github.com/celestiaorg/celestia-app/v9/app/encoding" + appfibre "github.com/celestiaorg/celestia-app/v9/fibre" + "github.com/celestiaorg/celestia-app/v9/pkg/appconsts" + "github.com/celestiaorg/celestia-app/v9/pkg/user" + "github.com/celestiaorg/celestia-app/v9/test/util/testnode" + fibretypes "github.com/celestiaorg/celestia-app/v9/x/fibre/types" + valtypes "github.com/celestiaorg/celestia-app/v9/x/valaddr/types" +) + +const ( + // defaultChainID matches the celestia-node bridge test helper so the + // bridge and consensus node agree on network identity. + defaultChainID = "private" + + // escrowDeposit is a generous initial escrow. Uploads consume from + // escrow for gas + blob fees, so leave headroom for multiple runs. + escrowDeposit = 50_000_000 // 50 TIA in utia + + // clientAccount is the keyring account the adapter uses to sign + // payment promises and MsgPayForFibre. Pre-funded in genesis; also + // has a funded escrow after StartNetwork returns. + clientAccount = appfibre.DefaultKeyName +) + +// Network bundles a single-validator celestia-app chain, an in-process +// Fibre server registered via valaddr, and a funded escrow account. The +// caller's *testing.T cleanup stops everything. +type Network struct { + // Consensus is the celestia-app testnode context (keyring, gRPC + // client, home dir). Use Consensus.GRPCClient for consensus gRPC + // access and Consensus.Keyring for signing. + Consensus testnode.Context + + // FibreServer is the in-process Fibre gRPC server registered with + // the chain's valaddr module. The adapter (via appfibre.Client's + // default host registry) discovers it through valaddr. + FibreServer *appfibre.Server + + // ChainID matches what the bridge node needs to be configured with. + ChainID string + + // ClientAccount is the keyring name for the pre-funded + pre-escrowed + // account the showcase uses. 
Expose it so the test wires SubmitConfig. + ClientAccount string +} + +// StartNetwork boots a single-validator Fibre chain + server and returns +// a ready-to-use Network. Registration + escrow funding is complete when +// this returns. +func StartNetwork(t *testing.T, ctx context.Context) *Network { + t.Helper() + + cfg := testnode.DefaultConfig(). + WithChainID(defaultChainID). + WithFundedAccounts(clientAccount). + WithDelayedPrecommitTimeout(50 * time.Millisecond) + + cctx, _, grpcAddr := testnode.NewNetwork(t, cfg) + _, err := cctx.WaitForHeight(1) + require.NoError(t, err, "waiting for first block") + + server := startFibreServer(t, ctx, cctx, grpcAddr) + // Register the fibre server's address in plain `host:port` form — + // celestia-app's x/valaddr now requires it (no scheme prefix), and + // the gRPC dialer accepts bare `host:port` directly via the + // passthrough resolver. + registerValidator(t, ctx, cctx, server.ListenAddress()) + fundEscrow(t, ctx, cctx) + + return &Network{ + Consensus: cctx, + FibreServer: server, + ChainID: defaultChainID, + ClientAccount: clientAccount, + } +} + +// ConsensusGRPCAddr returns the host:port of the chain's gRPC endpoint +// that the adapter's SubmitConfig.CoreGRPCConfig should point at. +func (n *Network) ConsensusGRPCAddr() string { + return n.Consensus.GRPCClient.Target() +} + +// startFibreServer spins up an in-process Fibre gRPC server bound to an +// ephemeral localhost port. The server uses the testnode's private +// validator key for BLS signing, and keeps blob data in memory. +func startFibreServer( + t *testing.T, + ctx context.Context, + cctx testnode.Context, + appGRPCAddr string, +) *appfibre.Server { + t.Helper() + + pvKey := filepath.Join(cctx.HomeDir, "config", "priv_validator_key.json") + pvState := filepath.Join(cctx.HomeDir, "data", "priv_validator_state.json") + filePV := privval.LoadFilePV(pvKey, pvState) + + serverCfg := appfibre.DefaultServerConfig() + serverCfg.AppGRPCAddress = appGRPCAddr + serverCfg.ServerListenAddress = "127.0.0.1:0" + serverCfg.SignerFn = func(string) (core.PrivValidator, error) { + return filePV, nil + } + serverCfg.StoreFn = func(sc appfibre.StoreConfig) (*appfibre.Store, error) { + return appfibre.NewMemoryStore(sc), nil + } + + server, err := appfibre.NewServer(serverCfg) + require.NoError(t, err, "creating fibre server") + require.NoError(t, server.Start(ctx), "starting fibre server") + t.Cleanup(func() { + stopCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + _ = server.Stop(stopCtx) + }) + return server +} + +// registerValidator submits MsgSetFibreProviderInfo so the chain's +// valaddr module maps the validator's consensus address to the Fibre +// server's listen address. Without this the client's host registry +// cannot locate any FSPs. 
+func registerValidator( + t *testing.T, + ctx context.Context, + cctx testnode.Context, + fibreAddr string, +) { + t.Helper() + + stakingClient := stakingtypes.NewQueryClient(cctx.GRPCClient) + validators, err := stakingClient.Validators(ctx, &stakingtypes.QueryValidatorsRequest{}) + require.NoError(t, err) + require.Len(t, validators.Validators, 1, "single-validator testnode expected") + valOperator := validators.Validators[0].OperatorAddress + + txClient, err := testnode.NewTxClientFromContext(cctx) + require.NoError(t, err) + + msg := &valtypes.MsgSetFibreProviderInfo{ + Signer: valOperator, + Host: fibreAddr, + } + resp, err := txClient.SubmitTx(ctx, []sdk.Msg{msg}, user.SetGasLimit(200_000), user.SetFee(5_000)) + require.NoError(t, err, "registering validator fibre host") + require.Equal(t, uint32(0), resp.Code, "register validator tx failed") + require.NoError(t, cctx.WaitForNextBlock()) + + // Sanity-check the registration landed. + tmClient := cmtservice.NewServiceClient(cctx.GRPCClient) + valSet, err := tmClient.GetLatestValidatorSet(ctx, &cmtservice.GetLatestValidatorSetRequest{}) + require.NoError(t, err) + require.Len(t, valSet.Validators, 1) + consAddr, err := sdk.ConsAddressFromBech32(valSet.Validators[0].Address) + require.NoError(t, err) + + valAddrClient := valtypes.NewQueryClient(cctx.GRPCClient) + info, err := valAddrClient.FibreProviderInfo(ctx, &valtypes.QueryFibreProviderInfoRequest{ + ValidatorConsensusAddress: consAddr.String(), + }) + require.NoError(t, err) + require.True(t, info.Found, "fibre provider info not registered") + require.Equal(t, fibreAddr, info.Info.Host) +} + +// fundEscrow deposits enough utia into the client's escrow account to +// cover payment promises for several blob uploads. The async PFF +// settlement kicked off by adapter.Upload debits this account. +func fundEscrow(t *testing.T, ctx context.Context, cctx testnode.Context) { + t.Helper() + + info, err := cctx.Keyring.Key(clientAccount) + require.NoError(t, err, "loading client keyring entry") + addr, err := info.GetAddress() + require.NoError(t, err) + + ecfg := encoding.MakeConfig(app.ModuleEncodingRegisters...) + txClient, err := user.SetupTxClient( + ctx, cctx.Keyring, cctx.GRPCClient, ecfg, + user.WithDefaultAccount(clientAccount), + ) + require.NoError(t, err, "setting up funded-account tx client") + + amount := sdk.NewCoin(appconsts.BondDenom, sdkmath.NewInt(escrowDeposit)) + msg := &fibretypes.MsgDepositToEscrow{ + Signer: addr.String(), + Amount: amount, + } + resp, err := txClient.SubmitTx(ctx, []sdk.Msg{msg}, user.SetGasLimit(200_000), user.SetFee(5_000)) + require.NoError(t, err, "depositing to escrow") + require.Equal(t, uint32(0), resp.Code, "deposit tx failed") + require.NoError(t, cctx.WaitForNextBlock()) + + // Sanity: escrow is now visible. 
+ queryClient := fibretypes.NewQueryClient(cctx.GRPCClient) + escrow, err := queryClient.EscrowAccount(ctx, &fibretypes.QueryEscrowAccountRequest{ + Signer: addr.String(), + }) + require.NoError(t, err) + require.True(t, escrow.Found, "escrow account not found after deposit") + require.Equal(t, amount, escrow.EscrowAccount.Balance, "escrow balance mismatch") +} diff --git a/tools/celestia-node-fiber/testing/showcase_test.go b/tools/celestia-node-fiber/testing/showcase_test.go new file mode 100644 index 0000000000..f55b0a0c2b --- /dev/null +++ b/tools/celestia-node-fiber/testing/showcase_test.go @@ -0,0 +1,284 @@ +//go:build fibre + +package cnfibertest_test + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/api/client" + + "github.com/evstack/ev-node/block" + cnfiber "github.com/evstack/ev-node/tools/celestia-node-fiber" + cnfibertest "github.com/evstack/ev-node/tools/celestia-node-fiber/testing" +) + +const ( + // showcaseBlobs is how many distinct-payload blobs the test pushes + // through the adapter. Large enough to surface ordering and + // duplicate-handling bugs, small enough to keep wall time reasonable. + showcaseBlobs = 10 + + // listenEventsTimeout bounds the collection window for N BlobEvents. + // The async MsgPayForFibre broadcasts serialize on the TxClient + // mutex, so the dominant cost is block_time_per_tx × N. 60s gives + // ~6s per blob which is generous for a 50ms-precommit testnode. + listenEventsTimeout = 60 * time.Second +) + +// TestShowcase spins up a single-validator Celestia chain with an +// in-process Fibre server, a celestia-node bridge, and drives the full +// adapter surface: Listen subscribes first, Upload pushes N distinct +// blobs, the async MsgPayForFibre settlements commit on-chain, the +// subscription delivers an event per blob, and Download round-trips each +// payload byte-for-byte. +func TestShowcase(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) + t.Cleanup(cancel) + + network := cnfibertest.StartNetwork(t, ctx) + bridge := cnfibertest.StartBridge(t, ctx, network) + + adapter, err := cnfiber.New(ctx, cnfiber.Config{ + Client: client.Config{ + ReadConfig: client.ReadConfig{ + BridgeDAAddr: bridge.RPCAddr(), + DAAuthToken: bridge.AdminToken, + EnableDATLS: false, + }, + SubmitConfig: client.SubmitConfig{ + DefaultKeyName: network.ClientAccount, + Network: "private", + CoreGRPCConfig: client.CoreGRPCConfig{ + Addr: network.ConsensusGRPCAddr(), + }, + }, + }, + }, network.Consensus.Keyring) + require.NoError(t, err, "constructing adapter") + t.Cleanup(func() { _ = adapter.Close() }) + + // Namespace: 10 bytes of v0 ID. Uploads share a namespace so Listen + // sees every settlement event in one stream. + namespace := bytes.Repeat([]byte{0xfe}, 10) + + // Subscribe BEFORE uploading so we don't race against settlements. + // fromHeight=0 → follow from the live tip. TestShowcaseResume below + // exercises the non-zero path. + events, err := adapter.Listen(ctx, namespace, 0) + require.NoError(t, err, "starting Listen subscription") + + // Build N distinctive payloads so byte-swapping or off-by-one BlobID + // reconstruction would be caught by the download diff below. 
+ payloads := make([][]byte, showcaseBlobs) + for i := range payloads { + payloads[i] = []byte(fmt.Sprintf( + "showcase blob %02d — payload=%s", + i, bytes.Repeat([]byte{'a' + byte(i)}, 8+i), + )) + } + + // expected maps hex(BlobID) → original payload; populated by Upload + // and consulted by Listen + Download to catch misrouted bytes. + expected := make(map[string][]byte, showcaseBlobs) + ids := make([]block.FiberBlobID, showcaseBlobs) + for i, payload := range payloads { + res, err := adapter.Upload(ctx, namespace, payload) + require.NoError(t, err, "adapter.Upload #%d", i) + require.NotEmpty(t, res.BlobID, "upload #%d returned empty BlobID", i) + key := hex.EncodeToString(res.BlobID) + _, dup := expected[key] + require.False(t, dup, "adapter.Upload #%d returned a duplicate BlobID %s", i, key) + expected[key] = payload + ids[i] = res.BlobID + t.Logf("upload[%02d] ok: blob_id=%s size=%d", i, key, len(payload)) + } + + // Drain events until every Upload has a matching BlobEvent. Order is + // not guaranteed — multiple settlements can land in the same block. + seen := make(map[string]block.FiberBlobEvent, showcaseBlobs) + deadline := time.After(listenEventsTimeout) + for len(seen) < showcaseBlobs { + select { + case ev, ok := <-events: + require.True(t, ok, + "Listen channel closed with only %d/%d events", len(seen), showcaseBlobs) + key := hex.EncodeToString(ev.BlobID) + if _, want := expected[key]; !want { + t.Logf("listen: ignoring unexpected BlobID %s", key) + continue + } + if prev, dup := seen[key]; dup { + t.Fatalf("listen: duplicate event for BlobID %s (prev height=%d new height=%d)", + key, prev.Height, ev.Height) + } + seen[key] = ev + t.Logf("listen[%02d/%02d] ok: blob_id=%s height=%d data_size=%d", + len(seen), showcaseBlobs, key, ev.Height, ev.DataSize) + case <-deadline: + missing := make([]string, 0, showcaseBlobs-len(seen)) + for k := range expected { + if _, got := seen[k]; !got { + missing = append(missing, k) + } + } + t.Fatalf("timed out after %s: got %d/%d events; missing=%v", + listenEventsTimeout, len(seen), showcaseBlobs, missing) + } + } + + // Every event must carry the right DataSize and a non-zero block + // height. DataSize matches the original payload length because the + // adapter's Listen issues a Download per event to recover it (see + // listen.go). A silent byte truncation anywhere upstream would + // surface here before we even get to the Download round-trip. + for key, ev := range seen { + require.Greater(t, ev.Height, uint64(0), + "BlobEvent %s must carry a real block height", key) + require.Equal(t, uint64(len(expected[key])), ev.DataSize, + "BlobEvent %s DataSize must match original payload length", key) + } + + // Round-trip every blob through Download and diff bytes. Walking + // ids (upload order) rather than seen (map iteration order) keeps + // log output deterministic. + for i, id := range ids { + key := hex.EncodeToString(id) + got, err := adapter.Download(ctx, id) + require.NoError(t, err, "adapter.Download #%d (%s)", i, key) + require.Equal(t, expected[key], got, + "Download #%d (%s) bytes mismatch", i, key) + t.Logf("download[%02d] ok: blob_id=%s bytes=%d", i, key, len(got)) + } +} + +// TestShowcaseResume verifies that a subscriber can rejoin the stream +// from a historical block height and receive every matching blob that +// was settled since. 
This is the fromHeight > 0 path added by +// celestia-node#4962: Listen opens a WaitForHeight loop starting at the +// requested height, so callers can resume after a restart without +// missing blobs. +func TestShowcaseResume(t *testing.T) { + const resumeBlobs = 3 + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) + t.Cleanup(cancel) + + network := cnfibertest.StartNetwork(t, ctx) + bridge := cnfibertest.StartBridge(t, ctx, network) + + adapter, err := cnfiber.New(ctx, cnfiber.Config{ + Client: client.Config{ + ReadConfig: client.ReadConfig{ + BridgeDAAddr: bridge.RPCAddr(), + DAAuthToken: bridge.AdminToken, + EnableDATLS: false, + }, + SubmitConfig: client.SubmitConfig{ + DefaultKeyName: network.ClientAccount, + Network: "private", + CoreGRPCConfig: client.CoreGRPCConfig{ + Addr: network.ConsensusGRPCAddr(), + }, + }, + }, + }, network.Consensus.Keyring) + require.NoError(t, err, "constructing adapter") + t.Cleanup(func() { _ = adapter.Close() }) + + namespace := bytes.Repeat([]byte{0xfd}, 10) + + // Phase 1: open a live subscription, upload N blobs, and harvest + // each blob's settlement height as reported by Listen. These + // heights are the ground truth for the resume test below. + liveCtx, liveCancel := context.WithCancel(ctx) + liveEvents, err := adapter.Listen(liveCtx, namespace, 0) + require.NoError(t, err, "starting live Listen for height discovery") + + payloads := make([][]byte, resumeBlobs) + ids := make([]block.FiberBlobID, resumeBlobs) + expected := make(map[string][]byte, resumeBlobs) + for i := range payloads { + payloads[i] = []byte(fmt.Sprintf("resume blob %d", i)) + res, err := adapter.Upload(ctx, namespace, payloads[i]) + require.NoError(t, err, "upload #%d", i) + ids[i] = res.BlobID + expected[hex.EncodeToString(res.BlobID)] = payloads[i] + t.Logf("phase1 upload[%d] blob_id=%s", i, hex.EncodeToString(res.BlobID)) + } + + heights := make(map[string]uint64, resumeBlobs) + for len(heights) < resumeBlobs { + select { + case ev, ok := <-liveEvents: + require.True(t, ok, "live Listen channel closed early") + key := hex.EncodeToString(ev.BlobID) + if _, want := expected[key]; !want { + continue + } + heights[key] = ev.Height + t.Logf("phase1 listen blob_id=%s height=%d", key, ev.Height) + case <-time.After(listenEventsTimeout): + t.Fatalf("timed out collecting heights: got %d/%d", len(heights), resumeBlobs) + } + } + liveCancel() + + // Pick the smallest height across all uploads. Resuming from this + // height must replay every blob we uploaded, regardless of whether + // multiple settlements landed in the same block. + var fromHeight uint64 + for _, h := range heights { + if fromHeight == 0 || h < fromHeight { + fromHeight = h + } + } + t.Logf("resume fromHeight=%d", fromHeight) + + // Phase 2: fresh Listen starting at fromHeight. Expect every blob + // we uploaded in phase 1 to be replayed. 
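The test only re-subscribes in-process; a long-running consumer would persist a checkpoint height between restarts and hand it back to Listen. A rough sketch of that pattern (the `handle` callback and the checkpoint storage belong to the caller, not to the adapter):

```go
// resumeListen is a sketch of checkpoint-based resumption. The caller loads
// `from` from wherever it persists heights (0 on first start = follow the
// live tip) and stores the returned lastHeight before shutting down.
// Replay is inclusive of fromHeight, so a restart may re-deliver blobs from
// the checkpointed block; consumers should deduplicate by BlobID.
func resumeListen(
	ctx context.Context,
	adapter *cnfiber.Adapter,
	namespace []byte,
	from uint64,
	handle func(block.FiberBlobEvent) error,
) (lastHeight uint64, err error) {
	events, err := adapter.Listen(ctx, namespace, from)
	if err != nil {
		return from, err
	}
	lastHeight = from
	for ev := range events {
		if err := handle(ev); err != nil {
			return lastHeight, err
		}
		if ev.Height > lastHeight {
			lastHeight = ev.Height
		}
	}
	return lastHeight, nil
}
```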
+ resumeEvents, err := adapter.Listen(ctx, namespace, fromHeight) + require.NoError(t, err, "starting resume Listen") + + seen := make(map[string]block.FiberBlobEvent, resumeBlobs) + for len(seen) < resumeBlobs { + select { + case ev, ok := <-resumeEvents: + require.True(t, ok, "resume Listen channel closed early") + key := hex.EncodeToString(ev.BlobID) + if _, want := expected[key]; !want { + continue + } + if _, dup := seen[key]; dup { + t.Fatalf("resume Listen emitted duplicate for BlobID %s", key) + } + seen[key] = ev + t.Logf("phase2 listen[%d/%d] blob_id=%s height=%d data_size=%d", + len(seen), resumeBlobs, key, ev.Height, ev.DataSize) + case <-time.After(listenEventsTimeout): + missing := make([]string, 0, resumeBlobs-len(seen)) + for k := range expected { + if _, got := seen[k]; !got { + missing = append(missing, k) + } + } + t.Fatalf("timed out on resume Listen: got %d/%d; missing=%v", + len(seen), resumeBlobs, missing) + } + } + + // Every resume event must carry the correct DataSize (Download- + // resolved, same as the live Listen path) and the right height. + for key, ev := range seen { + require.Equal(t, uint64(len(expected[key])), ev.DataSize, + "resume BlobEvent %s DataSize must match original payload length", key) + require.Equal(t, heights[key], ev.Height, + "resume BlobEvent %s Height must match the block it settled in", key) + } +} diff --git a/tools/talis/.gitignore b/tools/talis/.gitignore new file mode 100644 index 0000000000..f48fd8e806 --- /dev/null +++ b/tools/talis/.gitignore @@ -0,0 +1,2 @@ +/talis +/cmd/evnode-txsim/evnode-txsim diff --git a/tools/talis/Makefile b/tools/talis/Makefile new file mode 100644 index 0000000000..291b6d36dd --- /dev/null +++ b/tools/talis/Makefile @@ -0,0 +1,175 @@ +# Build helpers for the talis fibre-experiment deploy. +# +# Targets: +# make build-bins build all three: celestia-appd, celestia, evnode-fibre +# make build-app celestia-appd from $(CELESTIA_APP_REPO) at $(CELESTIA_APP_REF) +# make build-node celestia from $(CELESTIA_NODE_REPO) at $(CELESTIA_NODE_REF) +# make build-evnode evnode-fibre runner from this repo (tools/celestia-node-fiber/cmd/evnode-fibre) +# make clean wipe build/ +# +# All binaries are cross-compiled to linux/amd64 — talis instances are +# Ubuntu 24.04 amd64 regardless of provider — and dropped into ./build/. +# Pass that directory to `talis genesis -b ./build` and the validator, +# bridge, and ev-node deploys all pick up the same artefacts. +# +# Repo locations and refs are overridable via env / make var, with +# sensible defaults that match the rest of fibre-experiment: +# +# CELESTIA_APP_REPO default: ../../../celestia-app (sibling clone) +# CELESTIA_APP_REF default: feat/fibre-payments +# CELESTIA_NODE_REPO default: ../../../celestia-node (sibling clone) +# CELESTIA_NODE_REF default: feature/fibre-experimental +# +# If the repos are not cloned next to this checkout, override the path +# variables at the make invocation: +# +# make build-bins CELESTIA_APP_REPO=/path/to/celestia-app + +CELESTIA_APP_REPO ?= ../../../celestia-app +CELESTIA_APP_REF ?= feat/fibre-payments + +CELESTIA_NODE_REPO ?= ../../../celestia-node +CELESTIA_NODE_REF ?= feature/fibre-experimental + +EVNODE_RUNNER_PATH ?= ../celestia-node-fiber/cmd/evnode-fibre +TXSIM_PATH ?= ./cmd/evnode-txsim + +BUILD_DIR := $(CURDIR)/build + +GOOS ?= linux +GOARCH ?= amd64 + +# -trimpath strips local paths from binaries so they're reproducible +# across machines. -s -w drops debug + symbol tables — saves ~30% size, +# matters for upload over slow links. 
+GOFLAGS_COMMON := -trimpath -ldflags='-s -w' + +# The fibre / valaddr modules in celestia-app + celestia-node are +# gated behind the `fibre` build tag. Without it, the keepers are +# stubbed out and the gRPC Query services aren't registered, which +# breaks both setup-fibre's host registry lookup and the bridge's +# blob.Subscribe path. +GOTAGS_FIBRE := -tags fibre + +.PHONY: build-bins build-app build-fibre build-fibre-txsim build-node build-evnode build-txsim clean check-repos + +build-bins: build-app build-fibre build-fibre-txsim build-node build-evnode build-txsim + @echo + @echo "✅ All binaries built into $(BUILD_DIR):" + @ls -lh $(BUILD_DIR) + +$(BUILD_DIR): + @mkdir -p $(BUILD_DIR) + +# celestia-appd from celestia-app's feat/fibre-payments. We compile +# from cmd/celestia-appd; the make target in celestia-app's Makefile +# does the same plus version-stamps but we don't need that for the +# experiment (any commit on feat/fibre-payments will do). +build-app: $(BUILD_DIR) check-repos + @echo "==> Building celestia-appd ($(CELESTIA_APP_REF))" + @cd $(CELESTIA_APP_REPO) && \ + git fetch --quiet origin $(CELESTIA_APP_REF) && \ + current=$$(git rev-parse --abbrev-ref HEAD); \ + if [ "$$current" != "$(CELESTIA_APP_REF)" ]; then \ + git checkout --quiet $(CELESTIA_APP_REF) || { \ + echo " NOTE: branch checked out in another worktree; building from current ref $$current"; \ + }; \ + fi && \ + GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) $(GOTAGS_FIBRE) -o $(BUILD_DIR)/celestia-appd ./cmd/celestia-appd + @echo " -> $(BUILD_DIR)/celestia-appd" + +# fibre server (FSP) — separate cmd in celestia-app's repo at +# fibre/cmd/. Each validator runs one of these colocated; talis +# start-fibre tmuxs it on port 7980. Required at deploy time; +# without it `start-fibre` fails with "fibre: command not found". +build-fibre: $(BUILD_DIR) check-repos + @echo "==> Building fibre server ($(CELESTIA_APP_REF))" + @cd $(CELESTIA_APP_REPO) && \ + current=$$(git rev-parse --abbrev-ref HEAD); \ + if [ "$$current" != "$(CELESTIA_APP_REF)" ]; then \ + git checkout --quiet $(CELESTIA_APP_REF) || true; \ + fi && \ + GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) $(GOTAGS_FIBRE) -o $(BUILD_DIR)/fibre ./fibre/cmd + @echo " -> $(BUILD_DIR)/fibre" + +# fibre-txsim — celestia-app's blob load generator. Used by talis +# fibre-txsim. Not strictly required for the ev-node experiment but +# baked into talis genesis's encoder-payload staging, so build it for +# completeness. +build-fibre-txsim: $(BUILD_DIR) check-repos + @echo "==> Building fibre-txsim ($(CELESTIA_APP_REF))" + @cd $(CELESTIA_APP_REPO) && \ + current=$$(git rev-parse --abbrev-ref HEAD); \ + if [ "$$current" != "$(CELESTIA_APP_REF)" ]; then \ + git checkout --quiet $(CELESTIA_APP_REF) || true; \ + fi && \ + GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) $(GOTAGS_FIBRE) -o $(BUILD_DIR)/fibre-txsim ./tools/fibre-txsim + @echo " -> $(BUILD_DIR)/fibre-txsim" + +# celestia (bridge / light node binary) from celestia-node's +# feature/fibre-experimental. 
+build-node: $(BUILD_DIR) check-repos + @echo "==> Building celestia ($(CELESTIA_NODE_REF))" + @cd $(CELESTIA_NODE_REPO) && \ + git fetch --quiet origin $(CELESTIA_NODE_REF) && \ + current=$$(git rev-parse --abbrev-ref HEAD); \ + if [ "$$current" != "$(CELESTIA_NODE_REF)" ]; then \ + git checkout --quiet $(CELESTIA_NODE_REF) || { \ + echo " NOTE: branch checked out in another worktree; building from current ref $$current"; \ + }; \ + fi && \ + GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) $(GOTAGS_FIBRE) -o $(BUILD_DIR)/celestia ./cmd/celestia + @echo " -> $(BUILD_DIR)/celestia" + +# evnode-fibre is the long-lived ev-node aggregator wired to +# celestia-node-fiber. Lives next to the adapter so it inherits the +# adapter's go.mod and avoids dragging celestia-node into testapp's +# tree. We rename the artefact to "evnode" so talis genesis --build-dir +# picks it up under that name (same convention as celestia / celestia-appd). +build-evnode: $(BUILD_DIR) + @echo "==> Building evnode-fibre (this repo, current HEAD)" + @cd $(EVNODE_RUNNER_PATH) && \ + GOWORK=off GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) $(GOTAGS_FIBRE) -o $(BUILD_DIR)/evnode . + @echo " -> $(BUILD_DIR)/evnode" + +# evnode-baseline: same wiring as evnode-fibre but uses +# block.NewDAClient (celestia-node bridge JSON-RPC) instead of the +# Fibre adapter. Lives next to evnode-fibre so it shares the cnf +# go.mod and replace directives. +build-evnode-baseline: $(BUILD_DIR) + @echo "==> Building evnode-baseline (this repo, current HEAD)" + @cd $(EVNODE_RUNNER_PATH)/../evnode-baseline && \ + GOWORK=off GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) $(GOTAGS_FIBRE) -o $(BUILD_DIR)/evnode-baseline . + @echo " -> $(BUILD_DIR)/evnode-baseline" + +# evnode-txsim is the load generator that ships to loadgen-* nodes. +# Stdlib-only, so the build is fast and has no sibling-repo deps. +build-txsim: $(BUILD_DIR) + @echo "==> Building evnode-txsim (this repo, current HEAD)" + @GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) -o $(BUILD_DIR)/evnode-txsim $(TXSIM_PATH) + @echo " -> $(BUILD_DIR)/evnode-txsim" + +# Sanity check: make sure the sibling clones exist so we fail with a +# friendly message instead of a deep go-tool error. .git can be either a +# directory (regular clone) or a file (git worktree), so test for either. +check-repos: + @if [ ! -e "$(CELESTIA_APP_REPO)/.git" ]; then \ + echo "ERROR: celestia-app repo not found at $(CELESTIA_APP_REPO)"; \ + echo " set CELESTIA_APP_REPO=/path/to/celestia-app or clone the repo there"; \ + exit 1; \ + fi + @if [ ! -e "$(CELESTIA_NODE_REPO)/.git" ]; then \ + echo "ERROR: celestia-node repo not found at $(CELESTIA_NODE_REPO)"; \ + echo " set CELESTIA_NODE_REPO=/path/to/celestia-node or clone the repo there"; \ + exit 1; \ + fi + +clean: + @rm -rf $(BUILD_DIR) diff --git a/tools/talis/README.md b/tools/talis/README.md new file mode 100644 index 0000000000..d89e1f8463 --- /dev/null +++ b/tools/talis/README.md @@ -0,0 +1,400 @@ +# talis + +## Prerequisites + +Talis supports DigitalOcean and Google Cloud. **Use only one provider per experiment.** + +### DigitalOcean Setup + +#### DigitalOcean Account + +- If you're part of the Celestia engineering team, ask for access to Celestia's DigitalOcean account or alternatively use a personal account. +- **Generate the API token:** Go to Settings → API → Generate New Token. +- Save the token somewhere that's easily accessible. 
+ +### Google Cloud Setup + +#### Google Cloud Account + +- If you're part of the Celestia engineering team, ask for access to Celestia's Google Cloud account. **Make sure to use Google Cloud only when the experiment requires beefy hardware and high bandwidth; otherwise, use DigitalOcean.** +- Create a service account with Compute Engine Admin permissions. +- Download the service account key JSON file. + +#### Firewall + +Firewall rules are automatically created when spinning up instances. They allow all incoming and outgoing traffic. + +### SSH Key + +- For quick and easy testing, create a new SSH key without a passphrase: + +```sh +ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519_no_passphrase -N "" +``` + +- Upload the SSH key to DigitalOcean: +- Navigate to Settings → Security → SSH Keys. +- Click "Add SSH Key". +- Paste your public key. + +```sh +cat ~/.ssh/id_ed25519_no_passphrase.pub +``` + +- Give the key a name you'll recognize easily; we'll need it later. Your key should now appear in the "SSH Keys" list. + +## Running talis + +You have two options for running talis: on your local machine, which has high RAM requirements, or inside a DigitalOcean droplet. The guide for the droplet setup is at the end of this file. + +## Install + +```sh +go install ./tools/talis/ +``` + +All binaries used by nodes in the network are compiled on the user's local machine. Either change the target when compiling celestia-app, or use the docker image to ensure complete compatibility. + +```sh +make build-talis-bins +``` + +Note that this doesn't install binaries in the `$GOPATH/bin`, so you must specify the path when creating the payload with the `genesis` subcommand using `-b` (`--build-dir`) to copy an entire build directory, or the per-binary flags such as `-a` (`--app-binary`) and `-t` (`--txsim-binary`). See `genesis` subcommand usage below. + +## Usage + +If the relevant binaries are installed via go, and the celestia-app repo is +downloaded, then the talis defaults should work. Your `$GOPATH` is used to copy the scripts from this repo to the payload, along with default locations for the binaries. + +### init-env (Optional) + +Generate a `.env` template file with required environment variables for your provider. + +```sh +# Generate .env template for Google Cloud +talis init-env --provider googlecloud + +# Generate .env template for DigitalOcean (default) +talis init-env --provider digitalocean +``` + +This creates a `.env` file with all required and optional fields for the provider for you to fill in. + +### init + +Talis supports setting up an observability stack (Prometheus, Grafana, and Loki) for monitoring your network. Observability nodes can be deployed on either **DigitalOcean** or **Google Cloud**. + +**Note:** Set environment variables (or create a `.env` file with `talis init-env`) **before** running `talis init` for automatic config population. + +```sh +# initializes the repo w/ editable scripts and configs +talis init -c -e + +# with observability node in case you want to view the metrics (DigitalOcean by default) +talis init -c -e --with-observability + +# with observability node on Google Cloud +talis init -c -e --with-observability --provider googlecloud +``` + +This will initialize the directory structure used for conducting an experiment. + +```text +.
+├── app.toml +├── config.json +├── config.toml +├── data/ +├── payload/ +└── scripts/ +``` + +the celestia-app configs (config.toml and app.toml) can be manually edited here, and they will be copied to each node. `config.json` is the talis specific configuration file that contains all info related to spinning up the network. This is updated after the nodes have been spun up. Basic defaults are set, but the relevant fields can either be edited after generation or via using a flag. At this point, it looks something like this: + +```json +{ + "validators": [], + "chain_id": "talis-test-3", + "experiment": "test-3", + "ssh_pub_key_path": "/home/HOSTNAME/.ssh/id_ed25519.pub", + "ssh_key_name": "HOSTNAME", + "digitalocean_token": "pulled from env var if available", + "google_cloud_project": "pulled from env var if available", + "google_cloud_key_json_path": "pulled from env var if available", + "s3_config": { + "region": "pulled from AWS_DEFAULT_REGION env var if available", + "access_key_id": "pulled from AWS_ACCESS_KEY_ID env var if available", + "secret_access_key": "pulled from AWS_SECRET_ACCESS_KEY env var if available", + "bucket_name": "pulled from AWS_S3_BUCKET env var if available", + "endpoint": "pulled from AWS_S3_ENDPOINT env var if available. Can be left empty if targeting an AWS S3 bucket" + } +} +``` + +Notes: + +- **Only use one cloud provider per experiment.** Fill out either DigitalOcean or Google Cloud fields, not both. Filling them both might end up ruining other experiments or having stuck experiments that need to be removed by hand. +- The AWS config supports any S3-compatible bucket. So it can be used with Digital Ocean and other cloud providers. +- Example: The S3 endpoint for Digital Ocean is: `https://.digitaloceanspaces.com/`. + +### add + +```sh +# adds specific nodes to the config (see flags for further configuration) +talis add -t -c + +# specify provider (digitalocean or googlecloud) +talis add -t -c --provider +``` + +If we call: + +```sh +talis add -t validator -c 1 +``` + +`node-type` options: `validator`, `observability` (bridges/lights are still not supported). +`provider` options: `digitalocean` (default), `googlecloud`. + +The config will look like: + +```json +{ + "validators": [ + { + "node_type": "validator", + "public_ip": "TBD", + "private_ip": "TBD", + "provider": "digitalocean", + "slug": "c2-16vcpu-32gb", + "region": "nyc3", // randomly determined unless specified. + "name": "validator-0", + "tags": [ + "talis", + "validator", + "validator-0", + "chainID" + ] + } + ], + ... + "chain_id": "talis-test", + "experiment": "test", + "ssh_pub_key_path": "/home/HOSTNAME/.ssh/id_ed25519.pub", + "ssh_key_name": "HOSTNAME", + ... +} +``` + +### up + +`up` uses the configuration to spin up the cloud instances. Note that this doesn't start the network! + +```sh +# uses the config to spin up nodes on the relevant cloud services +talis up + +# use more workers for faster instance creation. DigitalOcean has a 5000 requests/hour rate limit per API token. +# For droplet creation, each worker makes ~3-5 API calls per droplet, so ~20 workers should be safe for most use cases. +talis up --workers 20 +``` + +### genesis + +Before we can start the network, we need to create a payload that contains everything each instance needs to actually start the network. This includes all the required keys, configs, genesis.json, and startup scripts. The `--square-size` flag will change the `GovMaxSquareSize`. 
By default, the binaries in the $GOPATH/bin will be used, however if specific binaries are needed (likely unless you are running some flavor of debian), use the `-b` (`--build-dir`) flag to copy every binary from a build directory, or the individual flags such as `-a` (`--app-binary`) and `-t` (`--txsim-binary`) when you only need to override specific executables. + +```sh +# creates the payload for the network. This contains all addresses, configs, binaries (from your local GOPATH if not specified), genesis.json, and startup scripts. The `--square-size` flag will change the `GovMaxSquareSize` +talis genesis -s 256 -b /home/$HOSTNAME/go/src/github.com/celestiaorg/celestia-app/build +``` + +Keep in mind that we can still edit anything in the payload before deploying the network. + +Note: When increasing the genesis square size, ensure you also increase the `SquareSizeUpperBound` constant to allow blocks to be created at the new size. + +### deploy + +This step is when the network is actually started. The payload is uploaded to each instance in the network directly from the user's machine. After delivering the payload, the start script is executed in a tmux session called "app" on each machine. + +```sh +# sends the payload to each node and boots the network by executing the relevant startup scripts +talis deploy + +# use more workers for faster deployment (when using direct upload) +talis deploy --direct-payload-upload --workers 20 +``` + +Note: By default, the `deploy` command will upload the payload to the configured S3 bucket, and then download it in the nodes. To upload the payload directly without passing by S3, use the `--direct-payload-upload` flag. The `--workers` flag only affects the direct upload method. + +### txsim + +To load the network we can use `talis` to start txsim on as many validator nodes as we want for that experiment. + +```sh +# start txsim on some number of the validator instances +talis txsim -i -s --min-blob-size --max-blob-size +``` + +### status + +Often, it's useful to quickly check if all the nodes have caught up to the tip of the chain. This can be done via the status command, which simply prints the height of each validator after querying the `Status` endpoint. + +```sh +# check which height each validator is at +talis status +``` + +### traces + +To download traces from the network, we can use `talis` to download traces from as many validator nodes as we want for that experiment. + +```sh +# download some number of traces directly from nodes to your machine via sftp +talis download -n -t [flags] + +# use more workers for faster downloads from many nodes +talis download -n -t
--workers 20 +``` + +To quickly view block times, assuming this table was being traced we can run: + +```sh +talis download -n validator-0 -t consensus_block +``` + +or if we needed to quickly see all of the mempool traces: + +```sh +talis download -n validator-* -t mempool_tx +``` + +or if we want to check on the logs we can call: + +```sh +talis download -n validator-* -t logs +``` + +### Collecting all traces to an s3 bucket + +At the end of the experiment, we can quickly save all of the traces to an s3 bucket assuming that we filled out the s3 config in the config.json. + +```sh +talis upload-data +``` + +This could take a few minutes if there is a ton of trace data, but often is completed in <30s. To download this data from the s3 bucket, we can use the s3 subcommand: + +```sh +talis download s3 +``` + +### Modifying the nodes in place + +Instead of shutting down all of the nodes, if we want to run a slightly modified experiment, we can simply run the [reset](#reset) command then rerun the `genesis` and `deploy` commands. This will create a new payload and restart the network without tearing down the cloud instances. This will delete any trace data. + +### reset + +This command allows you to stop running services and clean up files created by the `deploy` command for either specific validators or all validators in the network. + +```sh +# Reset all validators in the network +talis reset + +# Reset specific validators +talis reset -v validator-0,validator-1 +``` + +### down + +Finally, remember to tear down the cloud instances. This should work first try, but it's a good habit to re-run or check the webUI for large experiments to make sure nodes were shut down successfully. + +```sh +# tears down the network +talis down + +# use more workers for faster teardown of many instances +talis down --workers 20 +``` + +## Running Talis inside of a DigitalOcean droplet + +Create a new droplet: + +- Recommended Size: 32GB RAM 16CPU +- SSH Keys: Add your SSH key + +SSH into the Droplet: + +```sh +ssh root@YOUR_DROPLET_IP +``` + +Install Deps: + +```sh +# Install Go +snap install go --channel=1.26/stable --classic + +# Install Docker +apt install docker.io -y +systemctl start docker +usermod -aG docker $USER + +# Install misc tools +apt install git curl jq -y +``` + +Set up Go env: + +```sh +echo 'export GOPATH="$HOME/go"' >> ~/.profile +echo 'export GOBIN="$GOPATH/bin"' >> ~/.profile +echo 'export PATH="$GOBIN:$PATH"' >> ~/.profile +source ~/.profile +``` + +Clone and build: + +```sh +# Clone celestia-app and cd into it +git clone https://github.com/celestiaorg/celestia-app.git +cd celestia-app + +# Build binaries (celestia, celestia-appd, txsim) +make build-talis-bins + +# Install talis +go install ./tools/talis/ +``` + +Set env variables: + +```sh +export DIGITALOCEAN_TOKEN="your_api_token_here" +export TALIS_SSH_KEY_PATH="~/.ssh/id_ed25519_no_passphrase" +``` + +**Run Talis:** + +Talis assumes that you're your default ssh key so if you created a new key above you need to specify it in the commands. 
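+
+If you did create a dedicated key, a passphrase-less one can be generated with `ssh-keygen` (the path below is only an example and should match the `TALIS_SSH_KEY_PATH` exported above):
+
+```sh
+ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519_no_passphrase -N ""
+```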
+ +```sh +# Initialize +talis init -c your-chain-id -e your-experiment + +# Add validators +talis add -t validator -c + +# Spin up talis (use more workers if creating many instances) +talis up -n -s --workers 20 + +# Create payload +talis genesis -s 128 -b build + +# Deploy (use more workers for faster direct deployment) +talis deploy -s --direct-payload-upload --workers 20 +``` + +**Save Snapshot:** + +After you're done running experiments, make sure to take a snapshot of your deployment droplet and destroy the original. diff --git a/tools/talis/add.go b/tools/talis/add.go new file mode 100644 index 0000000000..b6eeb0ca48 --- /dev/null +++ b/tools/talis/add.go @@ -0,0 +1,142 @@ +package main + +import ( + "fmt" + "log" + + "github.com/spf13/cobra" +) + +func addCmd() *cobra.Command { + var ( + rootDir string + count int + nodeType string + provider string + region string + slug string + ) + cmd := &cobra.Command{ + Use: "add", + Short: "Adds a new instances to the configuration", + Aliases: []string{"a"}, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config %q: %w", rootDir, err) + } + + if provider == "" { + provider = "digitalocean" + } + + switch nodeType { + case "validator": + start := len(cfg.Validators) + for range count { + switch provider { + case "digitalocean": + cfg = cfg.WithDigitalOceanValidator(region) + case "googlecloud": + cfg = cfg.WithGoogleCloudValidator(region) + case "aws": + cfg = cfg.WithAWSValidator(region) + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + } + applySlug(cfg.Validators, start, slug) + case "encoder": + start := len(cfg.Encoders) + for range count { + switch provider { + case "digitalocean": + cfg = cfg.WithDigitalOceanEncoder(region) + case "googlecloud": + cfg = cfg.WithGoogleCloudEncoder(region) + case "aws": + cfg = cfg.WithAWSEncoder(region) + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + } + applySlug(cfg.Encoders, start, slug) + case "bridge": + start := len(cfg.Bridges) + for range count { + switch provider { + case "digitalocean": + cfg = cfg.WithDigitalOceanBridge(region) + case "googlecloud": + cfg = cfg.WithGoogleCloudBridge(region) + case "aws": + cfg = cfg.WithAWSBridge(region) + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + } + applySlug(cfg.Bridges, start, slug) + case "evnode": + start := len(cfg.Evnodes) + for range count { + switch provider { + case "digitalocean": + cfg = cfg.WithDigitalOceanEvnode(region) + case "googlecloud": + cfg = cfg.WithGoogleCloudEvnode(region) + case "aws": + cfg = cfg.WithAWSEvnode(region) + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + } + applySlug(cfg.Evnodes, start, slug) + case "loadgen": + start := len(cfg.Loadgens) + for range count { + switch provider { + case "digitalocean": + cfg = cfg.WithDigitalOceanLoadgen(region) + case "googlecloud": + cfg = cfg.WithGoogleCloudLoadgen(region) + case "aws": + cfg = cfg.WithAWSLoadgen(region) + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + } + applySlug(cfg.Loadgens, start, slug) + case "light": + log.Println("light nodes are not yet supported") + return nil + default: + return fmt.Errorf("unknown node type %q", nodeType) + } + + return 
cfg.Save(rootDir) + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().IntVarP(&count, "count", "c", 0, "Number of nodes to deploy") + _ = cmd.MarkFlagRequired("count") + cmd.Flags().StringVarP(&nodeType, "type", "t", "", "Type of the node (validator, encoder, bridge, evnode, loadgen, light)") + _ = cmd.MarkFlagRequired("type") + cmd.Flags().StringVarP(&provider, "provider", "p", "digitalocean", "Provider for the node (digitalocean, googlecloud, aws)") + cmd.Flags().StringVarP(®ion, "region", "r", "random", "the region to deploy the instance in (random if blank)") + cmd.Flags().StringVar(&slug, "slug", "", "provider-specific instance type override (e.g. c6in.4xlarge). Empty = provider default for the node type.") + + return cmd +} + +// applySlug overrides the Slug field on the just-added instances in the +// slice. It only touches entries at index [start, len(instances)) so a +// second `add` with a different `--slug` does not re-stamp earlier ones. +func applySlug(instances []Instance, start int, slug string) { + if slug == "" { + return + } + for i := start; i < len(instances); i++ { + instances[i].Slug = slug + } +} diff --git a/tools/talis/aws.go b/tools/talis/aws.go new file mode 100644 index 0000000000..249dc89bbd --- /dev/null +++ b/tools/talis/aws.go @@ -0,0 +1,1033 @@ +package main + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "log" + "math/rand" + "os" + "sort" + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awsconfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" +) + +const ( + // c6in.4xlarge: 16 vCPU / 32 GiB / 25 Gbps baseline network with ENA + // Express (SRD). The "n" suffix marks network-enhanced variants, which + // is what talis fibre experiments care about — they're networking-bound. + AWSDefaultValidatorInstanceType = "c6in.4xlarge" + // c6in.2xlarge: 8 vCPU / 16 GiB — encoders submit blobs via gRPC and + // don't need the full validator footprint. + AWSDefaultEncoderInstanceType = "c6in.2xlarge" + // Bridge nodes are network-bound (relay headers / blob events from + // validators to ev-node). c6in.2xlarge gives the same 25 Gbps as + // the validators while halving CPU. + AWSDefaultBridgeInstanceType = "c6in.2xlarge" + // ev-node aggregators are CPU + network bound (block production + // + DA submit pipeline). Same shape as bridges; can be sized up + // per-experiment via `--slug`. + AWSDefaultEvnodeInstanceType = "c6in.2xlarge" + // Load generators are network-bound; same shape as evnode/bridge. + AWSDefaultLoadgenInstanceType = "c6in.2xlarge" + AWSDefaultObservabilityInstanceType = "t3.medium" + AWSDefaultRootVolumeGB = int32(400) + + // AWSSecurityGroupName is the name of the security group used by every + // talis instance. It is created per-region on demand and permits all + // inbound traffic — same posture as the GCP firewall rule. + AWSSecurityGroupName = "talis-allow-all" + // AWSPlacementGroupName is the name of the cluster placement group used + // by every talis instance in a region. Cluster strategy gives the lowest + // inter-instance latency within an AZ — critical for fibre/p2p. + AWSPlacementGroupName = "talis-cluster" + + // AWSCanonicalOwnerID is Canonical's AWS account ID. It owns the + // official Ubuntu AMIs we filter against. 
+ AWSCanonicalOwnerID = "099720109477" + // AWSUbuntuImageNamePattern matches Ubuntu 24.04 LTS amd64 EBS SSD + // images (matches talis' default OS image for DO / GCP). + AWSUbuntuImageNamePattern = "ubuntu/images/hvm-ssd*/ubuntu-noble-24.04-amd64-server-*" + + // AWSDefaultZone is the AZ used for launches when Config.AWSZone is + // unset. Single-AZ launches keep all cross-instance traffic intra-AZ + // (free) and enable a cluster placement group for minimum latency. + AWSDefaultZone = "us-east-1a" +) + +// AWSRegions is the pool used when "random" is selected for an AWS +// instance. We ship a single region by default: cross-region traffic on +// AWS is billed at $0.09/GB (~9× DO), so running networking-heavy +// experiments across regions is wildly expensive. Operators who need +// multi-region can set an explicit Region on each Instance. +var AWSRegions = []string{"us-east-1"} + +// amiCache memoises the resolved Ubuntu AMI per region — AMIs are +// region-scoped and resolving them costs an API round-trip. +var amiCache sync.Map // map[region]string + +type AWSClient struct { + ClientInfo + defaultRegion string +} + +func NewAWSClient(cfg Config) (*AWSClient, error) { + if cfg.AWSRegion == "" { + return nil, errors.New("AWS region is required") + } + sshKey, err := os.ReadFile(cfg.SSHPubKeyPath) + if err != nil { + return nil, fmt.Errorf("failed to read SSH public key at %s: %w", cfg.SSHPubKeyPath, err) + } + return &AWSClient{ + ClientInfo: ClientInfo{ + sshKey: sshKey, + cfg: cfg, + }, + defaultRegion: cfg.AWSRegion, + }, nil +} + +func (c *AWSClient) Up(ctx context.Context, workers int) error { + zone := c.cfg.AWSZone + if zone == "" { + zone = AWSDefaultZone + } + + insts := make([]Instance, 0) + allInstances := append(append(append(append(append(c.cfg.Validators, c.cfg.Observability...), c.cfg.Encoders...), c.cfg.Bridges...), c.cfg.Evnodes...), c.cfg.Loadgens...) + for _, v := range allInstances { + if v.Provider != AWS { + continue + } + if v.Region == "" || v.Region == RandomRegion { + v.Region = RandomAWSRegion() + } + if v.Zone == "" { + v.Zone = zone + } + insts = append(insts, v) + } + + if len(insts) == 0 { + return fmt.Errorf("no instances to create") + } + + insts, err := CreateAWSInstances(ctx, insts, string(c.sshKey), c.cfg.SSHKeyName, workers) + if err != nil { + return fmt.Errorf("failed to create instances: %w", err) + } + + for _, inst := range insts { + cfg, err := c.cfg.UpdateInstance(inst.Name, inst.PublicIP, inst.PrivateIP) + if err != nil { + return fmt.Errorf("failed to update config with instance %s: %w", inst.Name, err) + } + c.cfg = cfg + } + return nil +} + +func (c *AWSClient) Down(ctx context.Context, workers int) error { + insts := make([]Instance, 0) + allInstances := append(append(append(append(append(c.cfg.Validators, c.cfg.Observability...), c.cfg.Encoders...), c.cfg.Bridges...), c.cfg.Evnodes...), c.cfg.Loadgens...) 
+ for _, v := range allInstances { + if v.Provider != AWS { + continue + } + if v.Region == "" || v.Region == RandomRegion { + v.Region = RandomAWSRegion() + } + insts = append(insts, v) + } + if len(insts) == 0 { + return fmt.Errorf("no instances to destroy") + } + _, err := DestroyAWSInstances(ctx, insts, workers) + return err +} + +func (c *AWSClient) List(ctx context.Context) error { + cnt := 0 + for _, region := range AWSRegions { + client, err := newEC2Client(ctx, region) + if err != nil { + return fmt.Errorf("failed to create EC2 client in %s: %w", region, err) + } + insts, err := describeTalisInstances(ctx, client) + if err != nil { + return fmt.Errorf("describe instances in %s: %w", region, err) + } + for _, inst := range insts { + if cnt == 0 { + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", "Name", "Status", "Region", "Public IP", "Created") + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", "----", "------", "------", "---------", "-------") + } + state := "" + if inst.State != nil { + state = string(inst.State.Name) + } + publicIP := "" + if inst.PublicIpAddress != nil { + publicIP = *inst.PublicIpAddress + } + created := "" + if inst.LaunchTime != nil { + created = inst.LaunchTime.Format(time.RFC3339) + } + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", + instanceNameFromTags(inst.Tags), state, region, publicIP, created) + cnt++ + } + } + fmt.Println("Total number of talis instances:", cnt) + return nil +} + +func (c *AWSClient) GetConfig() Config { + return c.cfg +} + +func NewAWSValidator(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomAWSRegion() + } + i := NewBaseInstance(Validator) + i.Provider = AWS + i.Slug = AWSDefaultValidatorInstanceType + i.Region = region + return i +} + +func NewAWSEncoder(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomAWSRegion() + } + i := NewBaseInstance(Encoder) + i.Provider = AWS + i.Slug = AWSDefaultEncoderInstanceType + i.Region = region + return i +} + +func NewAWSBridge(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomAWSRegion() + } + i := NewBaseInstance(Bridge) + i.Provider = AWS + i.Slug = AWSDefaultBridgeInstanceType + i.Region = region + return i +} + +func NewAWSEvnode(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomAWSRegion() + } + i := NewBaseInstance(Evnode) + i.Provider = AWS + i.Slug = AWSDefaultEvnodeInstanceType + i.Region = region + return i +} + +func NewAWSLoadgen(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomAWSRegion() + } + i := NewBaseInstance(Loadgen) + i.Provider = AWS + i.Slug = AWSDefaultLoadgenInstanceType + i.Region = region + return i +} + +func NewAWSObservability(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomAWSRegion() + } + i := NewBaseInstance(Observability) + i.Provider = AWS + i.Slug = AWSDefaultObservabilityInstanceType + i.Region = region + return i +} + +func RandomAWSRegion() string { + return AWSRegions[rand.Intn(len(AWSRegions))] +} + +// awsRegionFromEnv returns the region stamped into Config when +// `--provider aws` is selected. Falls back to us-east-1 to match AWS +// SDK's historical implicit default. +func awsRegionFromEnv() string { + if r := os.Getenv(EnvVarAWSRegion); r != "" { + return r + } + return "us-east-1" +} + +// resolveAWSZone returns the given zone or AWSDefaultZone. 
+func resolveAWSZone(zone string) string { + if zone != "" { + return zone + } + return AWSDefaultZone +} + +// newEC2Client constructs a regional EC2 client using the SDK default +// credential chain (env vars, shared credentials file, IAM role, ...). +func newEC2Client(ctx context.Context, region string) (*ec2.Client, error) { + awsCfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region)) + if err != nil { + return nil, fmt.Errorf("failed to load AWS config for region %s: %w", region, err) + } + return ec2.NewFromConfig(awsCfg), nil +} + +// CreateAWSInstances launches EC2 instances in parallel, each pinned to +// its Instance.Zone + the cluster placement group (where supported), +// waits for public + private IPs, and returns the filled-in slice. +func CreateAWSInstances(ctx context.Context, insts []Instance, sshKey, keyName string, workers int) ([]Instance, error) { + type result struct { + inst Instance + err error + timeRequired time.Duration + } + + insts, existing, err := filterExistingAWSInstances(ctx, insts) + if err != nil { + return nil, err + } + if len(existing) > 0 { + log.Println("Existing instances found, so they are not being created.") + for _, v := range existing { + log.Println("Skipping", v.Name, v.PublicIP, v.Tags) + } + } + + total := len(insts) + results := make(chan result, total) + workerChan := make(chan struct{}, workers) + var wg sync.WaitGroup + wg.Add(total) + + for _, v := range insts { + go func(inst Instance) { + workerChan <- struct{}{} + defer func() { + <-workerChan + wg.Done() + }() + + ctx, cancel := context.WithTimeout(ctx, 7*time.Minute) + defer cancel() + + start := time.Now() + log.Println("Creating instance", inst.Name, "in region", inst.Region, start.Format(time.RFC3339)) + + pubIP, privIP, err := createAWSInstance(ctx, inst, sshKey, keyName) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("create %s: %w", inst.Name, err)} + return + } + inst.PublicIP = pubIP + inst.PrivateIP = privIP + results <- result{inst: inst, err: nil, timeRequired: time.Since(start)} + }(v) + } + + go func() { + wg.Wait() + close(results) + }() + + var created []Instance + for res := range results { + if res.err != nil { + fmt.Printf("❌ %s failed after %v %v\n", res.inst.Name, res.timeRequired, res.err) + } else { + created = append(created, res.inst) + fmt.Printf("✅ %s is up (public=%s) in %v\n", res.inst.Name, res.inst.PublicIP, res.timeRequired) + } + fmt.Printf("---- Progress: %d/%d\n", len(created), total) + } + return created, nil +} + +// createAWSInstance runs the full per-instance provisioning: resolve +// AMI, ensure key pair + security group + placement group, resolve +// default subnet in the target AZ, RunInstances, wait for IPs. 
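+// On success it returns the instance's public and private IPv4 addresses.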
+func createAWSInstance(ctx context.Context, inst Instance, sshKey, keyName string) (string, string, error) { + client, err := newEC2Client(ctx, inst.Region) + if err != nil { + return "", "", err + } + + amiID, err := resolveUbuntuAMI(ctx, client, inst.Region) + if err != nil { + return "", "", fmt.Errorf("resolve AMI: %w", err) + } + if err := ensureAWSKeyPair(ctx, client, keyName, sshKey); err != nil { + return "", "", fmt.Errorf("ensure key pair: %w", err) + } + sgID, err := ensureAWSSecurityGroup(ctx, client) + if err != nil { + return "", "", fmt.Errorf("ensure security group: %w", err) + } + + useCPG := supportsClusterPlacement(inst.Slug) + if useCPG { + if err := ensureAWSPlacementGroup(ctx, client); err != nil { + return "", "", fmt.Errorf("ensure placement group: %w", err) + } + } + + zone := inst.Zone + if zone == "" { + zone = AWSDefaultZone + } + subnetID, err := defaultSubnetInAZ(ctx, client, zone) + if err != nil { + return "", "", fmt.Errorf("resolve subnet in %s: %w", zone, err) + } + + tags := awsTagsFromInstance(inst) + userData := base64.StdEncoding.EncodeToString([]byte(awsRootSSHUserData(sshKey, inst.Name))) + + placement := &ec2types.Placement{AvailabilityZone: aws.String(zone)} + if useCPG { + placement.GroupName = aws.String(AWSPlacementGroupName) + } + + runOut, err := client.RunInstances(ctx, &ec2.RunInstancesInput{ + ImageId: aws.String(amiID), + InstanceType: ec2types.InstanceType(inst.Slug), + MinCount: aws.Int32(1), + MaxCount: aws.Int32(1), + KeyName: aws.String(keyName), + UserData: aws.String(userData), + // Use a single NIC so we can force public-IP assignment regardless + // of the subnet's MapPublicIpOnLaunch setting. SubnetId and + // SecurityGroupIds must live on the interface — the API rejects + // both top-level and interface-level settings together. + NetworkInterfaces: []ec2types.InstanceNetworkInterfaceSpecification{{ + DeviceIndex: aws.Int32(0), + SubnetId: aws.String(subnetID), + Groups: []string{sgID}, + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + }}, + Placement: placement, + BlockDeviceMappings: []ec2types.BlockDeviceMapping{{ + DeviceName: aws.String("/dev/sda1"), + Ebs: &ec2types.EbsBlockDevice{ + VolumeSize: aws.Int32(AWSDefaultRootVolumeGB), + VolumeType: ec2types.VolumeTypeGp3, + DeleteOnTermination: aws.Bool(true), + }, + }}, + MetadataOptions: &ec2types.InstanceMetadataOptionsRequest{ + HttpTokens: ec2types.HttpTokensStateRequired, + HttpEndpoint: ec2types.InstanceMetadataEndpointStateEnabled, + }, + TagSpecifications: []ec2types.TagSpecification{ + {ResourceType: ec2types.ResourceTypeInstance, Tags: tags}, + {ResourceType: ec2types.ResourceTypeVolume, Tags: tags}, + }, + }) + if err != nil { + return "", "", fmt.Errorf("run instance: %w", err) + } + if len(runOut.Instances) == 0 || runOut.Instances[0].InstanceId == nil { + return "", "", fmt.Errorf("RunInstances returned no instances") + } + + return waitForAWSNetworkIP(ctx, client, *runOut.Instances[0].InstanceId) +} + +// supportsClusterPlacement reports whether the given EC2 instance type +// can join a cluster placement group. Cluster placement groups require +// compute/network-optimised families; burstable (t*) is explicitly +// rejected by the API. Observability nodes default to t3.medium, which +// falls back to AZ-only placement. 
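+// For example, "c6in.4xlarge" qualifies while the "t3.medium" observability
+// default does not.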
+func supportsClusterPlacement(slug string) bool { + return slug != "" && !strings.HasPrefix(slug, "t") +} + +func waitForAWSNetworkIP(ctx context.Context, client *ec2.Client, instanceID string) (string, string, error) { + ticker := time.NewTicker(4 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return "", "", ctx.Err() + case <-ticker.C: + out, err := client.DescribeInstances(ctx, &ec2.DescribeInstancesInput{ + InstanceIds: []string{instanceID}, + }) + if err != nil { + return "", "", err + } + inst, ok := firstInstance(out) + if !ok { + continue + } + var pubIP, privIP string + if inst.PublicIpAddress != nil { + pubIP = *inst.PublicIpAddress + } + if inst.PrivateIpAddress != nil { + privIP = *inst.PrivateIpAddress + } + if pubIP != "" && privIP != "" { + return pubIP, privIP, nil + } + } + } +} + +func firstInstance(out *ec2.DescribeInstancesOutput) (ec2types.Instance, bool) { + for _, r := range out.Reservations { + for _, i := range r.Instances { + return i, true + } + } + return ec2types.Instance{}, false +} + +// filterExistingAWSInstances removes instances whose experiment tag +// already exists in any region covered by the request. Groups by region +// so each region is queried once. +func filterExistingAWSInstances(ctx context.Context, insts []Instance) ([]Instance, []Instance, error) { + regions := make(map[string]struct{}) + for _, inst := range insts { + regions[inst.Region] = struct{}{} + } + + existingTags := make(map[string]bool) + for region := range regions { + client, err := newEC2Client(ctx, region) + if err != nil { + return nil, nil, err + } + tags, err := collectTalisTagKeys(ctx, client) + if err != nil { + return nil, nil, fmt.Errorf("list existing tags in %s: %w", region, err) + } + for tag := range tags { + existingTags[tag] = true + } + } + + var newInsts, existing []Instance + for _, inst := range insts { + experimentTag := GetExperimentTag(inst.Tags) + if experimentTag == "" || !existingTags[experimentTag] { + newInsts = append(newInsts, inst) + } else { + existing = append(existing, inst) + } + } + return newInsts, existing, nil +} + +func collectTalisTagKeys(ctx context.Context, client *ec2.Client) (map[string]bool, error) { + out := make(map[string]bool) + paginator := ec2.NewDescribeInstancesPaginator(client, &ec2.DescribeInstancesInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("tag-key"), Values: []string{"talis"}}, + {Name: aws.String("instance-state-name"), Values: []string{"pending", "running", "stopping", "stopped"}}, + }, + }) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return nil, err + } + for _, r := range page.Reservations { + for _, i := range r.Instances { + for _, t := range i.Tags { + if t.Key != nil { + out[*t.Key] = true + } + } + } + } + } + return out, nil +} + +func DestroyAWSInstances(ctx context.Context, insts []Instance, workers int) ([]Instance, error) { + return destroyAWSInstancesInternal(ctx, insts, workers) +} + +func destroyAWSInstancesInternal(ctx context.Context, insts []Instance, workers int) ([]Instance, error) { + type result struct { + inst Instance + err error + timeRequired time.Duration + } + + results := make(chan result, len(insts)) + workerChan := make(chan struct{}, workers) + var wg sync.WaitGroup + wg.Add(len(insts)) + + for _, inst := range insts { + go func(inst Instance) { + workerChan <- struct{}{} + defer func() { + <-workerChan + wg.Done() + }() + start := time.Now() + fmt.Println("⏳ Deleting instance", inst.Name, 
inst.PublicIP) + + delCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + + region := inst.Region + if region == "" { + found, err := findAWSInstanceRegion(delCtx, inst.Name) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("find region for %s: %w", inst.Name, err)} + return + } + region = found + } + + client, err := newEC2Client(delCtx, region) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("ec2 client %s: %w", region, err)} + return + } + + instanceID, err := findAWSInstanceID(delCtx, client, inst) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("find instance %s: %w", inst.Name, err)} + return + } + + if _, err := client.TerminateInstances(delCtx, &ec2.TerminateInstancesInput{ + InstanceIds: []string{instanceID}, + }); err != nil { + results <- result{inst: inst, err: fmt.Errorf("terminate %s: %w", inst.Name, err)} + return + } + results <- result{inst: inst, err: nil, timeRequired: time.Since(start)} + }(inst) + } + + go func() { + wg.Wait() + close(results) + }() + + var removed []Instance + var failed []result + for res := range results { + if res.err != nil { + fmt.Printf("❌ %s failed to delete after %v: %v\n", res.inst.Name, res.timeRequired, res.err) + failed = append(failed, res) + } else { + removed = append(removed, res.inst) + fmt.Printf("✅ %s terminated (took %v)\n", res.inst.Name, res.timeRequired) + } + fmt.Printf("---- Progress: %d/%d\n", len(removed)+len(failed), len(insts)) + } + return removed, nil +} + +// findAWSInstanceID resolves an Instance (by its experiment tag if +// present, otherwise by Name) to an EC2 instance ID. Filters out +// already-terminated instances so repeated calls don't return ghosts. +func findAWSInstanceID(ctx context.Context, client *ec2.Client, inst Instance) (string, error) { + filters := []ec2types.Filter{ + {Name: aws.String("instance-state-name"), Values: []string{"pending", "running", "stopping", "stopped"}}, + } + if experimentTag := GetExperimentTag(inst.Tags); experimentTag != "" { + filters = append(filters, ec2types.Filter{Name: aws.String("tag-key"), Values: []string{experimentTag}}) + } else { + filters = append(filters, ec2types.Filter{Name: aws.String("tag:Name"), Values: []string{inst.Name}}) + } + + paginator := ec2.NewDescribeInstancesPaginator(client, &ec2.DescribeInstancesInput{Filters: filters}) + var ids []string + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return "", err + } + for _, r := range page.Reservations { + for _, i := range r.Instances { + if i.InstanceId != nil { + ids = append(ids, *i.InstanceId) + } + } + } + } + + switch len(ids) { + case 0: + return "", fmt.Errorf("no instances found for %s", inst.Name) + case 1: + return ids[0], nil + default: + return "", fmt.Errorf("multiple instances match %s: %v", inst.Name, ids) + } +} + +func findAWSInstanceRegion(ctx context.Context, name string) (string, error) { + for _, region := range AWSRegions { + client, err := newEC2Client(ctx, region) + if err != nil { + continue + } + paginator := ec2.NewDescribeInstancesPaginator(client, &ec2.DescribeInstancesInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("tag:Name"), Values: []string{name}}, + {Name: aws.String("instance-state-name"), Values: []string{"pending", "running", "stopping", "stopped"}}, + }, + }) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + break + } + for _, r := range page.Reservations { + if len(r.Instances) > 0 { + return 
region, nil + } + } + } + } + return "", fmt.Errorf("instance %s not found in any known AWS region", name) +} + +// destroyAllTalisAWSInstances terminates every EC2 instance tagged +// "talis" across every known region. Called via `down --all`. +func destroyAllTalisAWSInstances(ctx context.Context, workers int) ([]Instance, error) { + var talisInstances []Instance + for _, region := range AWSRegions { + client, err := newEC2Client(ctx, region) + if err != nil { + log.Printf("⚠️ failed to build EC2 client for %s: %v", region, err) + continue + } + insts, err := describeTalisInstances(ctx, client) + if err != nil { + log.Printf("⚠️ failed to describe instances in %s: %v", region, err) + continue + } + for _, i := range insts { + publicIP := "" + if i.PublicIpAddress != nil { + publicIP = *i.PublicIpAddress + } + talisInstances = append(talisInstances, Instance{ + Name: instanceNameFromTags(i.Tags), + PublicIP: publicIP, + Region: region, + }) + } + } + + if len(talisInstances) == 0 { + log.Println("No talis AWS instances found to destroy") + return nil, nil + } + return destroyAWSInstancesInternal(ctx, talisInstances, workers) +} + +func describeTalisInstances(ctx context.Context, client *ec2.Client) ([]ec2types.Instance, error) { + var out []ec2types.Instance + paginator := ec2.NewDescribeInstancesPaginator(client, &ec2.DescribeInstancesInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("tag-key"), Values: []string{"talis"}}, + {Name: aws.String("instance-state-name"), Values: []string{"pending", "running", "stopping", "stopped"}}, + }, + }) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return nil, err + } + for _, r := range page.Reservations { + out = append(out, r.Instances...) + } + } + return out, nil +} + +func checkForRunningAWSExperiments(ctx context.Context, awsRegionConfigured bool, experimentID, chainID string) (bool, error) { + if !awsRegionConfigured { + return false, nil + } + for _, region := range AWSRegions { + client, err := newEC2Client(ctx, region) + if err != nil { + return false, fmt.Errorf("failed to create EC2 client in %s: %w", region, err) + } + insts, err := describeTalisInstances(ctx, client) + if err != nil { + return false, fmt.Errorf("describe instances in %s: %w", region, err) + } + for _, i := range insts { + for _, t := range i.Tags { + if t.Key == nil { + continue + } + if hasAWSExperimentTag(*t.Key, experimentID, chainID) { + return true, nil + } + } + } + } + return false, nil +} + +func hasAWSExperimentTag(tag, experimentID, chainID string) bool { + if !strings.HasPrefix(tag, "validator-") && + !strings.HasPrefix(tag, "bridge-") && + !strings.HasPrefix(tag, "light-") && + !strings.HasPrefix(tag, "encoder-") { + return false + } + return strings.Contains(tag, experimentID) && strings.Contains(tag, chainID) +} + +// resolveUbuntuAMI finds the most recent Ubuntu 24.04 AMI in the region. +// Results are cached in-process since AMI IDs rarely change and the +// lookup costs an API round-trip. 
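+// For debugging, the candidate images can be listed with the AWS CLI, e.g.:
+//
+//	aws ec2 describe-images --owners 099720109477 \
+//	    --filters "Name=name,Values=ubuntu/images/hvm-ssd*/ubuntu-noble-24.04-amd64-server-*"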
+func resolveUbuntuAMI(ctx context.Context, client *ec2.Client, region string) (string, error) { + if cached, ok := amiCache.Load(region); ok { + return cached.(string), nil + } + + out, err := client.DescribeImages(ctx, &ec2.DescribeImagesInput{ + Owners: []string{AWSCanonicalOwnerID}, + Filters: []ec2types.Filter{ + {Name: aws.String("name"), Values: []string{AWSUbuntuImageNamePattern}}, + {Name: aws.String("state"), Values: []string{"available"}}, + {Name: aws.String("architecture"), Values: []string{"x86_64"}}, + {Name: aws.String("virtualization-type"), Values: []string{"hvm"}}, + }, + }) + if err != nil { + return "", fmt.Errorf("describe images: %w", err) + } + if len(out.Images) == 0 { + return "", fmt.Errorf("no Ubuntu AMIs found in %s", region) + } + + sort.Slice(out.Images, func(i, j int) bool { + a, b := "", "" + if out.Images[i].CreationDate != nil { + a = *out.Images[i].CreationDate + } + if out.Images[j].CreationDate != nil { + b = *out.Images[j].CreationDate + } + return a > b + }) + + amiID := "" + if out.Images[0].ImageId != nil { + amiID = *out.Images[0].ImageId + } + if amiID == "" { + return "", fmt.Errorf("selected AMI has no ID in %s", region) + } + amiCache.Store(region, amiID) + return amiID, nil +} + +// ensureAWSKeyPair imports the SSH public key under keyName if it's not +// already registered in the region. EC2 key pairs are region-scoped, so +// this runs once per region. +func ensureAWSKeyPair(ctx context.Context, client *ec2.Client, keyName, publicKey string) error { + if keyName == "" { + return errors.New("SSH key name is required for AWS — set via --ssh-key-name or TALIS_SSH_KEY_NAME") + } + if _, err := client.DescribeKeyPairs(ctx, &ec2.DescribeKeyPairsInput{ + KeyNames: []string{keyName}, + }); err == nil { + return nil + } + // Any error is treated as "not found"; let ImportKeyPair surface the + // real problem if something else is wrong. + if _, err := client.ImportKeyPair(ctx, &ec2.ImportKeyPairInput{ + KeyName: aws.String(keyName), + PublicKeyMaterial: []byte(strings.TrimSpace(publicKey)), + }); err != nil { + // `talis up` parallelises CreateAWSInstances, so each + // goroutine races to import the same key. Only one wins; + // the rest see Duplicate. Treat that as success — the key + // is now available for everyone. + if strings.Contains(err.Error(), "InvalidKeyPair.Duplicate") { + return nil + } + return fmt.Errorf("import key pair: %w", err) + } + return nil +} + +// ensureAWSSecurityGroup creates (or looks up) a security group in the +// region's default VPC that allows all inbound traffic from 0.0.0.0/0 — +// same posture as the GCP firewall rule. +func ensureAWSSecurityGroup(ctx context.Context, client *ec2.Client) (string, error) { + vpcID, err := defaultVPCID(ctx, client) + if err != nil { + return "", err + } + + desc, err := client.DescribeSecurityGroups(ctx, &ec2.DescribeSecurityGroupsInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("group-name"), Values: []string{AWSSecurityGroupName}}, + {Name: aws.String("vpc-id"), Values: []string{vpcID}}, + }, + }) + if err == nil && len(desc.SecurityGroups) > 0 && desc.SecurityGroups[0].GroupId != nil { + return *desc.SecurityGroups[0].GroupId, nil + } + + create, err := client.CreateSecurityGroup(ctx, &ec2.CreateSecurityGroupInput{ + GroupName: aws.String(AWSSecurityGroupName), + Description: aws.String("Talis: allow all inbound traffic on all ports"), + VpcId: aws.String(vpcID), + }) + if err != nil { + // Another goroutine may have raced us; try to look it up again. 
+ if desc2, err2 := client.DescribeSecurityGroups(ctx, &ec2.DescribeSecurityGroupsInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("group-name"), Values: []string{AWSSecurityGroupName}}, + {Name: aws.String("vpc-id"), Values: []string{vpcID}}, + }, + }); err2 == nil && len(desc2.SecurityGroups) > 0 && desc2.SecurityGroups[0].GroupId != nil { + return *desc2.SecurityGroups[0].GroupId, nil + } + return "", fmt.Errorf("create security group: %w", err) + } + if create.GroupId == nil { + return "", fmt.Errorf("CreateSecurityGroup returned empty group id") + } + groupID := *create.GroupId + + if _, err := client.AuthorizeSecurityGroupIngress(ctx, &ec2.AuthorizeSecurityGroupIngressInput{ + GroupId: aws.String(groupID), + IpPermissions: []ec2types.IpPermission{{ + IpProtocol: aws.String("-1"), // all protocols + IpRanges: []ec2types.IpRange{{CidrIp: aws.String("0.0.0.0/0")}}, + }}, + }); err != nil && !strings.Contains(err.Error(), "InvalidPermission.Duplicate") { + return "", fmt.Errorf("authorize ingress: %w", err) + } + return groupID, nil +} + +// ensureAWSPlacementGroup creates a cluster placement group in the +// region if one doesn't already exist. Idempotent and race-safe. +func ensureAWSPlacementGroup(ctx context.Context, client *ec2.Client) error { + out, err := client.DescribePlacementGroups(ctx, &ec2.DescribePlacementGroupsInput{ + GroupNames: []string{AWSPlacementGroupName}, + }) + if err == nil && len(out.PlacementGroups) > 0 { + return nil + } + if _, err := client.CreatePlacementGroup(ctx, &ec2.CreatePlacementGroupInput{ + GroupName: aws.String(AWSPlacementGroupName), + Strategy: ec2types.PlacementStrategyCluster, + }); err != nil && !strings.Contains(err.Error(), "InvalidPlacementGroup.Duplicate") { + return fmt.Errorf("create placement group: %w", err) + } + return nil +} + +// defaultSubnetInAZ returns the SubnetId of the default VPC's default +// subnet in the given AZ. Relies on default-VPC semantics (every account +// has one unless explicitly deleted) rather than managing subnets. 
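+// If the default subnet was deleted, it can usually be recreated with
+// `aws ec2 create-default-subnet --availability-zone <az>` (and
+// `aws ec2 create-default-vpc` if the default VPC itself is gone).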
+func defaultSubnetInAZ(ctx context.Context, client *ec2.Client, az string) (string, error) { + out, err := client.DescribeSubnets(ctx, &ec2.DescribeSubnetsInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("default-for-az"), Values: []string{"true"}}, + {Name: aws.String("availability-zone"), Values: []string{az}}, + }, + }) + if err != nil { + return "", fmt.Errorf("describe subnets: %w", err) + } + if len(out.Subnets) == 0 || out.Subnets[0].SubnetId == nil { + return "", fmt.Errorf("no default subnet in %s — the account may be missing a default VPC/subnet", az) + } + return *out.Subnets[0].SubnetId, nil +} + +func defaultVPCID(ctx context.Context, client *ec2.Client) (string, error) { + out, err := client.DescribeVpcs(ctx, &ec2.DescribeVpcsInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("is-default"), Values: []string{"true"}}, + }, + }) + if err != nil { + return "", fmt.Errorf("describe default VPC: %w", err) + } + if len(out.Vpcs) == 0 || out.Vpcs[0].VpcId == nil { + return "", fmt.Errorf("no default VPC found — create one or extend talis to accept an explicit VPC") + } + return *out.Vpcs[0].VpcId, nil +} + +func awsTagsFromInstance(inst Instance) []ec2types.Tag { + tags := make([]ec2types.Tag, 0, len(inst.Tags)+1) + tags = append(tags, ec2types.Tag{Key: aws.String("Name"), Value: aws.String(inst.Name)}) + for _, t := range inst.Tags { + tags = append(tags, ec2types.Tag{Key: aws.String(t), Value: aws.String("true")}) + } + return tags +} + +func instanceNameFromTags(tags []ec2types.Tag) string { + for _, t := range tags { + if t.Key != nil && *t.Key == "Name" && t.Value != nil { + return *t.Value + } + } + return "" +} + +// awsRootSSHUserData returns cloud-init user-data that (1) sets the +// instance hostname to the talis name (validator_init.sh parses +// `hostname` to pick per-validator keys — AWS's default `ip-172-…` +// format breaks that parser), and (2) installs the operator's SSH +// public key into /root/.ssh/authorized_keys so deployment.go can keep +// using `root@`. 
+func awsRootSSHUserData(sshKey, instanceName string) string { + key := strings.TrimSpace(sshKey) + return fmt.Sprintf(`#cloud-config +disable_root: false +preserve_hostname: false +hostname: %s +fqdn: %s +runcmd: + - hostnamectl set-hostname %s + - mkdir -p /root/.ssh + - 'echo "%s" > /root/.ssh/authorized_keys' + - chmod 700 /root/.ssh + - chmod 600 /root/.ssh/authorized_keys + - chown -R root:root /root/.ssh +`, + instanceName, + instanceName, + instanceName, + strings.ReplaceAll(key, `"`, `\"`), + ) +} diff --git a/tools/talis/client.go b/tools/talis/client.go new file mode 100644 index 0000000000..5402d86bcd --- /dev/null +++ b/tools/talis/client.go @@ -0,0 +1,232 @@ +package main + +import ( + "context" + "errors" + "fmt" + "log" + "os" + + "github.com/digitalocean/godo" + "golang.org/x/oauth2" +) + +const ( + DODropletLimit = 100 +) + +type Client interface { + Up(ctx context.Context, workers int) error + Down(ctx context.Context, workers int) error + List(ctx context.Context) error + GetConfig() Config +} + +type ClientInfo struct { + sshKey []byte + cfg Config +} + +type DOClient struct { + ClientInfo + do *godo.Client + doSSHKey godo.Key +} + +func NewClient(cfg Config) (Client, error) { + if cfg.DigitalOceanToken != "" { + return NewDOClient(cfg) + } + if cfg.GoogleCloudProject != "" { + return NewGCClient(cfg) + } + if cfg.AWSRegion != "" { + return NewAWSClient(cfg) + } + return nil, errors.New("no cloud provider credentials found") +} + +func NewDOClient(cfg Config) (*DOClient, error) { + if cfg.DigitalOceanToken == "" { + return nil, errors.New("DigitalOcean token is required") + } + + tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: cfg.DigitalOceanToken}) + client := godo.NewClient(oauth2.NewClient(context.Background(), tokenSource)) + + if client == nil { + return nil, errors.New("failed to create DigitalOcean client") + } + + sshKey, err := os.ReadFile(cfg.SSHPubKeyPath) + if err != nil { + return nil, fmt.Errorf("failed to read SSH public key at: %s %w", cfg.SSHPubKeyPath, err) + } + + key, err := GetDOSSHKeyMeta(context.Background(), client, string(sshKey)) + if err != nil { + return nil, fmt.Errorf("failed to get SSH key ID: %w", err) + } + + return &DOClient{ + ClientInfo: ClientInfo{ + sshKey: sshKey, + cfg: cfg, + }, + do: client, + doSSHKey: key, + }, nil +} + +func (c *DOClient) Up(ctx context.Context, workers int) error { + insts := make([]Instance, 0) + allInstances := append(append(append(append(append(c.cfg.Validators, c.cfg.Observability...), c.cfg.Encoders...), c.cfg.Bridges...), c.cfg.Evnodes...), c.cfg.Loadgens...) + for _, v := range allInstances { + if v.Provider != DigitalOcean { + log.Println("unexpectedly skipping instance since only DO is supported", v.Name, "in region", v.Region) + continue + } + + if v.Region == "" || v.Region == RandomRegion { + v.Region = RandomDORegion() + } + + insts = append(insts, v) + } + + if len(insts) == 0 { + return fmt.Errorf("no instances to create") + } + + // Check if spinning up these instances would exceed the 100-droplet limit + currentCount, err := c.countRunningDroplets(ctx) + if err != nil { + log.Printf("⚠️ Warning: failed to count running droplets: %v", err) + } else { + totalAfterUp := currentCount + len(insts) + if totalAfterUp > DODropletLimit { + excess := totalAfterUp - DODropletLimit + return fmt.Errorf("cannot spin up %d instances: would exceed DigitalOcean's %d droplet limit (currently %d running, would be %d total). 
Please reduce the number of instances by %d", len(insts), DODropletLimit, currentCount, totalAfterUp, excess) + } + } + + insts, err = CreateDroplets(ctx, c.do, insts, c.doSSHKey, workers) + if err != nil { + return fmt.Errorf("failed to create droplets: %w", err) + } + + for _, inst := range insts { + cfg, err := c.cfg.UpdateInstance(inst.Name, inst.PublicIP, inst.PrivateIP) + if err != nil { + return fmt.Errorf("failed to update config with instance %s: %w", inst.Name, err) + } + c.cfg = cfg + } + + return err +} + +func (c *DOClient) countRunningDroplets(ctx context.Context) (int, error) { + opts := &godo.ListOptions{} + count := 0 + for { + droplets, resp, err := c.do.Droplets.List(ctx, opts) + if err != nil { + return 0, fmt.Errorf("failed to list droplets: %w", err) + } + + count += len(droplets) + + if resp.Links == nil || resp.Links.IsLastPage() { + break + } + page, err := resp.Links.CurrentPage() + if err != nil { + return 0, fmt.Errorf("failed to paginate droplets list: %w", err) + } + + opts.Page = page + 1 + } + + return count, nil +} + +func (c *DOClient) Down(ctx context.Context, workers int) error { + insts := make([]Instance, 0) + allInstances := append(append(append(append(append(c.cfg.Validators, c.cfg.Observability...), c.cfg.Encoders...), c.cfg.Bridges...), c.cfg.Evnodes...), c.cfg.Loadgens...) + for _, v := range allInstances { + if v.Provider != DigitalOcean { + log.Println("unexpectedly skipping instance since only DO is supported", v.Name, "in region", v.Region) + continue + } + if v.Region == "" || v.Region == RandomRegion { + v.Region = RandomDORegion() + } + insts = append(insts, v) + } + + if len(insts) == 0 { + return fmt.Errorf("no instances to destroy") + } + + _, err := DestroyDroplets(ctx, c.do, insts, workers) + return err +} + +func (c *DOClient) List(ctx context.Context) error { + opts := &godo.ListOptions{} + cnt := 0 + for { + droplets, resp, err := c.do.Droplets.List(ctx, opts) + if err != nil { + return fmt.Errorf("failed to list droplets: %w", err) + } + + for _, droplet := range droplets { + if hasAllTags(droplet.Tags, []string{"talis"}) { + publicIP := "" + privateIP := "" + if len(droplet.Networks.V4) > 0 { + for _, network := range droplet.Networks.V4 { + if network.Type == "public" && publicIP == "" { + publicIP = network.IPAddress + } + if network.Type == "private" && privateIP == "" { + privateIP = network.IPAddress + } + } + } + + if cnt == 0 { + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", "Name", "Status", "Region", "Public IP", "Created") + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", "----", "------", "------", "---------", "-------") + } + + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", + droplet.Name, + droplet.Status, + droplet.Region.Slug, + publicIP, + droplet.Created) + cnt++ + } + } + + if resp.Links == nil || resp.Links.IsLastPage() { + break + } + page, err := resp.Links.CurrentPage() + if err != nil { + return fmt.Errorf("failed to paginate droplets list: %w", err) + } + + opts.Page = page + 1 + } + + fmt.Println("Total number of talis instances:", cnt) + return nil +} + +func (c *DOClient) GetConfig() Config { + return c.cfg +} diff --git a/tools/talis/cmd/evnode-txsim/main.go b/tools/talis/cmd/evnode-txsim/main.go new file mode 100644 index 0000000000..d8fd1a9435 --- /dev/null +++ b/tools/talis/cmd/evnode-txsim/main.go @@ -0,0 +1,242 @@ +// Command evnode-txsim drives the ev-node aggregator's HTTP /tx endpoint +// at a fixed rate for a fixed duration. 
Stdlib-only; deployed by talis +// onto a dedicated load-gen instance that lives separately from +// ev-node and bridge boxes so its own CPU / network do not bias the +// measurement. +// +// Output format (final line, machine-grep'able): +// +// TXSIM: target=http://X:7777/tx duration=30s tx_size=10240 +// concurrency=8 sent=300000 ok=300000 err=0 +// wall_s=30.00 sent_per_s=10000 mb_per_s=97.66 +// rtt_p50_us=145 rtt_p99_us=820 +// +// Concurrency model: N goroutines, each posting txs at most as fast as +// the server accepts them. There's no client-side rate cap by design — +// the goal is to back-pressure the server and measure its absorption +// rate, not to simulate a paced client. +package main + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/binary" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/signal" + "sort" + "sync" + "sync/atomic" + "syscall" + "time" +) + +type cliFlags struct { + target string + txSize int + concurrency int + duration time.Duration + timeout time.Duration + verbose bool +} + +func parseFlags() cliFlags { + var c cliFlags + flag.StringVar(&c.target, "target", envOr("TARGET", "http://127.0.0.1:7777/tx"), + "ev-node tx-ingest endpoint (POST raw bytes)") + flag.IntVar(&c.txSize, "tx-size", intFromEnv("TX_SIZE", 10*1024), + "per-tx payload size in bytes") + flag.IntVar(&c.concurrency, "concurrency", intFromEnv("CONCURRENCY", 8), + "number of concurrent posters") + flag.DurationVar(&c.duration, "duration", durFromEnv("DURATION", 30*time.Second), + "how long to pump (0 = until SIGTERM)") + flag.DurationVar(&c.timeout, "timeout", durFromEnv("TIMEOUT", 5*time.Second), + "per-request HTTP timeout") + flag.BoolVar(&c.verbose, "verbose", false, "log every error to stderr") + flag.Parse() + return c +} + +func envOr(name, def string) string { + if v := os.Getenv(name); v != "" { + return v + } + return def +} + +func intFromEnv(name string, def int) int { + if v := os.Getenv(name); v != "" { + var n int + if _, err := fmt.Sscanf(v, "%d", &n); err == nil && n > 0 { + return n + } + } + return def +} + +func durFromEnv(name string, def time.Duration) time.Duration { + if v := os.Getenv(name); v != "" { + if d, err := time.ParseDuration(v); err == nil { + return d + } + } + return def +} + +func main() { + cli := parseFlags() + if err := run(cli); err != nil { + fmt.Fprintln(os.Stderr, "fatal:", err) + os.Exit(1) + } +} + +func run(cli cliFlags) error { + if cli.txSize <= 16 { + return fmt.Errorf("--tx-size must be > 16 (header is 16 bytes: seq + emit_time)") + } + + // Pre-fill a randomness pool sized for cheap per-tx sampling. At + // 100 MiB/s of tx bytes, calling rand.Read per tx is itself the + // hot path; sampling from a fixed pool is dramatically cheaper + // and the experiment doesn't care about cryptographic uniqueness. 
+ poolSize := 8 * cli.txSize + if poolSize < (1 << 20) { + poolSize = 1 << 20 + } + pool := make([]byte, poolSize) + if _, err := rand.Read(pool); err != nil { + return fmt.Errorf("seed random pool: %w", err) + } + + httpClient := &http.Client{Timeout: cli.timeout} + + ctx, cancel := context.WithCancel(context.Background()) + if cli.duration > 0 { + ctx, cancel = context.WithTimeout(ctx, cli.duration) + } + defer cancel() + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigCh + cancel() + }() + + var ( + sent atomic.Uint64 + ok atomic.Uint64 + fail atomic.Uint64 + totalBytes atomic.Uint64 + ) + + // RTT samples are collected in per-worker buffers and merged at + // the end. With a 30 s pump at 10 KiB/s per worker × 8 workers, + // that's ~30 000 samples per worker, which fits in memory comfortably. + rttBufs := make([][]int64, cli.concurrency) // microseconds + + wg := sync.WaitGroup{} + start := time.Now() + for i := range cli.concurrency { + wg.Add(1) + go func(idx int) { + defer wg.Done() + rtts := make([]int64, 0, 16384) + defer func() { rttBufs[idx] = rtts }() + + buf := make([]byte, cli.txSize) + poolLen := len(pool) + var localSeq uint64 + for { + if ctx.Err() != nil { + return + } + localSeq++ + now := time.Now() + binary.BigEndian.PutUint64(buf, uint64(idx)<<32|localSeq) + binary.BigEndian.PutUint64(buf[8:], uint64(now.UnixNano())) + offset := int((localSeq * 7919) % uint64(poolLen-cli.txSize+16)) + copy(buf[16:], pool[offset:offset+cli.txSize-16]) + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, cli.target, bytes.NewReader(buf)) + if err != nil { + sent.Add(1) + fail.Add(1) + if cli.verbose { + fmt.Fprintln(os.Stderr, "request build:", err) + } + continue + } + req.Header.Set("Content-Type", "application/octet-stream") + + rttStart := time.Now() + resp, err := httpClient.Do(req) + rtt := time.Since(rttStart) + sent.Add(1) + if err != nil { + fail.Add(1) + if cli.verbose { + fmt.Fprintln(os.Stderr, "post:", err) + } + continue + } + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + ok.Add(1) + totalBytes.Add(uint64(cli.txSize)) + } else { + fail.Add(1) + if cli.verbose { + fmt.Fprintln(os.Stderr, "http:", resp.StatusCode) + } + } + rtts = append(rtts, rtt.Microseconds()) + } + }(i) + } + + wg.Wait() + elapsed := time.Since(start) + + // Merge RTT buffers and compute percentiles. Sorting in place is + // fine — the buffer goroutines have all returned by now. + var allRTT []int64 + totalRTT := 0 + for _, b := range rttBufs { + totalRTT += len(b) + } + allRTT = make([]int64, 0, totalRTT) + for _, b := range rttBufs { + allRTT = append(allRTT, b...) 
+ } + sort.Slice(allRTT, func(i, j int) bool { return allRTT[i] < allRTT[j] }) + + p50 := percentileMicros(allRTT, 0.50) + p99 := percentileMicros(allRTT, 0.99) + + mb := float64(totalBytes.Load()) / (1024 * 1024) + mbPerS := mb / elapsed.Seconds() + sentPerS := float64(sent.Load()) / elapsed.Seconds() + + fmt.Printf("TXSIM: target=%s duration=%s tx_size=%d concurrency=%d sent=%d ok=%d err=%d wall_s=%.2f sent_per_s=%.0f mb_per_s=%.2f rtt_p50_us=%d rtt_p99_us=%d\n", + cli.target, cli.duration, cli.txSize, cli.concurrency, + sent.Load(), ok.Load(), fail.Load(), + elapsed.Seconds(), sentPerS, mbPerS, + p50, p99, + ) + return nil +} + +func percentileMicros(sorted []int64, p float64) int64 { + if len(sorted) == 0 { + return 0 + } + idx := int(float64(len(sorted)-1) * p) + return sorted[idx] +} diff --git a/tools/talis/config.go b/tools/talis/config.go new file mode 100644 index 0000000000..b3ec4e3ed5 --- /dev/null +++ b/tools/talis/config.go @@ -0,0 +1,469 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "sync/atomic" +) + +type NodeType string + +const ( + // Validator represents a validator node in the network. + Validator NodeType = "validator" + // Bridge represents a bridge node in the network. + Bridge NodeType = "bridge" + // Light represents a light node in the network. + Light NodeType = "light" + // Observability represents a observability monitoring node for Prometheus/Grafana. + Observability NodeType = "observability" + // Encoder represents a dedicated fibre-txsim encoder node. + Encoder NodeType = "encoder" + + // Evnode represents an ev-node aggregator wired to celestia-node-fiber. + // Runs the evnode-fibre runner from + // tools/celestia-node-fiber/cmd/evnode-fibre. One per experiment in + // the smallest topology. + Evnode NodeType = "evnode" + + // Loadgen is a dedicated load-generator instance running + // tools/talis/cmd/evnode-txsim. Lives on its own EC2 instance to + // keep its CPU + network footprint from biasing measurements on + // the ev-node box. + Loadgen NodeType = "loadgen" +) + +var ( + valCount = atomic.Uint32{} + nodeCount = atomic.Uint32{} + lightCount = atomic.Uint32{} + observabilityCount = atomic.Uint32{} + encoderCount = atomic.Uint32{} + evnodeCount = atomic.Uint32{} + loadgenCount = atomic.Uint32{} +) + +// NodeName returns the name of the node based on its type and index. The +// name is in the format "-". For example, "validator-0" or +// "bridge-1". Index is a global counter that is incremented for each node created. +func NodeName(nodeType NodeType) string { + index := 0 + switch nodeType { + case Validator: + index = int(valCount.Add(1)) - 1 + case Bridge: + index = int(nodeCount.Add(1)) - 1 + case Light: + index = int(lightCount.Add(1)) - 1 + case Observability: + index = int(observabilityCount.Add(1)) - 1 + case Encoder: + index = int(encoderCount.Add(1)) - 1 + case Evnode: + index = int(evnodeCount.Add(1)) - 1 + case Loadgen: + index = int(loadgenCount.Add(1)) - 1 + default: + panic(fmt.Sprintf("unknown node type: %s", nodeType)) + } + return fmt.Sprintf("%s-%d", nodeType, index) +} + +// Provider simply marks the provider the instance config should target. +type Provider string + +const ( + DigitalOcean Provider = "digitalocean" + GoogleCloud Provider = "googlecloud" + AWS Provider = "aws" +) + +// Instance represents a single instance in the network. It contains +// information about the instance such as its public and private IP address, +// provider, region, and name. 
It also contains a list of tags that are +// attached to the instance. +type Instance struct { + NodeType NodeType `json:"node_type"` + // PublicIP is the public IP address of the instance. + PublicIP string `json:"public_ip"` + // PrivateIP is the private IP address of the instance. + PrivateIP string `json:"private_ip"` + // Provider is the provider of the instance. For example, "digitalocean" or + // "aws". + Provider Provider `json:"provider"` + // Slug is a provider specific string that determines what type of instance + // the node is ran on. + Slug string `json:"slug"` + // Region is the region in which the instance is created. For example, + // "nyc1" for DigitalOcean or "us-east-1" for AWS. + Region string `json:"region"` + // Zone is the provider-specific availability zone within Region. Empty + // means "any zone". Currently only populated for AWS (e.g. "us-east-1a") + // so instances can be pinned to a single AZ for free intra-AZ traffic + // and minimum latency within a cluster placement group. + Zone string `json:"zone,omitempty"` + // Name is the name of the instance. This is used to identify the instance + // in the network and is also used as the hostname of the instance. It + // therefore should be unique. + Name string `json:"name"` + // Tags are attached to every spun up instance. They are used to identify + // the instance in the network, associate the instance with an experiment + // and network, and mark as a talis instance. + Tags []string `json:"tags"` +} + +func NewBaseInstance(nodeType NodeType) Instance { + name := NodeName(nodeType) + return Instance{ + NodeType: nodeType, + PublicIP: "TBD", + PrivateIP: "TBD", + Name: name, + Tags: []string{"talis"}, + } +} + +func (i Instance) WithExperiment(experimentID, chainID string) Instance { + index := extractIndexFromName(i.Name) + experimentTag := ExperimentTag(i.NodeType, index, experimentID, chainID) + i.Tags = append(i.Tags, experimentTag) + return i +} + +func extractIndexFromName(name string) int { + parts := strings.Split(name, "-") + if len(parts) < 2 { + return 0 + } + index, _ := strconv.Atoi(parts[len(parts)-1]) + return index +} + +func ExperimentTag(nodeType NodeType, index int, experimentID, chainID string) string { + return fmt.Sprintf("%s-%d-%s-%s", nodeType, index, experimentID, chainID) +} + +func GetExperimentTag(tags []string) string { + for _, tag := range tags { + if strings.HasPrefix(tag, "validator-") || strings.HasPrefix(tag, "bridge-") || strings.HasPrefix(tag, "light-") || strings.HasPrefix(tag, "observability-") || strings.HasPrefix(tag, "encoder-") || strings.HasPrefix(tag, "evnode-") || strings.HasPrefix(tag, "loadgen-") { + return tag + } + } + return "" +} + +// Config describes the desired state of the network. +type Config struct { + Validators []Instance `json:"validators"` + Bridges []Instance `json:"bridges,omitempty"` + Lights []Instance `json:"lights,omitempty"` + Observability []Instance `json:"observability,omitempty"` + Encoders []Instance `json:"encoders,omitempty"` + Evnodes []Instance `json:"evnodes,omitempty"` + Loadgens []Instance `json:"loadgens,omitempty"` + + // ChainID is the chain ID of the network. This is used to identify the + // network and is also used as the chain ID of the network. It is + // automatically prefixed with "talis-" by default. This is required to + // increase the square size beyond the v4 limit of 128. + ChainID string `json:"chain_id"` + // Experiment is the experiment ID of the network. 
This is used to index which experiment + // the network is associated with. + Experiment string `json:"experiment"` + // SSHPubKeyPath is the path to the SSH public key that will be added to + // every instance. + SSHPubKeyPath string `json:"ssh_pub_key_path"` + // SSHKeyName is the name of the SSH key that will be used to access the + // instances. This is used to identify the SSH key in the provider's + // dashboard. If it's not already kept by the provider, the key will be + // added. + SSHKeyName string `json:"ssh_key_name"` + // DigitalOceanToken is used to authenticate with DigitalOcean. It can be + // provided via an env var or flag. + DigitalOceanToken string `json:"digitalocean_token"` + GoogleCloudProject string `json:"google_cloud_project"` + GoogleCloudKeyJSONPath string `json:"google_cloud_key_json_path"` + // AWSRegion is the default region for launching EC2 instances. When set + // (and DigitalOceanToken / GoogleCloudProject are empty), NewClient + // uses AWS as the compute provider. Credentials come from the standard + // AWS SDK credential chain (env vars, ~/.aws/credentials, IAM role). + AWSRegion string `json:"aws_region"` + // AWSZone is the availability zone within AWSRegion. All AWS instances + // get pinned to this AZ + a cluster placement group so intra-cluster + // traffic stays free and latency is minimised. Empty means "default AZ". + AWSZone string `json:"aws_zone"` + S3Config S3Config `json:"s3_config"` +} + +func NewConfig(experiment, chainID string) Config { + return Config{ + Validators: []Instance{}, + Bridges: []Instance{}, + Lights: []Instance{}, + Observability: []Instance{}, + Encoders: []Instance{}, + Evnodes: []Instance{}, + Loadgens: []Instance{}, + Experiment: experiment, + ChainID: TalisChainID(chainID), + S3Config: S3Config{ + AccessKeyID: os.Getenv(EnvVarAWSAccessKeyID), + SecretAccessKey: os.Getenv(EnvVarAWSSecretAccessKey), + BucketName: os.Getenv(EnvVarS3Bucket), + Region: os.Getenv(EnvVarAWSRegion), + Endpoint: os.Getenv(EnvVarS3Endpoint), + }, + } +} + +func (cfg Config) WithSSHPubKeyPath(path string) Config { + cfg.SSHPubKeyPath = path + return cfg +} + +func (cfg Config) WithSSHKeyName(name string) Config { + cfg.SSHKeyName = name + return cfg +} + +func (cfg Config) WithDigitalOceanToken(token string) Config { + cfg.DigitalOceanToken = token + return cfg +} + +func (cfg Config) WithGoogleCloudProject(project string) Config { + cfg.GoogleCloudProject = project + return cfg +} + +func (cfg Config) WithGoogleCloudKeyJSONPath(keyJSONPath string) Config { + cfg.GoogleCloudKeyJSONPath = keyJSONPath + return cfg +} + +func (cfg Config) WithAWSRegion(region string) Config { + cfg.AWSRegion = region + return cfg +} + +func (cfg Config) WithAWSZone(zone string) Config { + cfg.AWSZone = zone + return cfg +} + +func (cfg Config) WithS3Config(s3 S3Config) Config { + cfg.S3Config = s3 + return cfg +} + +func (cfg Config) WithDigitalOceanValidator(region string) Config { + i := NewDigitalOceanValidator(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Validators = append(cfg.Validators, i) + return cfg +} + +func (cfg Config) WithDigitalOceanObservability(region string) Config { + i := NewDigitalOceanObservability(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Observability = append(cfg.Observability, i) + return cfg +} + +func (cfg Config) WithGoogleCloudValidator(region string) Config { + i := NewGoogleCloudValidator(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Validators = append(cfg.Validators, i) + return cfg +} + 
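+// Example (illustrative only; the experiment ID, chain ID, key name, and
+// region below are placeholder values): the With* builder methods in this
+// file use value receivers and return the modified copy, so they can be
+// chained to describe a topology:
+//
+//	cfg := NewConfig("exp1", "mychain").
+//		WithSSHKeyName("talis").
+//		WithAWSRegion("us-east-1").
+//		WithAWSValidator("us-east-1").
+//		WithAWSObservability("us-east-1")
+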
+func (cfg Config) WithGoogleCloudObservability(region string) Config { + i := NewGoogleCloudObservability(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Observability = append(cfg.Observability, i) + return cfg +} + +func (cfg Config) WithDigitalOceanEncoder(region string) Config { + i := NewDigitalOceanEncoder(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Encoders = append(cfg.Encoders, i) + return cfg +} + +func (cfg Config) WithGoogleCloudEncoder(region string) Config { + i := NewGoogleCloudEncoder(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Encoders = append(cfg.Encoders, i) + return cfg +} + +func (cfg Config) WithAWSValidator(region string) Config { + i := NewAWSValidator(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Validators = append(cfg.Validators, i) + return cfg +} + +func (cfg Config) WithAWSObservability(region string) Config { + i := NewAWSObservability(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Observability = append(cfg.Observability, i) + return cfg +} + +func (cfg Config) WithAWSEncoder(region string) Config { + i := NewAWSEncoder(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Encoders = append(cfg.Encoders, i) + return cfg +} + +func (cfg Config) WithDigitalOceanBridge(region string) Config { + i := NewDigitalOceanBridge(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Bridges = append(cfg.Bridges, i) + return cfg +} + +func (cfg Config) WithGoogleCloudBridge(region string) Config { + i := NewGoogleCloudBridge(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Bridges = append(cfg.Bridges, i) + return cfg +} + +func (cfg Config) WithAWSBridge(region string) Config { + i := NewAWSBridge(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Bridges = append(cfg.Bridges, i) + return cfg +} + +func (cfg Config) WithDigitalOceanEvnode(region string) Config { + i := NewDigitalOceanEvnode(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Evnodes = append(cfg.Evnodes, i) + return cfg +} + +func (cfg Config) WithGoogleCloudEvnode(region string) Config { + i := NewGoogleCloudEvnode(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Evnodes = append(cfg.Evnodes, i) + return cfg +} + +func (cfg Config) WithAWSEvnode(region string) Config { + i := NewAWSEvnode(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Evnodes = append(cfg.Evnodes, i) + return cfg +} + +func (cfg Config) WithDigitalOceanLoadgen(region string) Config { + i := NewDigitalOceanLoadgen(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Loadgens = append(cfg.Loadgens, i) + return cfg +} + +func (cfg Config) WithGoogleCloudLoadgen(region string) Config { + i := NewGoogleCloudLoadgen(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Loadgens = append(cfg.Loadgens, i) + return cfg +} + +func (cfg Config) WithAWSLoadgen(region string) Config { + i := NewAWSLoadgen(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Loadgens = append(cfg.Loadgens, i) + return cfg +} + +func (cfg Config) WithChainID(chainID string) Config { + cfg.ChainID = TalisChainID(chainID) + return cfg +} + +func (cfg Config) Save(root string) error { + // Create the directory if it doesn't exist + if err := os.MkdirAll(root, 0o755); err != nil { + return err + } + + // Create the config file path + configFilePath := filepath.Join(root, "config.json") + + cfgFile, err := os.OpenFile(configFilePath, os.O_RDWR|os.O_CREATE|os.O_SYNC, 0o755) + if err != nil { + return err + } + defer cfgFile.Close() + + // Write the 
config to the file + encoder := json.NewEncoder(cfgFile) + encoder.SetIndent("", " ") + return encoder.Encode(cfg) +} + +// LoadConfig loads the config from the specified path. +func LoadConfig(rootDir string) (Config, error) { + cfgFile, err := os.Open(filepath.Join(rootDir, "config.json")) + if err != nil { + return Config{}, err + } + defer cfgFile.Close() + + var cfg Config + decoder := json.NewDecoder(cfgFile) + if err := decoder.Decode(&cfg); err != nil { + return Config{}, err + } + + return cfg, nil +} + +func TalisChainID(chainID string) string { + return "talis-" + chainID +} + +func (cfg Config) UpdateInstance(name, publicIP, privateIP string) (Config, error) { + for i := range cfg.Validators { + if cfg.Validators[i].Name == name { + cfg.Validators[i].PublicIP = publicIP + cfg.Validators[i].PrivateIP = privateIP + return cfg, nil + } + } + for i := range cfg.Bridges { + if cfg.Bridges[i].Name == name { + cfg.Bridges[i].PublicIP = publicIP + cfg.Bridges[i].PrivateIP = privateIP + return cfg, nil + } + } + for i := range cfg.Lights { + if cfg.Lights[i].Name == name { + cfg.Lights[i].PublicIP = publicIP + cfg.Lights[i].PrivateIP = privateIP + return cfg, nil + } + } + for i := range cfg.Observability { + if cfg.Observability[i].Name == name { + cfg.Observability[i].PublicIP = publicIP + cfg.Observability[i].PrivateIP = privateIP + return cfg, nil + } + } + for i := range cfg.Encoders { + if cfg.Encoders[i].Name == name { + cfg.Encoders[i].PublicIP = publicIP + cfg.Encoders[i].PrivateIP = privateIP + return cfg, nil + } + } + for i := range cfg.Evnodes { + if cfg.Evnodes[i].Name == name { + cfg.Evnodes[i].PublicIP = publicIP + cfg.Evnodes[i].PrivateIP = privateIP + return cfg, nil + } + } + for i := range cfg.Loadgens { + if cfg.Loadgens[i].Name == name { + cfg.Loadgens[i].PublicIP = publicIP + cfg.Loadgens[i].PrivateIP = privateIP + return cfg, nil + } + } + return cfg, fmt.Errorf("instance %s not found", name) +} diff --git a/tools/talis/deployment.go b/tools/talis/deployment.go new file mode 100644 index 0000000000..72d0cfebee --- /dev/null +++ b/tools/talis/deployment.go @@ -0,0 +1,928 @@ +package main + +import ( + "context" + "errors" + "fmt" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/digitalocean/godo" + "github.com/spf13/cobra" + "golang.org/x/oauth2" +) + +func upCmd() *cobra.Command { + var rootDir string + var cfgPath string + var SSHPubKeyPath string + var SSHKeyName string + var DOAPIToken string + var GCProject string + var GCKeyJSONPath string + var AWSRegion string + var workers int + + cmd := &cobra.Command{ + Use: "up", + Short: "Uses the config to spin up a distributed network", + Long: "Initialize the Talis network with the provided configuration.", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + // overwrite the config values if flags or env vars are set + // flag > env > config + cfg.SSHKeyName = resolveValue(SSHKeyName, EnvVarSSHKeyName, cfg.SSHKeyName) + cfg.SSHPubKeyPath = resolveValue(SSHPubKeyPath, EnvVarSSHKeyPath, cfg.SSHPubKeyPath) + cfg.DigitalOceanToken = resolveValue(DOAPIToken, EnvVarDigitalOceanToken, cfg.DigitalOceanToken) + cfg.GoogleCloudProject = resolveValue(GCProject, 
EnvVarGoogleCloudProject, cfg.GoogleCloudProject) + cfg.GoogleCloudKeyJSONPath = resolveValue(GCKeyJSONPath, EnvVarGoogleCloudKeyJSONPath, cfg.GoogleCloudKeyJSONPath) + cfg.AWSRegion = resolveValue(AWSRegion, EnvVarAWSRegion, cfg.AWSRegion) + + if err := checkForRunningExperiments(cmd.Context(), cfg); err != nil { + return err + } + + client, err := NewClient(cfg) + if err != nil { + return fmt.Errorf("failed to create client: %w", err) + } + + if err := client.Up(cmd.Context(), workers); err != nil { + return fmt.Errorf("failed to spin up network: %w", err) + } + + if err := client.GetConfig().Save(rootDir); err != nil { + return fmt.Errorf("failed to save config: %w", err) + } + + return nil + }, + } + + cmd.Flags().StringVarP(&SSHPubKeyPath, "ssh-pub-key-path", "s", "", "path to the user's SSH public key") + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "name of the config") + cmd.Flags().StringVarP(&SSHKeyName, "ssh-key-name", "n", "", "name for the SSH key") + cmd.Flags().StringVarP(&DOAPIToken, "do-api-token", "t", "", "digital ocean api token (defaults to config or env)") + cmd.Flags().StringVar(&GCProject, "gc-project", "", "google cloud project (defaults to config or env)") + cmd.Flags().StringVar(&GCKeyJSONPath, "gc-key-json-path", "", "path to google cloud service account key JSON file (defaults to config or env)") + cmd.Flags().StringVar(&AWSRegion, "aws-region", "", "AWS default region for EC2 (defaults to config or AWS_DEFAULT_REGION)") + cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of concurrent workers for parallel operations (should be > 0)") + + return cmd +} + +func deployCmd() *cobra.Command { + var ( + rootDir string + cfgPath string + SSHKeyPath string + directUpload bool + ignoreFailed bool + workers int + ) + + cmd := &cobra.Command{ + Use: "deploy", + Short: "Uses the config to spin up a distributed network", + Long: "Initialize the Talis network with the provided configuration.", + RunE: func(cmd *cobra.Command, args []string) error { + tarPath := filepath.Join(rootDir, "payload.tar.gz") + log.Printf("Compressing payload to %s\n", tarPath) + tarCmd := exec.Command("tar", "-czf", tarPath, "-C", rootDir, "payload") + tarCmd.Env = append(os.Environ(), "COPYFILE_DISABLE=1") // suppress macOS ._* resource-fork files + if output, err := tarCmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to compress payload: %w, output: %s", err, string(output)) + } + log.Printf("✅ Payload compressed to %s\n", tarPath) + + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + log.Printf("Sending payload to validators...") + if directUpload { + if err := deployPayloadDirect(cfg.Validators, tarPath, SSHKeyPath, "/root", "payload/validator_init.sh", 7*time.Minute, workers); err != nil { + if !ignoreFailed { + return err + } + log.Printf("continuing despite validator deployment errors: %v", err) + } + if err := deployObservabilityIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload); err != nil { + return err + } + if err := deployEncodersIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers); err != nil { + return err + } + if err := deployBridgesIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers); err != nil { + return err + } + if err := 
deployEvnodesIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers); err != nil { + return err + } + return deployLoadgensIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers) + } + if err := deployPayloadViaS3(cmd.Context(), rootDir, cfg.Validators, tarPath, SSHKeyPath, "/root", "payload/validator_init.sh", 7*time.Minute, cfg.S3Config, workers); err != nil { + if !ignoreFailed { + return err + } + log.Printf("continuing despite validator deployment errors: %v", err) + } + if err := deployObservabilityIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload); err != nil { + return err + } + if err := deployEncodersIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers); err != nil { + return err + } + if err := deployBridgesIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers); err != nil { + return err + } + if err := deployEvnodesIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers); err != nil { + return err + } + return deployLoadgensIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers) + }, + } + + homeDir, err := os.UserHomeDir() + if err != nil { + log.Fatalf("failed to get user home directory: %v", err) + } + defaultKeyPath := filepath.Join(homeDir, ".ssh", "id_ed25519") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-pub-key-path", "s", defaultKeyPath, "path to the user's SSH key") + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "name of the config") + cmd.Flags().BoolVar(&directUpload, "direct-payload-upload", false, "Upload payload directly to nodes instead of using S3") + cmd.Flags().BoolVar(&ignoreFailed, "ignore-failed-validators", false, "Continue deploying observability monitoring even if some validators fail") + cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of concurrent workers for parallel operations (should be > 0)") + + return cmd +} + +func deployObservabilityIfConfigured(ctx context.Context, cfg Config, rootDir, sshKeyPath string, directUpload bool) error { + if len(cfg.Observability) == 0 { + return nil + } + + observabilityNode := cfg.Observability[0] + + observabilityTarPath := filepath.Join(rootDir, "observability-payload.tar.gz") + log.Printf("Compressing observability payload to %s\n", observabilityTarPath) + tarCmd := exec.Command("tar", "-czf", observabilityTarPath, "-C", filepath.Join(rootDir, "payload"), "observability") + tarCmd.Env = append(os.Environ(), "COPYFILE_DISABLE=1") // suppress macOS ._* resource-fork files + if output, err := tarCmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to compress observability payload: %w, output: %s", err, string(output)) + } + log.Printf("✅ Observability payload compressed to %s\n", observabilityTarPath) + + log.Printf("Sending observability payload to observability monitoring node...") + var err error + if directUpload { + err = deployObservabilityPayloadDirect(observabilityNode, observabilityTarPath, sshKeyPath, "/root", 15*time.Minute) + } else { + err = deployObservabilityPayloadViaS3(ctx, rootDir, observabilityNode, observabilityTarPath, sshKeyPath, "/root", 15*time.Minute, cfg.S3Config) + } + if err != nil { + return err + } + + printGrafanaInfo(observabilityNode, rootDir) + return nil +} + +// deployBridgesIfConfigured tars the bridge-payload directory (celestia +// binary + genesis + bridge_init.sh) and ships it to each bridge +// 
instance. The init script then runs `celestia bridge init/start` in +// a tmux session and generates a JWT to /root/bridge-jwt.txt. +func deployBridgesIfConfigured(ctx context.Context, cfg Config, rootDir, sshKeyPath string, directUpload bool, workers int) error { + if len(cfg.Bridges) == 0 { + return nil + } + + bridgePayloadDir := filepath.Join(rootDir, "bridge-payload") + if _, err := os.Stat(bridgePayloadDir); os.IsNotExist(err) { + return fmt.Errorf("bridge-payload directory not found — run 'talis genesis' first") + } + + bridgeTarPath := filepath.Join(rootDir, "bridge-payload.tar.gz") + log.Printf("Compressing bridge payload to %s\n", bridgeTarPath) + tarCmd := exec.Command("tar", "-czf", bridgeTarPath, "-C", rootDir, "bridge-payload") + tarCmd.Env = append(os.Environ(), "COPYFILE_DISABLE=1") + if output, err := tarCmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to compress bridge payload: %w, output: %s", err, string(output)) + } + log.Printf("Sending bridge payload to %d bridge(s)...\n", len(cfg.Bridges)) + + if directUpload { + if err := deployPayloadDirect(cfg.Bridges, bridgeTarPath, sshKeyPath, "/root", "bridge-payload/bridge_init.sh", 7*time.Minute, workers); err != nil { + return fmt.Errorf("bridge deployment: %w", err) + } + } else { + if err := deployPayloadViaS3(ctx, rootDir, cfg.Bridges, bridgeTarPath, sshKeyPath, "/root", "bridge-payload/bridge_init.sh", 7*time.Minute, cfg.S3Config, workers); err != nil { + return fmt.Errorf("bridge deployment: %w", err) + } + } + + log.Printf("Bridge deployment complete\n") + return nil +} + +// deployLoadgensIfConfigured tars the loadgen-payload directory +// (evnode-txsim binary + templated init script) and ships it to each +// load-gen instance. The init script poll-waits for ev-node's /stats +// endpoint to become reachable, then bursts traffic at /tx for the +// configured duration, writing a final TXSIM: line to /root/txsim.log. 
+func deployLoadgensIfConfigured(ctx context.Context, cfg Config, rootDir, sshKeyPath string, directUpload bool, workers int) error { + if len(cfg.Loadgens) == 0 { + return nil + } + + lgPayloadDir := filepath.Join(rootDir, "loadgen-payload") + if _, err := os.Stat(lgPayloadDir); os.IsNotExist(err) { + return fmt.Errorf("loadgen-payload directory not found — run 'talis genesis' first") + } + + lgTarPath := filepath.Join(rootDir, "loadgen-payload.tar.gz") + log.Printf("Compressing loadgen payload to %s\n", lgTarPath) + tarCmd := exec.Command("tar", "-czf", lgTarPath, "-C", rootDir, "loadgen-payload") + tarCmd.Env = append(os.Environ(), "COPYFILE_DISABLE=1") + if output, err := tarCmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to compress loadgen payload: %w, output: %s", err, string(output)) + } + log.Printf("Sending loadgen payload to %d loadgen(s)...\n", len(cfg.Loadgens)) + + if directUpload { + if err := deployPayloadDirect(cfg.Loadgens, lgTarPath, sshKeyPath, "/root", "loadgen-payload/loadgen_init.sh", 7*time.Minute, workers); err != nil { + return fmt.Errorf("loadgen deployment: %w", err) + } + } else { + if err := deployPayloadViaS3(ctx, rootDir, cfg.Loadgens, lgTarPath, sshKeyPath, "/root", "loadgen-payload/loadgen_init.sh", 7*time.Minute, cfg.S3Config, workers); err != nil { + return fmt.Errorf("loadgen deployment: %w", err) + } + } + + log.Printf("loadgen deployment complete (init script will poll-wait for ev-node /stats then start txsim)\n") + return nil +} + +// deployEvnodesIfConfigured tars the evnode-payload directory (evnode +// binary + templated init script) and ships it to each ev-node +// instance. The init script poll-waits for the bridge JWT + fibre +// keyring, both scp'd in separately, before starting the daemon. +func deployEvnodesIfConfigured(ctx context.Context, cfg Config, rootDir, sshKeyPath string, directUpload bool, workers int) error { + if len(cfg.Evnodes) == 0 { + return nil + } + + evPayloadDir := filepath.Join(rootDir, "evnode-payload") + if _, err := os.Stat(evPayloadDir); os.IsNotExist(err) { + return fmt.Errorf("evnode-payload directory not found — run 'talis genesis' first") + } + + evTarPath := filepath.Join(rootDir, "evnode-payload.tar.gz") + log.Printf("Compressing evnode payload to %s\n", evTarPath) + tarCmd := exec.Command("tar", "-czf", evTarPath, "-C", rootDir, "evnode-payload") + tarCmd.Env = append(os.Environ(), "COPYFILE_DISABLE=1") + if output, err := tarCmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to compress evnode payload: %w, output: %s", err, string(output)) + } + log.Printf("Sending evnode payload to %d evnode(s)...\n", len(cfg.Evnodes)) + + if directUpload { + if err := deployPayloadDirect(cfg.Evnodes, evTarPath, sshKeyPath, "/root", "evnode-payload/evnode_init.sh", 7*time.Minute, workers); err != nil { + return fmt.Errorf("evnode deployment: %w", err) + } + } else { + if err := deployPayloadViaS3(ctx, rootDir, cfg.Evnodes, evTarPath, sshKeyPath, "/root", "evnode-payload/evnode_init.sh", 7*time.Minute, cfg.S3Config, workers); err != nil { + return fmt.Errorf("evnode deployment: %w", err) + } + } + + log.Printf("evnode deployment complete (init script will poll-wait for bridge JWT + fibre keyring on each box)\n") + return nil +} + +// deployEncodersIfConfigured creates a lightweight encoder-payload tar and deploys +// it to all configured encoder instances. 
+func deployEncodersIfConfigured(ctx context.Context, cfg Config, rootDir, sshKeyPath string, directUpload bool, workers int) error {
+	if len(cfg.Encoders) == 0 {
+		return nil
+	}
+
+	encoderPayloadDir := filepath.Join(rootDir, "encoder-payload")
+	if _, err := os.Stat(encoderPayloadDir); os.IsNotExist(err) {
+		return fmt.Errorf("encoder-payload directory not found — run 'talis genesis' first")
+	}
+
+	encoderTarPath := filepath.Join(rootDir, "encoder-payload.tar.gz")
+	log.Printf("Compressing encoder payload to %s\n", encoderTarPath)
+	tarCmd := exec.Command("tar", "-czf", encoderTarPath, "-C", rootDir, "encoder-payload")
+	tarCmd.Env = append(os.Environ(), "COPYFILE_DISABLE=1")
+	if output, err := tarCmd.CombinedOutput(); err != nil {
+		return fmt.Errorf("failed to compress encoder payload: %w, output: %s", err, string(output))
+	}
+	log.Printf("Sending encoder payload to %d encoder(s)...\n", len(cfg.Encoders))
+
+	if directUpload {
+		if err := deployPayloadDirect(cfg.Encoders, encoderTarPath, sshKeyPath, "/root", "encoder-payload/encoder_init.sh", 7*time.Minute, workers); err != nil {
+			return fmt.Errorf("encoder deployment: %w", err)
+		}
+	} else {
+		if err := deployPayloadViaS3(ctx, rootDir, cfg.Encoders, encoderTarPath, sshKeyPath, "/root", "encoder-payload/encoder_init.sh", 7*time.Minute, cfg.S3Config, workers); err != nil {
+			return fmt.Errorf("encoder deployment: %w", err)
+		}
+	}
+
+	log.Printf("Encoder deployment complete\n")
+	return nil
+}
+
+// printGrafanaInfo prints the Grafana URL and a pointer to where credentials can be found.
+func printGrafanaInfo(node Instance, rootDir string) {
+	envPath := filepath.Join(rootDir, "payload", "observability", "docker", ".env")
+	fmt.Println()
+	fmt.Println("Grafana available at:")
+	fmt.Printf("  http://%s:3000\n", node.PublicIP)
+	fmt.Printf("  Credentials: admin / (see %s)\n", envPath)
+	fmt.Println()
+}
+
+// deployPayloadDirect copies a local archive to each remote host, unpacks it,
+// and launches the specified remote script inside a detached tmux session.
+// It runs all operations in parallel and returns an error if any host fails.
+func deployPayloadDirect(
+	ips []Instance,
+	archivePath string, // e.g. "./payload.tar.gz"
+	sshKeyPath string, // e.g. "~/.ssh/id_ed25519"
+	remoteDir string, // e.g. "/root"
+	remoteScript string, // e.g. 
"start.sh" + timeout time.Duration, // per‐host timeout + workers int, // number of concurrent workers +) error { + var wg sync.WaitGroup + errCh := make(chan error, len(ips)) + archiveFile := path.Base(archivePath) + + counter := atomic.Uint32{} + + workerChan := make(chan struct{}, workers) + for _, inst := range ips { + workerChan <- struct{}{} + wg.Add(1) + go func(inst Instance) { + defer func() { + <-workerChan + wg.Done() + }() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + scp := exec.CommandContext(ctx, + "scp", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + archivePath, + fmt.Sprintf("root@%s:%s/", inst.PublicIP, remoteDir), + ) + if out, err := scp.CombinedOutput(); err != nil { + errCh <- fmt.Errorf("[%s:%s] scp error in region %s: %v\n%s", inst.Name, inst.PublicIP, inst.Region, err, out) + return + } + + log.Printf("sent payload to instance 📦 %s: %s\n", inst.Name, inst.PublicIP) + + remoteCmd := strings.Join([]string{ + // unpack + fmt.Sprintf("tar -xzf %s -C %s", filepath.Join(remoteDir, archiveFile), remoteDir), + // make sure script is executable + fmt.Sprintf("chmod +x %s", filepath.Join(remoteDir, remoteScript)), + // start in a named, detached tmux session + fmt.Sprintf("tmux new-session -d -s app '%s'", filepath.Join(remoteDir, remoteScript)), + }, " && ") + + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + remoteCmd, + ) + if out, err := ssh.CombinedOutput(); err != nil { + errCh <- fmt.Errorf("[%s:%s] ssh error in region %s: %v\n%s", inst.Name, inst.PublicIP, inst.Region, err, out) + return + } + log.Printf("started instance ✅ %s: %s (total %d/%d)\n", inst.Name, inst.PublicIP, counter.Add(1), len(ips)) + }(inst) + } + + wg.Wait() + close(errCh) + + var errs []error //nolint:prealloc + for e := range errCh { + errs = append(errs, e) + } + if len(errs) > 0 { + var sb strings.Builder + sb.WriteString("deployment errors:\n") + for _, e := range errs { + sb.WriteString("- " + e.Error() + "\n") + } + return errors.New(sb.String()) + } + return nil +} + +// deployPayloadViaS3 uploads the payload to S3 first, then has each node download it +func deployPayloadViaS3( + ctx context.Context, + rootDir string, + ips []Instance, + archivePath string, + sshKeyPath string, + remoteDir string, + remoteScript string, + timeout time.Duration, + s3cfg S3Config, + workers int, +) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + s3Client, err := createS3Client(ctx, cfg) + if err != nil { + return fmt.Errorf("failed to create S3 client: %w", err) + } + + log.Printf("Uploading payload to S3...\n") + s3URL, err := uploadToS3(ctx, s3Client, s3cfg, archivePath) + if err != nil { + return fmt.Errorf("failed to upload to S3: %w", err) + } + + log.Printf("✅ Payload uploaded to S3: %s\n", s3URL) + + var wg sync.WaitGroup + errCh := make(chan error, len(ips)) + counter := atomic.Uint32{} + workersChan := make(chan struct{}, workers) + + for _, inst := range ips { + wg.Add(1) + go func(inst Instance) { + workersChan <- struct{}{} + defer func() { + wg.Done() + <-workersChan + }() + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + archiveFile := filepath.Base(archivePath) + remoteCmd := strings.Join([]string{ + fmt.Sprintf("curl -L '%s' -o %s", s3URL, filepath.Join(remoteDir, archiveFile)), 
+ fmt.Sprintf("tar -xzf %s -C %s", filepath.Join(remoteDir, archiveFile), remoteDir), + fmt.Sprintf("chmod +x %s", filepath.Join(remoteDir, remoteScript)), + fmt.Sprintf("tmux new-session -d -s app '%s'", filepath.Join(remoteDir, remoteScript)), + }, " && ") + + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + remoteCmd, + ) + if out, err := ssh.CombinedOutput(); err != nil { + errCh <- fmt.Errorf("[%s:%s] ssh error in region %s: %v\n%s", inst.Name, inst.PublicIP, inst.Region, err, out) + return + } + log.Printf("started instance ✅ %s: %s (total %d/%d)\n", inst.Name, inst.PublicIP, counter.Add(1), len(ips)) + }(inst) + } + + wg.Wait() + close(errCh) + + errs := make([]error, 0) + for e := range errCh { + errs = append(errs, e) + } + if len(errs) > 0 { + var sb strings.Builder + sb.WriteString("deployment errors:\n") + for _, e := range errs { + sb.WriteString("- " + e.Error() + "\n") + } + return errors.New(sb.String()) + } + return nil +} + +// deployObservabilityPayloadDirect copies an observability archive to the observability monitoring host, unpacks it, +// installs prerequisites, and launches the observability stack in a detached tmux session. +func deployObservabilityPayloadDirect( + inst Instance, + archivePath string, + sshKeyPath string, + remoteDir string, + timeout time.Duration, +) error { + archiveFile := path.Base(archivePath) + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + scp := exec.CommandContext(ctx, + "scp", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + archivePath, + fmt.Sprintf("root@%s:%s/", inst.PublicIP, remoteDir), + ) + if out, err := scp.CombinedOutput(); err != nil { + return fmt.Errorf("[%s:%s] scp error in region %s: %v\n%s", inst.Name, inst.PublicIP, inst.Region, err, out) + } + + log.Printf("sent observability payload to instance 📦 %s: %s\n", inst.Name, inst.PublicIP) + + remoteCmd := strings.Join([]string{ + fmt.Sprintf("tar -xzf %s -C %s", filepath.Join(remoteDir, archiveFile), remoteDir), + fmt.Sprintf("chmod +x %s %s", + filepath.Join(remoteDir, "observability/install_metrics.sh"), + filepath.Join(remoteDir, "observability/start_metrics.sh"), + ), + filepath.Join(remoteDir, "observability/install_metrics.sh"), + filepath.Join(remoteDir, "observability/start_metrics.sh"), + }, " && ") + + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + remoteCmd, + ) + if out, err := ssh.CombinedOutput(); err != nil { + return fmt.Errorf("[%s:%s] ssh error in region %s: %v\n%s", inst.Name, inst.PublicIP, inst.Region, err, out) + } + log.Printf("started observability instance ✅ %s: %s\n", inst.Name, inst.PublicIP) + + return nil +} + +// deployObservabilityPayloadViaS3 uploads the observability payload to S3 first, then has the node download it. 
+func deployObservabilityPayloadViaS3( + ctx context.Context, + rootDir string, + inst Instance, + archivePath string, + sshKeyPath string, + remoteDir string, + timeout time.Duration, + s3cfg S3Config, +) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + s3Client, err := createS3Client(ctx, cfg) + if err != nil { + return fmt.Errorf("failed to create S3 client: %w", err) + } + + log.Printf("Uploading observability payload to S3...\n") + s3URL, err := uploadToS3(ctx, s3Client, s3cfg, archivePath) + if err != nil { + return fmt.Errorf("failed to upload observability payload to S3: %w", err) + } + + log.Printf("✅ Observability payload uploaded to S3: %s\n", s3URL) + + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + archiveFile := filepath.Base(archivePath) + remoteCmd := strings.Join([]string{ + fmt.Sprintf("curl -L '%s' -o %s", s3URL, filepath.Join(remoteDir, archiveFile)), + fmt.Sprintf("tar -xzf %s -C %s", filepath.Join(remoteDir, archiveFile), remoteDir), + fmt.Sprintf("chmod +x %s %s", + filepath.Join(remoteDir, "observability/install_metrics.sh"), + filepath.Join(remoteDir, "observability/start_metrics.sh"), + ), + filepath.Join(remoteDir, "observability/install_metrics.sh"), + filepath.Join(remoteDir, "observability/start_metrics.sh"), + }, " && ") + + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + remoteCmd, + ) + if out, err := ssh.CombinedOutput(); err != nil { + return fmt.Errorf("[%s:%s] ssh error in region %s: %v\n%s", inst.Name, inst.PublicIP, inst.Region, err, out) + } + log.Printf("started observability instance ✅ %s: %s\n", inst.Name, inst.PublicIP) + + return nil +} + +func uploadToS3(ctx context.Context, client *s3.Client, cfg S3Config, localPath string) (string, error) { + file, err := os.Open(localPath) + if err != nil { + return "", fmt.Errorf("failed to open file: %w", err) + } + defer file.Close() + + filename := filepath.Base(localPath) + uploader := manager.NewUploader(client) + + if _, err := uploader.Upload(ctx, &s3.PutObjectInput{ + Bucket: &cfg.BucketName, + Key: &filename, + Body: file, + }); err != nil { + return "", fmt.Errorf("failed to upload file: %w", err) + } + + // Return a presigned GET URL valid for an hour so remote hosts can curl + // the object without the bucket/object needing public-read ACLs. Works + // for real AWS S3 (where public access is blocked by default) and for + // S3-compatible providers like DigitalOcean Spaces. 
+ presign := s3.NewPresignClient(client) + req, err := presign.PresignGetObject(ctx, &s3.GetObjectInput{ + Bucket: &cfg.BucketName, + Key: &filename, + }, s3.WithPresignExpires(time.Hour)) + if err != nil { + return "", fmt.Errorf("failed to presign GET: %w", err) + } + + return req.URL, nil +} + +func downCmd() *cobra.Command { + var rootDir string + var cfgPath string + var SSHPubKeyPath string + var SSHKeyName string + var DOAPIToken string + var GCProject string + var GCKeyJSONPath string + var AWSRegion string + var workers int + var all bool + + cmd := &cobra.Command{ + Use: "down", + Short: "Uses the config to spin down a distributed network", + Long: "Destroys the Talis network with the provided configuration.", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil && !all { + return fmt.Errorf("failed to load config: %w", err) + } + + // overwrite the config values if flags or env vars are set + // flag > env > config + cfg.DigitalOceanToken = resolveValue(DOAPIToken, EnvVarDigitalOceanToken, cfg.DigitalOceanToken) + cfg.GoogleCloudProject = resolveValue(GCProject, EnvVarGoogleCloudProject, cfg.GoogleCloudProject) + cfg.GoogleCloudKeyJSONPath = resolveValue(GCKeyJSONPath, EnvVarGoogleCloudKeyJSONPath, cfg.GoogleCloudKeyJSONPath) + cfg.AWSRegion = resolveValue(AWSRegion, EnvVarAWSRegion, cfg.AWSRegion) + + if all { + return destroyAllInstances(cmd.Context(), cfg, workers) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + cfg.SSHKeyName = resolveValue(SSHKeyName, EnvVarSSHKeyName, cfg.SSHKeyName) + cfg.SSHPubKeyPath = resolveValue(SSHPubKeyPath, EnvVarSSHKeyPath, cfg.SSHPubKeyPath) + + client, err := NewClient(cfg) + if err != nil { + return fmt.Errorf("failed to create client: %w", err) + } + + if err := client.Down(cmd.Context(), workers); err != nil { + return fmt.Errorf("failed to spin down network: %w", err) + } + + return nil + }, + } + + cmd.Flags().StringVarP(&SSHPubKeyPath, "ssh-pub-key-path", "s", "", "path to the user's SSH public key") + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "name of the config") + cmd.Flags().StringVarP(&SSHKeyName, "ssh-key-name", "n", "", "name for the SSH key") + cmd.Flags().StringVarP(&DOAPIToken, "do-api-token", "t", "", "digital ocean api token (defaults to config or env)") + cmd.Flags().StringVar(&GCProject, "gc-project", "", "google cloud project (defaults to config or env)") + cmd.Flags().StringVar(&GCKeyJSONPath, "gc-key-json-path", "", "path to google cloud service account key JSON file (defaults to config or env)") + cmd.Flags().StringVar(&AWSRegion, "aws-region", "", "AWS default region for EC2 (defaults to config or AWS_DEFAULT_REGION)") + cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of concurrent workers for parallel operations (should be > 0)") + cmd.Flags().BoolVar(&all, "all", false, "destroy all talis instances across all providers and all experiments") + + return cmd +} + +// resolveValue selects a value based on priority: flag > env > config +func resolveValue(flagVal, envKey, configVal string) string { + if flagVal != "" { + return flagVal + } + if env := os.Getenv(envKey); env != "" { + if configVal != "" { + log.Printf("Using %s from environment variable instead of config", envKey) + } + return env + } + return configVal +} + +func listCmd() *cobra.Command { + var rootDir string + var cfgPath 
string + var DOAPIToken string + var GCProject string + var GCKeyJSONPath string + var AWSRegion string + + cmd := &cobra.Command{ + Use: "list", + Short: "Lists the instances in the network", + Long: "Lists the instances in the network. Can be used to see if someone is running experiments at the moment", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + // overwrite the config values if flags or env vars are set + // flag > env > config + cfg.DigitalOceanToken = resolveValue(DOAPIToken, EnvVarDigitalOceanToken, cfg.DigitalOceanToken) + cfg.GoogleCloudProject = resolveValue(GCProject, EnvVarGoogleCloudProject, cfg.GoogleCloudProject) + cfg.GoogleCloudKeyJSONPath = resolveValue(GCKeyJSONPath, EnvVarGoogleCloudKeyJSONPath, cfg.GoogleCloudKeyJSONPath) + cfg.AWSRegion = resolveValue(AWSRegion, EnvVarAWSRegion, cfg.AWSRegion) + + client, err := NewClient(cfg) + if err != nil { + return fmt.Errorf("failed to create client: %w", err) + } + + return client.List(cmd.Context()) + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "name of the config") + cmd.Flags().StringVarP(&DOAPIToken, "do-api-token", "t", "", "digital ocean api token (defaults to config or env)") + cmd.Flags().StringVar(&GCProject, "gc-project", "", "google cloud project (defaults to config or env)") + cmd.Flags().StringVar(&GCKeyJSONPath, "gc-key-json-path", "", "path to google cloud service account key JSON file (defaults to config or env)") + cmd.Flags().StringVar(&AWSRegion, "aws-region", "", "AWS default region for EC2 (defaults to config or AWS_DEFAULT_REGION)") + + return cmd +} + +func checkForRunningExperiments(ctx context.Context, cfg Config) error { + var hasRunningExperiments bool + + if cfg.DigitalOceanToken != "" { + tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: cfg.DigitalOceanToken}) + doClient := godo.NewClient(oauth2.NewClient(ctx, tokenSource)) + running, err := checkForRunningDOExperiments(ctx, doClient, cfg.Experiment, cfg.ChainID) + if err != nil { + log.Printf("⚠️ Warning: failed to check DigitalOcean for running experiments: %v", err) + } else if running { + hasRunningExperiments = true + log.Printf("⚠️ Found experiment '%s' with chainID '%s' already running in DigitalOcean", cfg.Experiment, cfg.ChainID) + } + } + + if cfg.GoogleCloudProject != "" { + opts, err := gcClientOptions(cfg) + if err != nil { + log.Printf("⚠️ Warning: failed to create Google Cloud client options: %v", err) + } else { + running, err := checkForRunningGCExperiments(ctx, cfg.GoogleCloudProject, opts, cfg.Experiment, cfg.ChainID) + if err != nil { + log.Printf("⚠️ Warning: failed to check Google Cloud for running experiments: %v", err) + } else if running { + hasRunningExperiments = true + log.Printf("⚠️ Found experiment '%s' with chainID '%s' already running in Google Cloud", cfg.Experiment, cfg.ChainID) + } + } + } + + if cfg.AWSRegion != "" { + running, err := checkForRunningAWSExperiments(ctx, true, cfg.Experiment, cfg.ChainID) + if err != nil { + log.Printf("⚠️ Warning: failed to check AWS for running experiments: %v", err) + } else if running { + hasRunningExperiments = true + log.Printf("⚠️ Found experiment '%s' with chainID '%s' already running in AWS", cfg.Experiment, cfg.ChainID) + } + } + + if hasRunningExperiments { + return fmt.Errorf("experiment '%s' with 
chainID '%s' is already running", cfg.Experiment, cfg.ChainID) + } + + return nil +} + +func destroyAllInstances(ctx context.Context, cfg Config, workers int) error { + var wg sync.WaitGroup + // One slot per potential provider goroutine (DO + GCP + AWS). Sized + // to match max writers so a three-way all-fail doesn't deadlock on + // errCh<- (wg.Wait() below blocks on the goroutine, which blocks on + // the channel send if capacity < writers). + errCh := make(chan error, 3) + + if cfg.DigitalOceanToken != "" { + wg.Go(func() { + log.Println("Destroying all DigitalOcean instances...") + tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: cfg.DigitalOceanToken}) + doClient := godo.NewClient(oauth2.NewClient(ctx, tokenSource)) + if _, err := destroyAllTalisDroplets(ctx, doClient, workers); err != nil { + errCh <- fmt.Errorf("DigitalOcean: %w", err) + } + }) + } + + if cfg.GoogleCloudProject != "" { + wg.Go(func() { + log.Println("Destroying all Google Cloud instances...") + opts, err := gcClientOptions(cfg) + if err != nil { + errCh <- fmt.Errorf("google Cloud client options: %w", err) + return + } + if _, err := destroyAllTalisGCInstances(ctx, cfg.GoogleCloudProject, opts, workers); err != nil { + errCh <- fmt.Errorf("google Cloud: %w", err) + } + }) + } + + if cfg.AWSRegion != "" || os.Getenv(EnvVarAWSAccessKeyID) != "" { + wg.Go(func() { + log.Println("Destroying all AWS instances...") + if _, err := destroyAllTalisAWSInstances(ctx, workers); err != nil { + errCh <- fmt.Errorf("AWS: %w", err) + } + }) + } + + wg.Wait() + close(errCh) + + errs := make([]error, 0, 3) + for err := range errCh { + errs = append(errs, err) + } + + if len(errs) > 0 { + var sb strings.Builder + sb.WriteString("errors destroying instances:\n") + for _, err := range errs { + sb.WriteString("- " + err.Error() + "\n") + } + return errors.New(sb.String()) + } + + log.Println("✅ All talis instances destroyed") + return nil +} diff --git a/tools/talis/digital_ocean.go b/tools/talis/digital_ocean.go new file mode 100644 index 0000000000..bb17711d00 --- /dev/null +++ b/tools/talis/digital_ocean.go @@ -0,0 +1,529 @@ +package main + +import ( + "context" + "fmt" + "log" + "math/rand" + "net/http" + "slices" + "strings" + "sync" + "time" + + "github.com/digitalocean/godo" +) + +const ( + DODefaultValidatorSlug = "c2-16vcpu-32gb" + DODefaultEncoderSlug = "c2-8vcpu-16gb" + DODefaultBridgeSlug = "c2-8vcpu-16gb" + DODefaultEvnodeSlug = "c2-8vcpu-16gb" + DODefaultLoadgenSlug = "c2-8vcpu-16gb" + DODefaultObservabilitySlug = "s-2vcpu-4gb" + DODefaultImage = "ubuntu-24-04-x64" + RandomRegion = "random" +) + +var ( + DORegions = []string{ + "nyc1", "nyc3", "tor1", "sfo2", "sfo3", "ams3", "sgp1", "lon1", "fra1", "syd1", + } + + DOLargeRegions = map[string]int{ + "nyc3": 6, "tor1": 6, "sfo2": 2, "sfo3": 6, "ams3": 8, "sgp1": 4, "lon1": 8, "fra1": 6, "syd1": 6, + } + + DOMediumRegions = map[string]int{ + "nyc3": 2, "tor1": 2, "sfo3": 2, "ams3": 2, "lon1": 2, + } + + DOSmallRegions = map[string]int{ + "ams3": 1, "tor1": 1, "nyc3": 1, "lon1": 1, + } +) + +func NewDigitalOceanValidator(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomDORegion() + } + i := NewBaseInstance(Validator) + i.Provider = DigitalOcean + i.Slug = DODefaultValidatorSlug + i.Region = region + return i +} + +func NewDigitalOceanEncoder(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomDORegion() + } + i := NewBaseInstance(Encoder) + i.Provider = DigitalOcean + i.Slug = 
DODefaultEncoderSlug + i.Region = region + return i +} + +func NewDigitalOceanBridge(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomDORegion() + } + i := NewBaseInstance(Bridge) + i.Provider = DigitalOcean + i.Slug = DODefaultBridgeSlug + i.Region = region + return i +} + +func NewDigitalOceanEvnode(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomDORegion() + } + i := NewBaseInstance(Evnode) + i.Provider = DigitalOcean + i.Slug = DODefaultEvnodeSlug + i.Region = region + return i +} + +func NewDigitalOceanLoadgen(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomDORegion() + } + i := NewBaseInstance(Loadgen) + i.Provider = DigitalOcean + i.Slug = DODefaultLoadgenSlug + i.Region = region + return i +} + +func NewDigitalOceanObservability(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomDORegion() + } + i := NewBaseInstance(Observability) + i.Provider = DigitalOcean + i.Slug = DODefaultObservabilitySlug + i.Region = region + return i +} + +func RandomDORegion() string { + return DORegions[rand.Intn(len(DORegions))] +} + +// GetDOSSHKeyMeta checks if the provided raw SSH public key is registered in DigitalOcean +// and returns its ID and Name. If not found, returns an error instructing to upload the key. +func GetDOSSHKeyMeta(ctx context.Context, client *godo.Client, publicKey string) (godo.Key, error) { + pubKeySplit := strings.Split(publicKey, " ") + if len(pubKeySplit) <= 1 { + return godo.Key{}, fmt.Errorf("invalid public key format") + } + publicKey = strings.Join(pubKeySplit[:2], "") + + // Pagination options + opt := &godo.ListOptions{PerPage: 200} + + for { + keys, resp, err := client.Keys.List(ctx, opt) + if err != nil { + return godo.Key{}, fmt.Errorf("failed to list SSH keys: %w", err) + } + + for _, key := range keys { + // only compare the first two parts of the public key. The third part is the host + // which can be ignored. + if strings.Join(strings.Split(key.PublicKey, " ")[:2], "") == publicKey { + return key, nil + } + } + + // Break if we're at the last page + if resp.Links.IsLastPage() { + break + } + // Advance to next page + page, err := resp.Links.CurrentPage() + if err != nil { + return godo.Key{}, fmt.Errorf("unable to parse pagination: %w", err) + } + opt.Page = page + 1 + } + + return godo.Key{}, fmt.Errorf( + "ssh public key not found in DigitalOcean. Please upload it via the control panel or API before proceeding", + ) +} + +// CreateDroplets launches all droplets in parallel, waits for their IPs, and +// returns the filled-out []Instance slice. 
+func CreateDroplets(ctx context.Context, client *godo.Client, insts []Instance, key godo.Key, workers int) ([]Instance, error) { + type result struct { + inst Instance + err error + timeRequired time.Duration + } + + insts, existing, err := filterExistingInstances(ctx, client, insts) + if err != nil { + return nil, err + } + + if len(existing) > 0 { + log.Println("Existing instances found, so they are not being created.") + for _, v := range existing { + log.Println("Skipping", v.Name, v.PublicIP, v.Tags) + } + } + + total := len(insts) + results := make(chan result, total) + workerChan := make(chan struct{}, workers) + var wg sync.WaitGroup + wg.Add(total) + + for _, v := range insts { + go func() { + workerChan <- struct{}{} + defer func() { + <-workerChan + wg.Done() + }() + + ctx, cancel := context.WithTimeout(ctx, 7*time.Minute) + defer cancel() + + req := &godo.DropletCreateRequest{ + Name: v.Name, + Region: v.Region, + Size: v.Slug, + Image: godo.DropletCreateImage{ + Slug: DODefaultImage, + }, + SSHKeys: []godo.DropletCreateSSHKey{{ID: key.ID, Fingerprint: key.Fingerprint}}, + Tags: v.Tags, + } + + start := time.Now() + + log.Println("Creating droplet", v.Name, "in region", v.Region, start.Format(time.RFC3339)) + + d, _, err := client.Droplets.Create(ctx, req) + if err != nil { + results <- result{inst: v, err: fmt.Errorf("create %s: %w", v.Name, err)} + return + } + + pubIP, privIP, err := waitForNetworkIP(ctx, client, d.ID) + if err != nil { + results <- result{inst: v, err: fmt.Errorf("public IP %s: %w", v.Name, err)} + return + } + + v.PublicIP = pubIP + v.PrivateIP = privIP + results <- result{inst: v, err: nil, timeRequired: time.Since(start)} + }() + } + + go func() { + wg.Wait() + close(results) + }() + + var created []Instance + for res := range results { + if res.err != nil { + fmt.Printf("❌ %s failed after %v %v\n", res.inst.Name, res.timeRequired, res.err) + } else { + created = append(created, res.inst) + fmt.Printf("✅ %s is up (public=%s) in %v\n", + res.inst.Name, res.inst.PublicIP, res.timeRequired) + } + fmt.Printf("---- Progress: %d/%d\n", len(created), total) + } + + return created, nil +} + +func filterExistingInstances(ctx context.Context, client *godo.Client, insts []Instance) ([]Instance, []Instance, error) { + droplets, err := listAllDroplets(ctx, client) + if err != nil { + return nil, nil, fmt.Errorf("listing before delete: %w", err) + } + + var existing []Instance //nolint:prealloc + var newInsts []Instance //nolint:prealloc + for _, inst := range insts { + var exists bool + experimentTag := GetExperimentTag(inst.Tags) + if experimentTag == "" { + newInsts = append(newInsts, inst) + continue + } + for _, d := range droplets { + if slices.Contains(d.Tags, experimentTag) { + exists = true + break + } + } + + if !exists { + newInsts = append(newInsts, inst) + continue + } + + existing = append(existing, inst) + } + + return newInsts, existing, nil +} + +// waitForNetworkIP polls until the droplet has an IPv4 of the given type ("public" or "private") +// or ctx is done. 
+func waitForNetworkIP(ctx context.Context, client *godo.Client, dropletID int) (pub, priv string, err error) { + ticker := time.NewTicker(4 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return "", "", ctx.Err() + case <-ticker.C: + d, _, err := client.Droplets.Get(ctx, dropletID) + if err != nil { + return "", "", err + } + for _, net := range d.Networks.V4 { + if net.Type == "public" { + pub = net.IPAddress + } + if net.Type == "private" { + priv = net.IPAddress + } + if pub != "" && priv != "" { + return pub, priv, nil + } + } + } + } +} + +func DestroyDroplets(ctx context.Context, client *godo.Client, insts []Instance, workers int) ([]Instance, error) { + droplets, err := listAllDroplets(ctx, client) + if err != nil { + return nil, fmt.Errorf("listing droplets: %w", err) + } + + return destroyDropletsByMatch(ctx, client, droplets, insts, workers, matchByExperimentTag) +} + +func destroyAllTalisDroplets(ctx context.Context, client *godo.Client, workers int) ([]Instance, error) { + droplets, err := listAllDroplets(ctx, client) + if err != nil { + return nil, fmt.Errorf("listing droplets: %w", err) + } + + var talisInstances []Instance + for _, d := range droplets { + if slices.Contains(d.Tags, "talis") { + publicIP := "" + for _, net := range d.Networks.V4 { + if net.Type == "public" { + publicIP = net.IPAddress + break + } + } + talisInstances = append(talisInstances, Instance{ + Name: d.Name, + PublicIP: publicIP, + }) + } + } + + if len(talisInstances) == 0 { + log.Println("No talis droplets found to destroy") + return nil, nil + } + + return destroyDropletsByMatch(ctx, client, droplets, talisInstances, workers, matchByName) +} + +type dropletMatcher func(inst Instance, d godo.Droplet) bool + +func matchByExperimentTag(inst Instance, d godo.Droplet) bool { + experimentTag := GetExperimentTag(inst.Tags) + return experimentTag != "" && slices.Contains(d.Tags, experimentTag) +} + +func matchByName(inst Instance, d godo.Droplet) bool { + return d.Name == inst.Name +} + +func destroyDropletsByMatch(ctx context.Context, client *godo.Client, droplets []godo.Droplet, insts []Instance, workers int, matcher dropletMatcher) ([]Instance, error) { + type result struct { + inst Instance + err error + timeRequired time.Duration + } + + results := make(chan result, len(insts)) + workerChan := make(chan struct{}, workers) + var wg sync.WaitGroup + wg.Add(len(insts)) + + for _, inst := range insts { + go func(inst Instance) { + workerChan <- struct{}{} + defer func() { + <-workerChan + wg.Done() + }() + start := time.Now() + + fmt.Println("⏳ Deleting droplet", inst.Name, inst.PublicIP) + + delCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + + var matchIDs []int + for _, d := range droplets { + if matcher(inst, d) { + matchIDs = append(matchIDs, d.ID) + } + } + + if len(matchIDs) > 1 { + results <- result{inst: inst, err: fmt.Errorf("multiple droplets match %s", inst.Name)} + } + + if len(matchIDs) == 0 { + results <- result{inst: inst, err: fmt.Errorf("no droplets found for %s", inst.Name)} + return + } + + for _, id := range matchIDs { + _, err := client.Droplets.Delete(delCtx, id) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("delete %s: %w", inst.Name, err)} + return + } + + if err := waitForDeletion(delCtx, client, id); err != nil { + results <- result{inst: inst, err: fmt.Errorf("confirm delete %s: %w", inst.Name, err)} + return + } + + results <- result{inst: inst, err: nil, timeRequired: time.Since(start)} + } + 
}(inst)
+	}
+
+	go func() {
+		wg.Wait()
+		close(results)
+	}()
+
+	var removed []Instance
+	var failed []result
+	for res := range results {
+		if res.err != nil {
+			fmt.Printf("❌ %s failed to delete after %v: %v\n",
+				res.inst.Name, res.timeRequired, res.err)
+			failed = append(failed, res)
+		} else {
+			removed = append(removed, res.inst)
+			fmt.Printf("✅ %s deleted (took %v)\n", res.inst.Name, res.timeRequired)
+		}
+		fmt.Printf("---- Progress: %d/%d\n", len(removed)+len(failed), len(insts))
+	}
+
+	return removed, nil
+}
+
+// waitForDeletion polls until Get() returns a 404 Not Found or ctx is done.
+func waitForDeletion(ctx context.Context, client *godo.Client, dropletID int) error {
+	ticker := time.NewTicker(4 * time.Second)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-ticker.C:
+			_, resp, err := client.Droplets.Get(ctx, dropletID)
+			if err != nil {
+				// godo returns a non-nil resp when it's an HTTP error
+				if resp != nil && resp.StatusCode == http.StatusNotFound {
+					return nil
+				}
+				// other errors: continue polling or exit?
+				return err
+			}
+			// still exists; try again
+		}
+	}
+}
+
+// listAllDroplets pages through your account’s droplets.
+func listAllDroplets(ctx context.Context, client *godo.Client) ([]godo.Droplet, error) {
+	var all []godo.Droplet
+	opt := &godo.ListOptions{PerPage: 200}
+	for {
+		page, resp, err := client.Droplets.List(ctx, opt)
+		if err != nil {
+			return nil, err
+		}
+		all = append(all, page...)
+		if resp.Links == nil || resp.Links.IsLastPage() {
+			break
+		}
+		pageNum, _ := resp.Links.CurrentPage()
+		opt.Page = pageNum + 1
+	}
+	return all, nil
+}
+
+// hasAllTags returns true if candidate contains every tag in want.
+func hasAllTags(candidate, want []string) bool {
+	tagset := make(map[string]struct{}, len(candidate))
+	for _, t := range candidate {
+		tagset[t] = struct{}{}
+	}
+	for _, w := range want {
+		if _, ok := tagset[w]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+func checkForRunningDOExperiments(ctx context.Context, client *godo.Client, experimentID, chainID string) (bool, error) {
+	if client == nil {
+		return false, nil
+	}
+
+	droplets, err := listAllDroplets(ctx, client)
+	if err != nil {
+		return false, fmt.Errorf("failed to list droplets: %w", err)
+	}
+
+	for _, d := range droplets {
+		if slices.Contains(d.Tags, "talis") && hasExperimentTag(d.Tags, experimentID, chainID) {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+func hasExperimentTag(tags []string, experimentID, chainID string) bool {
+	for _, tag := range tags {
+		if (strings.HasPrefix(tag, "validator-") || strings.HasPrefix(tag, "bridge-") || strings.HasPrefix(tag, "light-") || strings.HasPrefix(tag, "encoder-")) &&
+			strings.Contains(tag, experimentID) && strings.Contains(tag, chainID) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/tools/talis/download.go b/tools/talis/download.go
new file mode 100644
index 0000000000..99284d0326
--- /dev/null
+++ b/tools/talis/download.go
@@ -0,0 +1,261 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"sync"
+
+	"github.com/spf13/cobra"
+)
+
+func downloadCmd() *cobra.Command {
+	var (
+		rootDir    string
+		cfgPath    string
+		SSHKeyPath string
+		nodes      string
+		table      string
+		workers    int
+		noCompress bool
+	)
+
+	cmd := &cobra.Command{
+		Use:   "download -n <nodes> -t <tables>",
", + Short: "Download a file from the Talis network", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators (nodes) found in config") + } + + nodes, err := filterMatchingInstances(cfg.Validators, nodes) + if err != nil { + return fmt.Errorf("failed to filter nodes: %w", err) + } + + if len(nodes) == 0 { + return fmt.Errorf("no matching nodes found") + } + + baseTracesRemotePath := "/root/.celestia-app/data/traces" + remotePaths := []string{} + switch table { + case "logs": + remotePaths = append(remotePaths, "/root/logs") + case "latency-monitor": + remotePaths = append(remotePaths, "/root/talis-latency-monitor.log") + case "txsim": + remotePaths = append(remotePaths, "/root/talis-txsim.log") + case "*", "": + path := filepath.Join(baseTracesRemotePath, "*") + remotePaths = append(remotePaths, path) + default: + if strings.Contains(table, ",") { + tables := strings.SplitSeq(table, ",") + for table := range tables { + remotePaths = append(remotePaths, filepath.Join(baseTracesRemotePath, table+".jsonl")) + } + } else { + remotePaths = append(remotePaths, filepath.Join(baseTracesRemotePath, table+".jsonl")) + } + } + + workers := make(chan struct{}, workers) + var wg sync.WaitGroup + for _, node := range nodes { + wg.Add(1) + go func() { + workers <- struct{}{} + defer func() { + wg.Done() + <-workers + }() + localPath := filepath.Join(rootDir, "data/", node.Name) + if strings.Contains(table, ",") { + filepath.Join(localPath, "traces") + } + if err := os.MkdirAll(localPath, 0o755); err != nil { + fmt.Printf("failed to create directory %s: %v\n", localPath, err) + return + } + if noCompress { + for _, remotePath := range remotePaths { + err := sftpDownload(remotePath, localPath, "root", node.PublicIP, SSHKeyPath) + if err != nil { + fmt.Printf("failed to download from %s: %v\n", node.PublicIP, err) + } + } + } else { + if err := compressAndDownload(table, localPath, "root", node.PublicIP, SSHKeyPath); err != nil { + fmt.Printf("failed to download from %s: %v\n", node.PublicIP, err) + return + } + } + if table == "logs" { + // usually, the logs from tmux also include color codes. So we will clean them up. + logFile := filepath.Join(localPath, "logs") + content, err := os.ReadFile(logFile) + if err != nil { + fmt.Printf("Error reading file: %v\n", err) + return + } + cleaned := stripANSI(string(content)) + // Write back to the same file + err = os.WriteFile(logFile, []byte(cleaned), 0o644) + if err != nil { + fmt.Printf("Error writing file: %v\n", err) + return + } + } + }() + } + + wg.Wait() + + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory containing your config") + cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "path to your network config file") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "override path to your SSH private key") + cmd.Flags().StringVarP(&nodes, "nodes", "n", "*", "specify the node(s) to download from. * or specific nodes.") + cmd.Flags().StringVarP(&table, "tables", "t", "*", "specify tables to download (comma-separated) or logs to download logs. 
default is all tables.") + cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of concurrent workers for parallel operations (should be > 0)") + cmd.Flags().BoolVar(&noCompress, "no-compress", false, "disable remote compression before download (compression is enabled by default)") + + cmd.AddCommand(downloadS3DataCmd()) + + return cmd +} + +// compressAndDownload compresses data on the remote server using xz -6 +// before downloading, then extracts locally. This significantly reduces +// bandwidth for JSONL trace files which compress very well (often 15-25x). +func compressAndDownload(table, localPath, user, host, sshKeyPath string) error { + baseTracesRemotePath := "/root/.celestia-app/data/traces" + remoteArchive := "/tmp/talis-traces.tar.xz" + + var compressCmd string + switch table { + case "logs": + compressCmd = fmt.Sprintf("tar -cf - -C /root logs | xz -6 -T0 > %s", remoteArchive) + case "latency-monitor": + compressCmd = fmt.Sprintf("tar -cf - -C /root talis-latency-monitor.log | xz -6 -T0 > %s", remoteArchive) + case "txsim": + compressCmd = fmt.Sprintf("tar -cf - -C /root talis-txsim.log | xz -6 -T0 > %s", remoteArchive) + case "*", "": + compressCmd = fmt.Sprintf("tar -cf - -C %s . | xz -6 -T0 > %s", baseTracesRemotePath, remoteArchive) + default: + var files []string + if strings.Contains(table, ",") { + for t := range strings.SplitSeq(table, ",") { + files = append(files, strings.TrimSpace(t)+".jsonl") + } + } else { + files = append(files, table+".jsonl") + } + compressCmd = fmt.Sprintf("tar -cf - -C %s %s | xz -6 -T0 > %s", + baseTracesRemotePath, strings.Join(files, " "), remoteArchive) + } + + fmt.Printf("[%s] Compressing data on remote server...\n", host) + out, err := sshExec(user, host, sshKeyPath, compressCmd) + if err != nil { + return fmt.Errorf("remote compression failed: %v\n%s", err, string(out)) + } + + fmt.Printf("[%s] Downloading compressed archive...\n", host) + if err := sftpDownload(remoteArchive, localPath, user, host, sshKeyPath); err != nil { + _, _ = sshExec(user, host, sshKeyPath, "rm -f "+remoteArchive) + return fmt.Errorf("download failed: %v", err) + } + + localArchive := filepath.Join(localPath, filepath.Base(remoteArchive)) + fmt.Printf("[%s] Extracting archive...\n", host) + extractCmd := exec.Command("tar", "-xJf", localArchive, "-C", localPath) + if extractOut, err := extractCmd.CombinedOutput(); err != nil { + return fmt.Errorf("local extraction failed: %v\n%s", err, string(extractOut)) + } + + os.Remove(localArchive) + _, _ = sshExec(user, host, sshKeyPath, "rm -f "+remoteArchive) + + fmt.Printf("[%s] Download complete.\n", host) + return nil +} + +// sshExec runs a command on a remote host via SSH and returns the combined output. 
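+// The combined stdout/stderr is returned even when the command fails, so
+// callers can include it in their error messages.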
+func sshExec(user, host, sshKeyPath, command string) ([]byte, error) { + cmd := exec.Command("ssh", + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-i", sshKeyPath, + fmt.Sprintf("%s@%s", user, host), + command, + ) + return cmd.CombinedOutput() +} + +func sftpDownload(remotePath, localPath, user, host, sshKeyPath string) error { + target := fmt.Sprintf("%s@%s:%s", user, host, remotePath) + + // Use `-r` always — safe for both files and dirs in practice + cmd := exec.Command("sftp", + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-i", sshKeyPath, + "-r", target, + localPath, + ) + + fmt.Printf("Running: sftp -i %s -r %s %s\n", sshKeyPath, target, localPath) + return cmd.Run() +} + +func filterMatchingInstances(insts []Instance, pattern string) ([]Instance, error) { + var filtered []Instance + for _, inst := range insts { + match, err := matchPattern(pattern, inst.Name) + if err != nil { + return nil, err + } + if match { + filtered = append(filtered, inst) + } + } + return filtered, nil +} + +// matchPattern compiles a wildcard pattern (e.g., "validator-*") +// to a regex and returns whether it matches the input string. +func matchPattern(pattern, input string) (bool, error) { + // Escape regex special characters + escaped := regexp.QuoteMeta(pattern) + + // Convert wildcard '*' to '.*' + regexPattern := "^" + strings.ReplaceAll(escaped, "\\*", ".*") + "$" + + re, err := regexp.Compile(regexPattern) + if err != nil { + return false, err + } + + return re.MatchString(input), nil +} + +// Regex to match ANSI escape codes +var ansiEscape = regexp.MustCompile(`\x1b\[[0-9;]*[a-zA-Z]`) + +// stripANSI removes ANSI escape codes from the input string, returning a plain text version without formatting codes. +func stripANSI(input string) string { + return ansiEscape.ReplaceAllString(input, "") +} diff --git a/tools/talis/download_monitoring.go b/tools/talis/download_monitoring.go new file mode 100644 index 0000000000..a73187f6df --- /dev/null +++ b/tools/talis/download_monitoring.go @@ -0,0 +1,93 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/spf13/cobra" +) + +func downloadResourcesCmd() *cobra.Command { + var ( + rootDir string + SSHKeyPath string + nodes string + output string + workers int + ) + + cmd := &cobra.Command{ + Use: "download-resources", + Short: "Download monitoring JSONL files from remote validators", + Long: `Downloads /root/monitor.jsonl from each matching validator. 
+Files are saved to {output}/{validator-name}/monitor.jsonl.`, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + validators, err := filterMatchingInstances(cfg.Validators, nodes) + if err != nil { + return fmt.Errorf("failed to filter nodes: %w", err) + } + if len(validators) == 0 { + return fmt.Errorf("no matching validators found for pattern %q", nodes) + } + + sem := make(chan struct{}, workers) + var wg sync.WaitGroup + var mu sync.Mutex + downloaded := 0 + + for _, val := range validators { + wg.Add(1) + go func(val Instance) { + sem <- struct{}{} + defer func() { + wg.Done() + <-sem + }() + + localDir := filepath.Join(output, val.Name) + if err := os.MkdirAll(localDir, 0o755); err != nil { + fmt.Printf("[%s] failed to create directory %s: %v\n", val.Name, localDir, err) + return + } + + err := sftpDownload("/root/monitor.jsonl", localDir, "root", val.PublicIP, resolvedSSHKeyPath) + if err != nil { + fmt.Printf("[%s] failed to download monitor.jsonl: %v\n", val.Name, err) + return + } + + mu.Lock() + downloaded++ + mu.Unlock() + fmt.Printf("[%s] downloaded monitor.jsonl\n", val.Name) + }(val) + } + + wg.Wait() + + fmt.Printf("\nDownloaded monitoring data from %d/%d validator(s) to %s/\n", downloaded, len(validators), output) + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory (for config.json)") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to SSH private key (overrides env/default)") + cmd.Flags().StringVarP(&nodes, "nodes", "n", "validator-*", "glob pattern for which validators to download from") + cmd.Flags().StringVarP(&output, "output", "o", "./data/monitoring/resources", "local directory to save downloaded files") + cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of concurrent download workers") + + return cmd +} diff --git a/tools/talis/env.go b/tools/talis/env.go new file mode 100644 index 0000000000..fb5a6eb30f --- /dev/null +++ b/tools/talis/env.go @@ -0,0 +1,137 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" +) + +func initEnvCmd() *cobra.Command { + var provider string + + cmd := &cobra.Command{ + Use: "init-env", + Short: "Generate a .env template file", + Long: "Generate a .env template file with the required environment variables for the specified cloud provider.", + RunE: func(cmd *cobra.Command, args []string) error { + if provider == "" { + provider = "digitalocean" + } + + var envContent string + + switch provider { + case "digitalocean": + envContent = generateDigitalOceanEnv() + case "googlecloud": + envContent = generateGoogleCloudEnv() + case "aws": + envContent = generateAWSEnv() + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + + // Check if .env already exists + if _, err := os.Stat(".env"); err == nil { + return fmt.Errorf(".env file already exists. Delete it first or edit manually") + } + + // Write .env file + if err := os.WriteFile(".env", []byte(envContent), 0o600); err != nil { + return fmt.Errorf("failed to write .env file: %w", err) + } + + fmt.Printf("✅ Created .env template for %s\n", provider) + fmt.Println("\nNext steps:") + fmt.Println("1. 
Edit .env and fill in your credentials") + fmt.Println("2. Run: talis init -c -e --with-observability --provider", provider) + + return nil + }, + } + + cmd.Flags().StringVarP(&provider, "provider", "p", "digitalocean", "Cloud provider (digitalocean, googlecloud, aws)") + + return cmd +} + +func generateDigitalOceanEnv() string { + return `# Provider Configuration +PROVIDER=digitalocean + +# DigitalOcean Configuration +# Get your API token from: https://cloud.digitalocean.com/account/api/tokens +DIGITALOCEAN_TOKEN= + +# SSH Configuration (optional - will use defaults if not set) +# TALIS_SSH_KEY_PATH=~/.ssh/id_ed25519.pub +# TALIS_SSH_KEY_NAME=your-username + +# S3/DigitalOcean Spaces Configuration (optional - for payload distribution) +# Create a Space and generate API keys at: https://cloud.digitalocean.com/spaces +# AWS_DEFAULT_REGION=fra1 +# AWS_ACCESS_KEY_ID= +# AWS_SECRET_ACCESS_KEY= +# AWS_S3_BUCKET= +# AWS_S3_ENDPOINT=https://fra1.digitaloceanspaces.com +` +} + +func generateGoogleCloudEnv() string { + return `# Provider Configuration +PROVIDER=googlecloud + +# Google Cloud Configuration +# Project ID from: https://console.cloud.google.com/ +GOOGLE_CLOUD_PROJECT= + +# Service account key JSON path +# Create at: https://console.cloud.google.com/iam-admin/serviceaccounts +# Download the JSON key file and set the path below +GOOGLE_CLOUD_KEY_JSON_PATH= + +# SSH Configuration (optional - will use defaults if not set) +# TALIS_SSH_KEY_PATH=~/.ssh/id_ed25519.pub +# TALIS_SSH_KEY_NAME=your-username + +# S3/DigitalOcean Spaces Configuration (optional - for payload distribution) +# You can use DigitalOcean Spaces for S3-compatible storage +# AWS_DEFAULT_REGION=fra1 +# AWS_ACCESS_KEY_ID= +# AWS_SECRET_ACCESS_KEY= +# AWS_S3_BUCKET= +# AWS_S3_ENDPOINT=https://fra1.digitaloceanspaces.com +` +} + +func generateAWSEnv() string { + return `# Provider Configuration +PROVIDER=aws + +# AWS Credentials (used for both EC2 and the S3 payload bucket) +# Create an access key at: https://console.aws.amazon.com/iam/home#/security_credentials +# The user must have EC2 permissions (RunInstances, Terminate, Describe*, +# ImportKeyPair, CreateSecurityGroup/AuthorizeSecurityGroupIngress, +# CreatePlacementGroup, DescribeVpcs/DescribeSubnets, DescribeImages) and +# S3 (PutObject, GetObject) on the payload bucket. +# +# You can also leave these unset and use 'aws configure --profile ' + +# AWS_PROFILE= — the Go SDK picks up shared credentials automatically. +AWS_ACCESS_KEY_ID= +AWS_SECRET_ACCESS_KEY= + +# Region for EC2 and (by default) the S3 payload bucket. +AWS_DEFAULT_REGION=us-east-1 + +# SSH Configuration +# TALIS_SSH_KEY_PATH is the local path to your SSH public key. The key is +# imported to EC2 (once per region) under TALIS_SSH_KEY_NAME. +# TALIS_SSH_KEY_PATH=~/.ssh/id_ed25519.pub +# TALIS_SSH_KEY_NAME=your-username + +# S3 Payload Bucket (optional — omit and use 'deploy --direct-payload-upload') +# Must be an S3 bucket you own in AWS_DEFAULT_REGION. +# AWS_S3_BUCKET= +` +} diff --git a/tools/talis/execution.go b/tools/talis/execution.go new file mode 100644 index 0000000000..79bdcedb35 --- /dev/null +++ b/tools/talis/execution.go @@ -0,0 +1,159 @@ +package main + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "log" + "os/exec" + "strings" + "sync" + "sync/atomic" + "time" +) + +// runScriptInTMux SSHes into each remote host in parallel, and launches +// the specified remoteScript inside a detached tmux session named sessionName. 
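+// On each host the script body is written to /root/talis-<sessionName>.sh and
+// run under tmux with its output captured to /root/talis-<sessionName>.log,
+// roughly equivalent to:
+//
+//	tmux new-session -d -s <sessionName> "bash /root/talis-<sessionName>.sh > /root/talis-<sessionName>.log 2>&1"
+//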
+// It uses the same timeout per host and returns a combined error if any fail. +func runScriptInTMux( + instances []Instance, + sshKeyPath string, // e.g. "~/.ssh/id_ed25519" + remoteScript string, // e.g. "source /root/start.sh" or "celestia-appd start" + sessionName string, // e.g. "app" + timeout time.Duration, +) error { + var wg sync.WaitGroup + errCh := make(chan error, len(instances)) + counter := atomic.Uint32{} + + for _, inst := range instances { + wg.Add(1) + go func(inst Instance) { + defer wg.Done() + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Launch in tmux and capture output to a per-session log. + logPath := fmt.Sprintf("/root/talis-%s.log", sessionName) + scriptPath := fmt.Sprintf("/root/talis-%s.sh", sessionName) + encodedScript := base64.StdEncoding.EncodeToString([]byte("#!/usr/bin/env bash\n" + remoteScript + "\n")) + fullCmd := fmt.Sprintf( + "printf '%%s' %q | base64 -d > %s && chmod +x %s && tmux new-session -d -s %s %q", + encodedScript, + scriptPath, + scriptPath, + sessionName, + fmt.Sprintf("bash %s > %s 2>&1", scriptPath, logPath), + ) + + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + fullCmd, + ) + if out, err := ssh.CombinedOutput(); err != nil { + errCh <- fmt.Errorf("[%s:%s] ssh error in %s: %v\n%s", + inst.Name, inst.PublicIP, inst.Region, err, out) + return + } + + log.Printf("started %s session on %s (%s) 🏁 – %d/%d\n", + sessionName, inst.Name, inst.PublicIP, counter.Add(1), len(instances)) + }(inst) + } + + wg.Wait() + close(errCh) + + var errs []error //nolint:prealloc + for e := range errCh { + errs = append(errs, e) + } + if len(errs) > 0 { + sb := strings.Builder{} + sb.WriteString("❌ errors running remote script:\n") + for _, e := range errs { + sb.WriteString("- ") + sb.WriteString(e.Error()) + sb.WriteByte('\n') + } + return errors.New(sb.String()) + } + return nil +} + +// waitForTmuxSessions polls all instances until the named tmux session no longer +// exists on any of them (i.e. the script finished), or until the timeout expires. +func waitForTmuxSessions(instances []Instance, sshKeyPath, sessionName string, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + poll := 10 * time.Second + + remaining := make(map[string]Instance, len(instances)) + for _, inst := range instances { + remaining[inst.Name] = inst + } + + for len(remaining) > 0 && time.Now().Before(deadline) { + time.Sleep(poll) + + // Check all remaining validators in parallel + type result struct { + name string + finished bool + } + results := make(chan result, len(remaining)) + for name, inst := range remaining { + go func(name string, inst Instance) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + fmt.Sprintf("tmux has-session -t %s 2>/dev/null", sessionName), + ) + err := ssh.Run() + switch { + case err == nil: + // tmux has-session exited 0 → session still running. + results <- result{name: name, finished: false} + case errors.As(err, new(*exec.ExitError)): + // Remote command ran but returned non-zero → session gone. + results <- result{name: name, finished: true} + default: + // SSH connection error (network blip, refused, etc.) 
→ + // cannot determine session state; treat as still running. + log.Printf("warning: SSH probe failed for %s (%s): %v", name, inst.PublicIP, err) + results <- result{name: name, finished: false} + } + }(name, inst) + } + for range len(remaining) { + r := <-results + if r.finished { + log.Printf("%s session finished on %s (%s)\n", sessionName, r.name, remaining[r.name].PublicIP) + delete(remaining, r.name) + } + } + + if len(remaining) > 0 { + fmt.Printf(" still waiting on %d validator(s)...\n", len(remaining)) + } + } + + if len(remaining) > 0 { + names := make([]string, 0, len(remaining)) + for name := range remaining { + names = append(names, name) + } + return fmt.Errorf("timeout waiting for %s sessions on: %s", sessionName, strings.Join(names, ", ")) + } + return nil +} diff --git a/tools/talis/fibre.md b/tools/talis/fibre.md new file mode 100644 index 0000000000..fadff4def0 --- /dev/null +++ b/tools/talis/fibre.md @@ -0,0 +1,196 @@ +# Running Fibre Experiments with Talis + +This guide covers running Fibre throughput experiments. For general talis setup (prerequisites, installation, cloud provider config, spinning up nodes, and tearing them down), see the main [README.md](README.md). + +## Overview + +A fibre experiment has four phases: + +1. **Setup** — Register fibre host addresses and fund escrow accounts on each validator. +2. **Start fibre server** — Start the fibre server on each validator. +3. **Load generation** — Start `fibre-txsim` on one or more validators to submit blobs via the Fibre protocol. +4. **Monitoring** — Run `fibre-throughput` to observe per-block throughput in real time and optionally write structured traces to a JSONL file. + +## Prerequisites + +Follow the main [README.md](README.md) through the **deploy** step so you have a running network: + +```sh +talis init --chain-id --experiment +talis add --type validator --count +talis up +talis genesis --square-size 256 --build-dir build +talis deploy --direct-payload-upload --workers 20 +``` + +## 1. Fibre setup + +Register each validator's fibre host address and deposit tokens into escrow for all fibre worker accounts: + +```sh +talis setup-fibre +``` + +| Flag | Default | Description | +|--------------------|-----------------------|------------------------------------------------------| +| `--directory` | `.` | Experiment root directory | +| `--ssh-key-path` | *(from env/config)* | Path to SSH private key | +| `--escrow-amount` | `200000000000000utia` | Amount to deposit into escrow per account | +| `--fibre-port` | `7980` | Fibre gRPC port on validators | +| `--fees` | `5000utia` | Transaction fees | +| `--workers` | `10` | Number of validators to set up in parallel | +| `--fibre-accounts` | `100` | Number of fibre worker accounts to deposit escrow for| + +This SSHes into every validator and runs the `set-host` and `deposit-to-escrow` transactions (one per fibre account). It polls tmux sessions to wait for all transactions to complete before returning. + +## 2. 
Start fibre server + +Start the fibre server on validators: + +```sh +talis start-fibre +``` + +| Flag | Default | Description | +|---------------------|---------------------|---------------------------------------------------------------| +| `--directory` | `.` | Experiment root directory | +| `--ssh-key-path` | *(from env/config)* | Path to SSH private key | +| `--instances` | `0` (all) | Number of validators to start fibre on | +| `--otel-endpoint` | *(auto)* | OTLP HTTP endpoint for metrics/traces (auto-enabled with observability) | + +The fibre server delegates signing to the colocated validator node's PrivValidatorAPI gRPC endpoint (default `127.0.0.1:26659`). Override with `--signer-grpc-address` if needed. Metrics and traces are auto-enabled via OTLP when observability nodes are configured. + +Each validator runs the fibre server inside a tmux session called `fibre`. To stop: + +```sh +talis kill-session --session fibre +``` + +## 3. Start fibre-txsim + +Start blob submission on one or more validators: + +```sh +talis fibre-txsim --instances 4 \ + --concurrency 2 \ + --blob-size 1000000 +``` + +| Flag | Default | Description | +|------------------|---------------------|--------------------------------------------------------------------------| +| `--directory` | `.` | Experiment root directory | +| `--ssh-key-path` | *(from env/config)* | Path to SSH private key | +| `--instances` | `1` | Number of validators to start fibre-txsim on | +| `--concurrency` | `1` | Concurrent blob submissions per instance (each gets its own account) | +| `--blob-size` | `1000000` | Size of each blob in bytes | +| `--interval` | `0` | Delay between submissions per worker (`0` = no delay) | +| `--duration` | `0` | How long to run (`0` = until killed) | +| `--key-prefix` | `fibre` | Key name prefix in keyring (keys are named `-0`, `-1`, ...) | + +Each concurrent worker gets its own signing key and account (e.g. `fibre-0`, `fibre-1`, ...), eliminating sequence number conflicts. + +Each instance runs inside a tmux session called `fibre-txsim` on the remote validator. To stop all instances: + +```sh +talis kill-session --session fibre-txsim +``` + +To view logs on a specific validator: + +```sh +ssh root@ 'cat /root/talis-fibre-txsim.log' +``` + +## 4. 
Monitor throughput + +Run `fibre-throughput` from your local machine to poll blocks and print per-block stats: + +```sh +talis fibre-throughput +``` + +This connects to the first validator's RPC endpoint and prints a line per block: + +```text +height=350 pff_txs=4 pfb_txs=0 pff_bytes=3MB pfb_bytes=0MB block_time=3.06s pff_throughput=1.02MB/s pfb_throughput=0.00MB/s +``` + +### Flags + +| Flag | Default | Description | +|------------------|------------------------------|-----------------------------------------------| +| `--directory` | `.` | Experiment root directory | +| `--rpc-endpoint` | *(first validator IP:26657)* | CometBFT RPC endpoint to poll | +| `--duration` | `0` | How long to run (`0` = until Ctrl+C) | +| `--start-height` | `0` | Block height to start from (`0` = latest + 1) | +| `--with-traces` | `false` | Enable JSONL trace file output | +| `--traces-dir` | `traces/throughput` | Directory where trace files are written | + +### Writing traces + +To record structured per-block data for later analysis, enable the `--with-traces` flag: + +```sh +talis fibre-throughput --directory --with-traces +``` + +This creates a timestamped JSONL file inside the traces directory: + +```text +traces/throughput/throughput_2026-02-18T20:59:35Z.jsonl +``` + +Each run creates a new file. To use a custom directory: + +```sh +talis fibre-throughput --directory --with-traces --traces-dir my/traces +``` + +Each line in the JSONL file is a JSON object with the following fields: + +```json +{ + "height": 350, + "timestamp": "2026-02-18T20:59:33Z", + "block_time_sec": 3.06, + "pff_count": 4, + "pfb_count": 0, + "total_pff_bytes": 4000000, + "total_pfb_bytes": 0, + "pff_throughput_mbs": 1.25, + "pfb_throughput_mbs": 0 +} +``` + +| Field | Description | +|----------------------|------------------------------------------------------------| +| `height` | Block height | +| `timestamp` | Block header timestamp (RFC 3339) | +| `block_time_sec` | Seconds since the previous block | +| `pff_count` | Number of `MsgPayForFibre` transactions | +| `pfb_count` | Number of `MsgPayForBlobs` transactions | +| `total_pff_bytes` | Total PFF blob bytes in the block | +| `total_pfb_bytes` | Total PFB blob bytes in the block | +| `pff_throughput_mbs` | PFF throughput in MB/s (`pff_bytes / block_time / 1024^2`) | +| `pfb_throughput_mbs` | PFB throughput in MB/s (`pfb_bytes / block_time / 1024^2`) | + +### Replaying past blocks + +To analyze blocks from a past experiment, use `--start-height`: + +```sh +talis fibre-throughput --directory --with-traces --start-height 100 +``` + +## 5. Teardown + +When the experiment is complete: + +```sh +# Stop fibre-txsim and fibre server on all validators +talis kill-session --session fibre-txsim +talis kill-session --session fibre + +# Tear down cloud instances +talis down --workers 20 +``` diff --git a/tools/talis/fibre_bootstrap_evnode.go b/tools/talis/fibre_bootstrap_evnode.go new file mode 100644 index 0000000000..5df12d41ae --- /dev/null +++ b/tools/talis/fibre_bootstrap_evnode.go @@ -0,0 +1,212 @@ +package main + +import ( + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "sync" + "time" + + "github.com/spf13/cobra" +) + +// fibreBootstrapEvnodeCmd wires the two operator-supplied dependencies +// that ev-node needs before its init script will start the daemon: +// +// 1. Bridge admin JWT (/root/bridge-jwt.txt on the bridge box, written +// by bridge_init.sh). +// 2. 
cosmos-sdk file keyring with the Fibre payment account (lives at +// /root/.celestia-app/keyring-test on validator-0, populated during +// validator_init.sh + setup-fibre). +// +// Both get pulled to the operator's local machine first (keeps the +// transfers serial and observable), then pushed to every evnode-* in +// the config. After this command returns, evnode_init.sh's poll loop +// observes the files and starts the daemon. +// +// Run after `talis up && talis genesis && talis deploy && talis +// setup-fibre`. Idempotent — re-running just overwrites the files. +func fibreBootstrapEvnodeCmd() *cobra.Command { + var ( + rootDir string + sshKeyPath string + sshUser string + jwtTimeout time.Duration + ) + + cmd := &cobra.Command{ + Use: "fibre-bootstrap-evnode", + Short: "Pull bridge JWT + validator-0 keyring and push them to every ev-node instance", + Long: `After deploy + setup-fibre, this command stitches the two operator- +supplied dependencies onto each ev-node box so its init script's poll +loop unblocks and starts the daemon. SSHes to bridge-0 + validator-0 +to fetch, then SCPs to each evnode-*.`, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("load config: %w", err) + } + if len(cfg.Bridges) == 0 { + return fmt.Errorf("no bridges in config — run `talis add --type bridge` first") + } + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators in config") + } + if len(cfg.Evnodes) == 0 { + return fmt.Errorf("no evnodes in config — nothing to bootstrap") + } + bridge := cfg.Bridges[0] + validator := cfg.Validators[0] + if bridge.PublicIP == "" || bridge.PublicIP == "TBD" { + return fmt.Errorf("bridge-0 has no public IP — run `talis up` first") + } + if validator.PublicIP == "" || validator.PublicIP == "TBD" { + return fmt.Errorf("validator-0 has no public IP — run `talis up` first") + } + + tmpDir, err := os.MkdirTemp("", "talis-evnode-bootstrap-") + if err != nil { + return fmt.Errorf("create temp dir: %w", err) + } + defer os.RemoveAll(tmpDir) + + localJWT := filepath.Join(tmpDir, "bridge-jwt.txt") + localKeyringRoot := filepath.Join(tmpDir, "keyring-fibre") + if err := os.MkdirAll(localKeyringRoot, 0o700); err != nil { + return fmt.Errorf("create local keyring root: %w", err) + } + + // Pull JWT (poll-retry: bridge_init.sh writes it after + // `celestia bridge auth admin`, which can take ~30s after + // the bridge process starts). + log.Printf("Fetching bridge JWT from bridge-0 (%s) — up to %s", bridge.PublicIP, jwtTimeout) + deadline := time.Now().Add(jwtTimeout) + for { + if err := scpFromRemote(sshUser, bridge.PublicIP, sshKeyPath, "/root/bridge-jwt.txt", localJWT, false); err == nil { + if info, statErr := os.Stat(localJWT); statErr == nil && info.Size() > 0 { + break + } + } + if time.Now().After(deadline) { + return fmt.Errorf("bridge JWT not ready at /root/bridge-jwt.txt within %s — check bridge tmux session: tmux attach -t bridge", jwtTimeout) + } + time.Sleep(5 * time.Second) + } + log.Printf("✓ pulled JWT to %s", localJWT) + + // Pull validator-0's keyring directory. The cosmos-sdk + // file backend stores per-account keys under + // keyring-test/, so we mirror that layout locally so the + // outbound push lands at /root/keyring-fibre/keyring-test/ + // — exactly where evnode_init.sh expects it. 
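+			// Resulting layout (illustrative):
+			//   local pull : <tmpDir>/keyring-fibre/keyring-test/...
+			//   remote push: /root/keyring-fibre/keyring-test/... on each evnode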
+ log.Printf("Fetching keyring-test from validator-0 (%s)", validator.PublicIP) + if err := scpFromRemote(sshUser, validator.PublicIP, sshKeyPath, "/root/.celestia-app/keyring-test", localKeyringRoot, true); err != nil { + return fmt.Errorf("scp keyring from validator-0: %w", err) + } + if _, err := os.Stat(filepath.Join(localKeyringRoot, "keyring-test")); err != nil { + return fmt.Errorf("keyring-test directory not present after pull (got %s): %w", localKeyringRoot, err) + } + log.Printf("✓ pulled keyring to %s/keyring-test", localKeyringRoot) + + // Push to every ev-node in parallel. The init script's + // poll loop checks every 5s, so a successful push here + // means daemon startup within ~10s. + var wg sync.WaitGroup + errCh := make(chan error, len(cfg.Evnodes)) + for _, ev := range cfg.Evnodes { + if ev.PublicIP == "" || ev.PublicIP == "TBD" { + errCh <- fmt.Errorf("evnode %s has no public IP", ev.Name) + continue + } + wg.Add(1) + go func(ev Instance) { + defer wg.Done() + log.Printf("[%s] pushing JWT + keyring", ev.Name) + + if err := scpToRemote(sshUser, ev.PublicIP, sshKeyPath, localJWT, "/root/bridge-jwt.txt", false); err != nil { + errCh <- fmt.Errorf("[%s] push JWT: %w", ev.Name, err) + return + } + + // mkdir the parent so scp lands at the exact path + // evnode_init.sh waits for. + if _, err := sshExec(sshUser, ev.PublicIP, sshKeyPath, "mkdir -p /root/keyring-fibre && rm -rf /root/keyring-fibre/keyring-test"); err != nil { + errCh <- fmt.Errorf("[%s] mkdir keyring-fibre: %w", ev.Name, err) + return + } + if err := scpToRemote(sshUser, ev.PublicIP, sshKeyPath, filepath.Join(localKeyringRoot, "keyring-test"), "/root/keyring-fibre/keyring-test", true); err != nil { + errCh <- fmt.Errorf("[%s] push keyring: %w", ev.Name, err) + return + } + + log.Printf("[%s] ✓ pushed; daemon should start within ~10s", ev.Name) + }(ev) + } + wg.Wait() + close(errCh) + var errs []error + for e := range errCh { + errs = append(errs, e) + } + if len(errs) > 0 { + for _, e := range errs { + log.Println(e) + } + return fmt.Errorf("%d evnode(s) failed to bootstrap", len(errs)) + } + log.Printf("✓ bootstrap complete for %d evnode(s)", len(cfg.Evnodes)) + return nil + }, + } + + homeDir, _ := os.UserHomeDir() + defaultKeyPath := filepath.Join(homeDir, ".ssh", "id_ed25519") + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "experiment root directory") + cmd.Flags().StringVarP(&sshKeyPath, "ssh-key-path", "s", defaultKeyPath, "SSH private key for talis instances") + cmd.Flags().StringVar(&sshUser, "ssh-user", "root", "SSH user (talis instances boot as root)") + cmd.Flags().DurationVar(&jwtTimeout, "jwt-timeout", 5*time.Minute, "max wall time to wait for the bridge JWT to appear on bridge-0") + + return cmd +} + +// scpFromRemote pulls a file or directory off a remote box. recursive=true +// uses scp -r so directories transfer with their contents. +func scpFromRemote(user, host, sshKeyPath, remotePath, localPath string, recursive bool) error { + args := []string{ + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-i", sshKeyPath, + } + if recursive { + args = append(args, "-r") + } + args = append(args, fmt.Sprintf("%s@%s:%s", user, host, remotePath), localPath) + cmd := exec.Command("scp", args...) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("scp pull: %w (%s)", err, string(out)) + } + return nil +} + +// scpToRemote pushes a file or directory onto a remote box. 
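+// With recursive=true the destination should not already exist, otherwise scp
+// nests the copy inside it; callers therefore remove any stale target first
+// (see the keyring push above).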
+func scpToRemote(user, host, sshKeyPath, localPath, remotePath string, recursive bool) error { + args := []string{ + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-i", sshKeyPath, + } + if recursive { + args = append(args, "-r") + } + args = append(args, localPath, fmt.Sprintf("%s@%s:%s", user, host, remotePath)) + cmd := exec.Command("scp", args...) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("scp push: %w (%s)", err, string(out)) + } + return nil +} diff --git a/tools/talis/fibre_setup.go b/tools/talis/fibre_setup.go new file mode 100644 index 0000000000..28f7115b80 --- /dev/null +++ b/tools/talis/fibre_setup.go @@ -0,0 +1,161 @@ +package main + +import ( + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/spf13/cobra" +) + +const SetupFibreSessionName = "setup-fibre" + +func setupFibreCmd() *cobra.Command { + var ( + rootDir string + SSHKeyPath string + escrowAmount string + fibrePort int + fees string + workers int + fibreAccounts int + encoderFibreAccounts int + ) + + cmd := &cobra.Command{ + Use: "setup-fibre", + Short: "Register fibre host addresses and fund escrow accounts on remote validators", + Long: "SSHes into each validator and runs transactions: register the fibre host address and fund escrow accounts for the validator and all fibre worker accounts.", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + sem := make(chan struct{}, workers) + var ( + wg sync.WaitGroup + mu sync.Mutex + errs []error + ) + + for _, val := range cfg.Validators { + // Build script: register host + deposit escrow for validator + all fibre accounts + var sb strings.Builder + + // 1. Register fibre host address. Plain `host:port` form — + // x/valaddr requires it; the gRPC client dials it via the + // passthrough resolver. Don't prefix `dns:///` here. + sb.WriteString(fmt.Sprintf( + "celestia-appd tx valaddr set-host %s:%d "+ + "--from validator --keyring-backend=test --home .celestia-app "+ + "--chain-id %s --fees %s --yes\n", + val.PublicIP, fibrePort, + cfg.ChainID, fees, + )) + sb.WriteString("sleep 10\n") + + // 2. Deposit escrow for each fibre worker account + for i := range fibreAccounts { + keyName := fmt.Sprintf("fibre-%d", i) + sb.WriteString(fmt.Sprintf( + "celestia-appd tx fibre deposit-to-escrow %s "+ + "--from %s --keyring-backend=test --home .celestia-app "+ + "--chain-id %s --fees %s --yes\n", + escrowAmount, + keyName, + cfg.ChainID, fees, + )) + } + + script := sb.String() + + sem <- struct{}{} + wg.Add(1) + go func(inst Instance, s string) { + defer wg.Done() + defer func() { <-sem }() + + fmt.Printf("Running setup-fibre on %s (%s) — registering host + %d escrow deposits\n", inst.Name, inst.PublicIP, fibreAccounts) + if err := runScriptInTMux([]Instance{inst}, resolvedSSHKeyPath, s, SetupFibreSessionName, time.Minute*30); err != nil { + mu.Lock() + errs = append(errs, fmt.Errorf("%s: %w", inst.Name, err)) + mu.Unlock() + } + }(val, script) + } + + wg.Wait() + + if len(errs) > 0 { + return errors.Join(errs...) 
+ } + + fmt.Printf("Waiting for fibre setup to complete (%d accounts per validator)...\n", fibreAccounts) + if err := waitForTmuxSessions(cfg.Validators, resolvedSSHKeyPath, SetupFibreSessionName, 10*time.Minute); err != nil { + return fmt.Errorf("waiting for setup-fibre sessions: %w", err) + } + fmt.Println("Validator setup done!") + + // Deposit escrow for encoder accounts. + // Each encoder runs deposit-to-escrow from its own machine using its + // own keyring, broadcasting via the first validator's RPC endpoint. + if len(cfg.Encoders) > 0 && len(cfg.Validators) > 0 { + rpcNode := fmt.Sprintf("tcp://%s:26657", cfg.Validators[0].PublicIP) + fmt.Printf("Setting up escrow for %d encoder(s) via %s...\n", len(cfg.Encoders), rpcNode) + + for _, enc := range cfg.Encoders { + encIndex := extractIndexFromName(enc.Name) + keyPrefix := fmt.Sprintf("enc%d", encIndex) + nAccounts := encoderFibreAccounts + + var sb strings.Builder + for i := range nAccounts { + keyName := fmt.Sprintf("%s-%d", keyPrefix, i) + sb.WriteString(fmt.Sprintf( + "celestia-appd tx fibre deposit-to-escrow %s "+ + "--from %s --keyring-backend=test --home .celestia-app "+ + "--chain-id %s --fees %s --node %s --yes\n", + escrowAmount, + keyName, + cfg.ChainID, fees, rpcNode, + )) + } + + script := sb.String() + fmt.Printf("Running escrow deposits on encoder %s (%s) — %d accounts\n", enc.Name, enc.PublicIP, nAccounts) + if err := runScriptInTMux([]Instance{enc}, resolvedSSHKeyPath, script, SetupFibreSessionName, 30*time.Minute); err != nil { + return fmt.Errorf("encoder %s escrow setup: %w", enc.Name, err) + } + } + + fmt.Printf("Waiting for encoder escrow deposits to complete...\n") + if err := waitForTmuxSessions(cfg.Encoders, resolvedSSHKeyPath, SetupFibreSessionName, 15*time.Minute); err != nil { + return fmt.Errorf("waiting for encoder setup-fibre sessions: %w", err) + } + fmt.Println("Encoder escrow setup done!") + } + + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to the user's SSH key") + cmd.Flags().StringVar(&escrowAmount, "escrow-amount", "200000000000000utia", "amount to deposit into escrow") + cmd.Flags().IntVar(&fibrePort, "fibre-port", 7980, "fibre gRPC port on validators") + cmd.Flags().StringVar(&fees, "fees", "5000utia", "transaction fees") + cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of validators to set up in parallel") + cmd.Flags().IntVar(&fibreAccounts, "fibre-accounts", 100, "number of fibre worker accounts to deposit escrow for") + cmd.Flags().IntVar(&encoderFibreAccounts, "encoder-fibre-accounts", 100, "number of fibre worker accounts per encoder instance") + + return cmd +} diff --git a/tools/talis/fibre_throughput.go b/tools/talis/fibre_throughput.go new file mode 100644 index 0000000000..adcc08112a --- /dev/null +++ b/tools/talis/fibre_throughput.go @@ -0,0 +1,240 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "os/signal" + "path/filepath" + "time" + + "github.com/celestiaorg/celestia-app/v9/app" + "github.com/celestiaorg/celestia-app/v9/app/encoding" + blobtypes "github.com/celestiaorg/celestia-app/v9/x/blob/types" + fibretypes "github.com/celestiaorg/celestia-app/v9/x/fibre/types" + "github.com/cometbft/cometbft/rpc/client/http" + "github.com/spf13/cobra" +) + +type blockTrace struct { + Height int64 `json:"height"` + Timestamp string `json:"timestamp"` + BlockTimeSec float64 `json:"block_time_sec"` + PFFCount int 
`json:"pff_count"` + PFBCount int `json:"pfb_count"` + TotalPFFBytes int64 `json:"total_pff_bytes"` + TotalPFBBytes int64 `json:"total_pfb_bytes"` + PFFThroughputMBs float64 `json:"pff_throughput_mbs"` + PFBThroughputMBs float64 `json:"pfb_throughput_mbs"` +} + +func fibreThroughputCmd() *cobra.Command { + var ( + rootDir string + rpcEndpoint string + duration time.Duration + withTraces bool + tracesDir string + startHeight int64 + ) + + cmd := &cobra.Command{ + Use: "fibre-throughput", + Short: "Monitor real-time fibre throughput per block", + Long: "Polls blocks from a validator's RPC endpoint, decodes MsgPayForFibre transactions, and prints throughput per block.", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + if rpcEndpoint == "" { + rpcEndpoint = fmt.Sprintf("http://%s:26657", cfg.Validators[0].PublicIP) + } + + fmt.Printf("RPC endpoint: %s\n", rpcEndpoint) + + client, err := http.New(rpcEndpoint, "/websocket") + if err != nil { + return fmt.Errorf("failed to create RPC client: %w", err) + } + + encCfg := encoding.MakeConfig(app.ModuleEncodingRegisters...) + txDecoder := encCfg.TxConfig.TxDecoder() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + go func() { + <-sigCh + fmt.Println("\nReceived interrupt, shutting down...") + cancel() + }() + + if duration > 0 { + ctx, cancel = context.WithTimeout(ctx, duration) + defer cancel() + } + + var nextHeight int64 + if startHeight > 0 { + nextHeight = startHeight + } else { + statusResp, err := client.Status(ctx) + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + nextHeight = statusResp.SyncInfo.LatestBlockHeight + 1 + } + fmt.Printf("Starting from height %d\n\n", nextHeight) + + var ( + totalBlocks int64 + totalBytes int64 + prevBlockTime time.Time + totalThroughput float64 + ) + + var traceEncoder *json.Encoder + var traceFile *os.File + if withTraces { + if err := os.MkdirAll(tracesDir, 0o755); err != nil { + return fmt.Errorf("failed to create traces directory: %w", err) + } + traceFileName := filepath.Join(tracesDir, fmt.Sprintf("throughput_%s.jsonl", time.Now().Format(time.RFC3339))) + traceFile, err = os.Create(traceFileName) + if err != nil { + return fmt.Errorf("failed to create trace file: %w", err) + } + defer traceFile.Close() + traceEncoder = json.NewEncoder(traceFile) + fmt.Printf("Writing traces to %s\n", traceFileName) + } + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for ctx.Err() == nil { + select { + case <-ctx.Done(): + continue + case <-ticker.C: + } + + // Fetch the latest height + st, err := client.Status(ctx) + if err != nil { + if ctx.Err() != nil { + continue + } + fmt.Printf("error fetching status: %v\n", err) + continue + } + latestHeight := st.SyncInfo.LatestBlockHeight + + // Process all new blocks + for h := nextHeight; h <= latestHeight && ctx.Err() == nil; h++ { + height := h + block, err := client.Block(ctx, &height) + if err != nil { + if ctx.Err() != nil { + break + } + fmt.Printf("error fetching block %d: %v\n", h, err) + continue + } + + blockTime := block.Block.Time + var blockTimeDelta float64 + if !prevBlockTime.IsZero() { + blockTimeDelta = blockTime.Sub(prevBlockTime).Seconds() + } + prevBlockTime = blockTime + + var pffCount int + var 
pffBytes int64 + var pfbCount int + var pfbBytes int64 + for _, rawTx := range block.Block.Txs { + sdkTx, err := txDecoder(rawTx) + if err != nil { + continue + } + for _, msg := range sdkTx.GetMsgs() { + if pff, ok := msg.(*fibretypes.MsgPayForFibre); ok { + pffCount++ + pffBytes += int64(pff.PaymentPromise.BlobSize) + continue + } + if pfb, ok := msg.(*blobtypes.MsgPayForBlobs); ok { + pfbCount++ + for _, size := range pfb.BlobSizes { + pfbBytes += int64(size) + } + } + } + } + + var pffThroughputMBs float64 + var pfbThroughputMBs float64 + if blockTimeDelta > 0 { + pffThroughputMBs = float64(pffBytes) / blockTimeDelta / (1024 * 1024) + pfbThroughputMBs = float64(pfbBytes) / blockTimeDelta / (1024 * 1024) + } + + fmt.Printf("height=%d pff_txs=%d pfb_txs=%d pff_bytes=%dMB pfb_bytes=%dMB block_time=%.2fs pff_throughput=%.2fMB/s pfb_throughput=%.2fMB/s\n", + h, pffCount, pfbCount, pffBytes/(1024*1024), pfbBytes/(1024*1024), blockTimeDelta, pffThroughputMBs, pfbThroughputMBs) + + if traceEncoder != nil { + trace := blockTrace{ + Height: h, + Timestamp: blockTime.Format(time.RFC3339), + BlockTimeSec: blockTimeDelta, + PFFCount: pffCount, + PFBCount: pfbCount, + TotalPFFBytes: pffBytes, + TotalPFBBytes: pfbBytes, + PFFThroughputMBs: pffThroughputMBs, + PFBThroughputMBs: pfbThroughputMBs, + } + if err := traceEncoder.Encode(trace); err != nil { + fmt.Printf("error writing trace: %v\n", err) + } + } + + totalBytes += pffBytes + if blockTimeDelta > 0 { + totalBlocks++ + totalThroughput += pffThroughputMBs + } + + nextHeight = h + 1 + } + } + + fmt.Printf("\n--- Summary ---\n") + fmt.Printf("Total blocks: %d\n", totalBlocks) + fmt.Printf("Total bytes: %d\n", totalBytes) + if totalBlocks > 0 { + fmt.Printf("Avg throughput: %.2f MB/s\n", totalThroughput/float64(totalBlocks)) + } + + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVar(&rpcEndpoint, "rpc-endpoint", "", "CometBFT RPC endpoint (default: first validator IP:26657)") + cmd.Flags().DurationVar(&duration, "duration", 0, "how long to run (0 = until Ctrl+C)") + cmd.Flags().BoolVar(&withTraces, "with-traces", false, "enable JSONL trace file output") + cmd.Flags().StringVar(&tracesDir, "traces-dir", "./data/monitoring/throughput", "directory for trace files") + cmd.Flags().Int64Var(&startHeight, "start-height", 0, "block height to start from (0 = latest + 1)") + + return cmd +} diff --git a/tools/talis/fibre_txsim.go b/tools/talis/fibre_txsim.go new file mode 100644 index 0000000000..3cc2b5ba29 --- /dev/null +++ b/tools/talis/fibre_txsim.go @@ -0,0 +1,178 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/spf13/cobra" +) + +const FibreTxSimSessionName = "fibre-txsim" + +func fibreTxsimCmd() *cobra.Command { + var ( + rootDir string + SSHKeyPath string + instances int + concurrency int + blobSize int + interval time.Duration + duration time.Duration + keyPrefix string + download bool + uploadOnly bool + pyroscopeEndpoint string + onEncoders bool + ) + + cmd := &cobra.Command{ + Use: "fibre-txsim", + Short: "Start fibre-txsim on remote validators or encoder instances via SSH + tmux", + Long: "Starts fibre-txsim tmux sessions on remote validators or dedicated encoder instances. 
The fibre-txsim binary must already be deployed via 'talis deploy' (built by 'make build-talis-bins').", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + if onEncoders { + return startFibreTxsimOnEncoders(cfg, resolvedSSHKeyPath, instances, concurrency, blobSize, interval, duration, download, uploadOnly, pyroscopeEndpoint) + } + + // Legacy mode: run fibre-txsim on validators themselves + n := min(instances, len(cfg.Validators)) + validators := cfg.Validators[:n] + + // Build the remote command — binaries are copied to /bin/ by validator_init.sh + // OTEL_METRICS_EXEMPLAR_FILTER=always_on attaches trace exemplars to all metric observations + remoteCmd := fmt.Sprintf( + "OTEL_METRICS_EXEMPLAR_FILTER=always_on fibre-txsim --chain-id %s --grpc-endpoint localhost:9091 --keyring-dir .celestia-app --key-prefix %s --blob-size %d --concurrency %d --interval %s --duration %s --download=%t --upload-only=%t", + cfg.ChainID, + keyPrefix, + blobSize, + concurrency, + interval, + duration, + download, + uploadOnly, + ) + + // Auto-wire observability endpoints when observability nodes are configured + if len(cfg.Observability) > 0 { + remoteCmd += fmt.Sprintf(" --otel-endpoint http://%s:4318", cfg.Observability[0].PublicIP) + if pyroscopeEndpoint == "" { + remoteCmd += fmt.Sprintf(" --pyroscope-endpoint http://%s:4040", cfg.Observability[0].PublicIP) + } + } + if pyroscopeEndpoint != "" { + remoteCmd += fmt.Sprintf(" --pyroscope-endpoint %s", pyroscopeEndpoint) + } + + fmt.Printf("Starting fibre-txsim sessions on %d validator(s)...\n", len(validators)) + + if err := runScriptInTMux(validators, resolvedSSHKeyPath, remoteCmd, FibreTxSimSessionName, 5*time.Minute); err != nil { + return fmt.Errorf("failed to start remote sessions: %w", err) + } + + printFibreTxsimSummary(validators) + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory (for config.json)") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to SSH private key (overrides env/default)") + cmd.Flags().IntVar(&instances, "instances", 1, "number of instances to start fibre-txsim on") + cmd.Flags().IntVar(&concurrency, "concurrency", 1, "number of concurrent blob submissions per instance") + cmd.Flags().IntVar(&blobSize, "blob-size", 1000000, "size of each blob in bytes") + cmd.Flags().DurationVar(&interval, "interval", 0, "delay between blob submissions (0 = no delay)") + cmd.Flags().DurationVar(&duration, "duration", 0, "how long to run (0 = until killed)") + cmd.Flags().StringVar(&keyPrefix, "key-prefix", "fibre", "key name prefix in keyring (keys are named -0, -1, ...)") + cmd.Flags().BoolVar(&download, "download", false, "enable download verification after each successful upload (downloads blob back and compares with original data)") + cmd.Flags().BoolVar(&uploadOnly, "upload-only", false, "skip PFF transaction — only upload shards to validators without on-chain confirmation") + cmd.Flags().StringVar(&pyroscopeEndpoint, "pyroscope-endpoint", "", "Pyroscope endpoint for continuous profiling (default: auto-detected from observability config, e.g. 
http://host:4040)") + cmd.Flags().BoolVar(&onEncoders, "on-encoders", false, "run fibre-txsim on dedicated encoder instances instead of validators") + + return cmd +} + +// startFibreTxsimOnEncoders launches fibre-txsim on each encoder instance. +// Each encoder is mapped to a validator (round-robin) and uses a unique key +// prefix (enc0, enc1, ...) so that their escrow accounts are independent. +func startFibreTxsimOnEncoders(cfg Config, sshKeyPath string, instances, concurrency, blobSize int, interval, duration time.Duration, download, uploadOnly bool, pyroscopeEndpoint string) error { + if len(cfg.Encoders) == 0 { + return fmt.Errorf("no encoder instances found in config — add encoders via 'talis add -t encoder'") + } + + n := min(instances, len(cfg.Encoders)) + encoders := cfg.Encoders[:n] + + fmt.Printf("Starting fibre-txsim on %d encoder(s)...\n", len(encoders)) + + for _, enc := range encoders { + encIndex := extractIndexFromName(enc.Name) + // Round-robin map encoder → validator for gRPC endpoint + valIndex := encIndex % len(cfg.Validators) + grpcEndpoint := fmt.Sprintf("%s:9091", cfg.Validators[valIndex].PublicIP) + encKeyPrefix := fmt.Sprintf("enc%d", encIndex) + + remoteCmd := fmt.Sprintf( + // Encoders keep their per-encoder keyring under + // /root/encoder-payload//keyring-test/, never copied to + // the default ~/.celestia-app/keyring-test by the deploy step; + // point fibre-txsim at the right directory directly so it can + // load enc-* keys. + "OTEL_METRICS_EXEMPLAR_FILTER=always_on fibre-txsim --chain-id %s --grpc-endpoint %s --keyring-dir encoder-payload/%s --key-prefix %s --blob-size %d --concurrency %d --interval %s --duration %s --download=%t --upload-only=%t", + cfg.ChainID, + grpcEndpoint, + enc.Name, + encKeyPrefix, + blobSize, + concurrency, + interval, + duration, + download, + uploadOnly, + ) + + // Auto-wire observability endpoints + if len(cfg.Observability) > 0 { + remoteCmd += fmt.Sprintf(" --otel-endpoint http://%s:4318", cfg.Observability[0].PublicIP) + if pyroscopeEndpoint == "" { + remoteCmd += fmt.Sprintf(" --pyroscope-endpoint http://%s:4040", cfg.Observability[0].PublicIP) + } + } + if pyroscopeEndpoint != "" { + remoteCmd += fmt.Sprintf(" --pyroscope-endpoint %s", pyroscopeEndpoint) + } + + fmt.Printf(" encoder %s → validator %s (grpc=%s, keys=%s-*)\n", + enc.Name, cfg.Validators[valIndex].Name, grpcEndpoint, encKeyPrefix) + + if err := runScriptInTMux([]Instance{enc}, sshKeyPath, remoteCmd, FibreTxSimSessionName, 5*time.Minute); err != nil { + return fmt.Errorf("failed to start fibre-txsim on encoder %s: %w", enc.Name, err) + } + } + + printFibreTxsimSummary(encoders) + return nil +} + +func printFibreTxsimSummary(instances []Instance) { + fmt.Println() + fmt.Println("=== fibre-txsim sessions started ===") + fmt.Printf(" tmux session: %s\n", FibreTxSimSessionName) + fmt.Printf(" log file: /root/talis-%s.log\n", FibreTxSimSessionName) + fmt.Println(" instances:") + for _, inst := range instances { + fmt.Printf(" - %s (%s)\n", inst.Name, inst.PublicIP) + } + fmt.Println() + fmt.Printf(" To kill all: talis kill-session -s %s\n", FibreTxSimSessionName) + fmt.Printf(" To view logs: ssh root@ 'cat /root/talis-%s.log'\n", FibreTxSimSessionName) +} diff --git a/tools/talis/genesis.go b/tools/talis/genesis.go new file mode 100644 index 0000000000..9a4f481a81 --- /dev/null +++ b/tools/talis/genesis.go @@ -0,0 +1,779 @@ +package main + +import ( + "fmt" + "log" + "os" + "path/filepath" + + "github.com/celestiaorg/celestia-app/v9/pkg/appconsts" + 
"github.com/celestiaorg/celestia-app/v9/test/util/genesis" + "github.com/spf13/cobra" +) + +const ( + chainIDFlag = "chainID" + rootDirFlag = "directory" +) + +// generateCmd is the Cobra command for creating the payload for the experiment. +func generateCmd() *cobra.Command { + var ( + rootDir string + chainID string // will overwrite that in the config + squareSize int + buildDirPath string + appBinaryPath string + nodeBinaryPath string + txsimBinaryPath string + latencyMonitorBinaryPath string + fibreBinaryPath string + fibreTxsimBinaryPath string + observabilityDirPath string + useMainnetStakingDistribution bool + fibreAccounts int + encoderFibreAccounts int + ) + cmd := &cobra.Command{ + Use: "genesis", + Short: "Create a genesis for the network.", + Long: "Create a genesis for the network along with everything else needed to start the network. Call this only after init and add.", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if chainID != "" { + cfg = cfg.WithChainID(chainID) + } + + payloadDir := filepath.Join(rootDir, "payload") + + if err := os.RemoveAll(payloadDir); err != nil { + return fmt.Errorf("failed to remove old payload directory: %w", err) + } + if err := os.RemoveAll(filepath.Join(rootDir, "encoder-payload")); err != nil { + return fmt.Errorf("failed to remove old encoder-payload directory: %w", err) + } + + err = createPayload(cfg.Validators, cfg.Encoders, cfg.ChainID, payloadDir, squareSize, useMainnetStakingDistribution, fibreAccounts, encoderFibreAccounts) + if err != nil { + log.Fatalf("Failed to create payload: %v", err) + } + + srcAppConfig := filepath.Join(rootDir, "app.toml") + + for _, v := range cfg.Validators { + valDir := filepath.Join(payloadDir, v.Name) + // Note: per-validator config.toml is written by Network.InitNodes + // with the correct persistent_peers list. Don't overwrite it + // here — that would clobber the peer list and the chain comes + // up with zero peers. 
+ + if err := copyFile(srcAppConfig, filepath.Join(valDir, "app.toml"), 0o755); err != nil { + return fmt.Errorf("failed to copy app.toml: %w", err) + } + } + + if err := copyDir(filepath.Join(rootDir, "scripts"), filepath.Join(rootDir, "payload")); err != nil { + return fmt.Errorf("failed to copy scripts: %w", err) + } + + buildDest := filepath.Join(payloadDir, "build") + if buildDirPath != "" { + info, err := os.Stat(buildDirPath) + if err != nil { + return fmt.Errorf("failed to stat build directory %q: %w", buildDirPath, err) + } + if !info.IsDir() { + return fmt.Errorf("build path %q is not a directory", buildDirPath) + } + if err := copyDir(buildDirPath, buildDest); err != nil { + return fmt.Errorf("failed to copy build directory: %w", err) + } + } else { + if err := copyFile(appBinaryPath, filepath.Join(buildDest, "celestia-appd"), 0o755); err != nil { + return fmt.Errorf("failed to copy app binary: %w", err) + } + + if err := copyFile(nodeBinaryPath, filepath.Join(buildDest, "celestia"), 0o755); err != nil { + log.Println("failed to copy celestia binary, bridge and light nodes will not be able to start") + } + + if err := copyFile(txsimBinaryPath, filepath.Join(buildDest, "txsim"), 0o755); err != nil { + return fmt.Errorf("failed to copy txsim binary: %w", err) + } + + // Copy latency monitor binary + if err := copyFile(latencyMonitorBinaryPath, filepath.Join(buildDest, "latency-monitor"), 0o755); err != nil { + log.Printf("failed to copy latency monitor binary: %v", err) + } + + // Copy fibre server binary + if err := copyFile(fibreBinaryPath, filepath.Join(buildDest, "fibre"), 0o755); err != nil { + log.Printf("failed to copy fibre binary: %v", err) + } + + // Copy fibre-txsim binary + if err := copyFile(fibreTxsimBinaryPath, filepath.Join(buildDest, "fibre-txsim"), 0o755); err != nil { + log.Printf("failed to copy fibre-txsim binary: %v", err) + } + } + + if err := writeAWSEnv(filepath.Join(payloadDir, "vars.sh"), cfg); err != nil { + return fmt.Errorf("failed to write aws env: %w", err) + } + + if err := stageObservabilityPayload(cfg, observabilityDirPath, payloadDir); err != nil { + return fmt.Errorf("failed to stage observability payload: %w", err) + } + + // Stage encoder payload: copy binaries, genesis, and vars to the + // encoder-payload directory so deploy can create a lightweight tar. + if len(cfg.Encoders) > 0 { + if err := stageEncoderPayload(rootDir, payloadDir, appBinaryPath, fibreTxsimBinaryPath, buildDirPath); err != nil { + return fmt.Errorf("failed to stage encoder payload: %w", err) + } + } + + // Stage bridge payload: celestia-node binary + genesis + init + // script. Each bridge points at validator-0's RPC for header + // sync; talis up has already populated cfg.Validators[0].PublicIP. + if len(cfg.Bridges) > 0 { + if len(cfg.Validators) == 0 { + return fmt.Errorf("bridges configured but no validators — bring up validators first") + } + if err := stageBridgePayload(rootDir, payloadDir, nodeBinaryPath, buildDirPath, cfg); err != nil { + return fmt.Errorf("failed to stage bridge payload: %w", err) + } + } + + // Stage ev-node payload: evnode binary + templated init script. + // ev-node needs the bridge JWT + a funded fibre keyring, both + // of which are scp'd in a separate `talis fibre-bootstrap-evnode` + // step (or by hand) — the init script polls for them and only + // starts the daemon once they exist. 
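A future `talis fibre-bootstrap-evnode` step that scp's those two artifacts could also block until they are actually visible on the box before (re)starting the daemon, mirroring the init script's poll loop. A sketch only, with a hypothetical helper name and the same 5-second cadence; imports are context, fmt, os, path/filepath, and time.

```go
// Sketch only: wait until the bridge JWT and fibre keyring exist on the
// target, the Go equivalent of the poll loop in evnode_init.sh. Intended
// for a hypothetical bootstrap command; not wired into this change.
func waitForEvnodeArtifacts(ctx context.Context, jwtFile, keyringDir string) error {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		jwt, errJWT := os.Stat(jwtFile)
		kr, errKR := os.Stat(filepath.Join(keyringDir, "keyring-test"))
		// same condition as the script: non-empty JWT file and a
		// keyring-test directory inside the keyring path
		if errJWT == nil && jwt.Size() > 0 && errKR == nil && kr.IsDir() {
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("gave up waiting for %s and %s: %w", jwtFile, keyringDir, ctx.Err())
		case <-ticker.C:
		}
	}
}
```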
+ if len(cfg.Evnodes) > 0 { + if len(cfg.Validators) == 0 { + return fmt.Errorf("evnodes configured but no validators — bring up validators first") + } + if len(cfg.Bridges) == 0 { + return fmt.Errorf("evnodes configured but no bridges — at least one bridge is required") + } + if err := stageEvnodePayload(rootDir, payloadDir, buildDirPath, cfg); err != nil { + return fmt.Errorf("failed to stage evnode payload: %w", err) + } + } + + // Stage loadgen payload: evnode-txsim binary + init script + // templated with evnode-0's HTTP endpoint as the target. + if len(cfg.Loadgens) > 0 { + if len(cfg.Evnodes) == 0 { + return fmt.Errorf("loadgens configured but no evnodes — at least one ev-node is required") + } + if err := stageLoadgenPayload(rootDir, payloadDir, buildDirPath, cfg); err != nil { + return fmt.Errorf("failed to stage loadgen payload: %w", err) + } + } + + return cfg.Save(rootDir) + }, + } + + gopath := os.Getenv("GOPATH") + if gopath == "" { + home, err := os.UserHomeDir() + if err != nil { + panic("failed to determine home dir: " + err.Error()) + } + gopath = filepath.Join(home, "go") + } + gopath = filepath.Join(gopath, "bin") + + cmd.Flags().StringVarP(&chainID, chainIDFlag, "c", "", "Override the chainID in the config") + cmd.Flags().StringVarP(&rootDir, rootDirFlag, "d", ".", "root directory in which to initialize (default is the current directory)") + cmd.Flags().IntVarP(&squareSize, "ods-size", "s", appconsts.SquareSizeUpperBound, "The size of the ODS for the network (make sure to also build a celestia-app binary with a greater SquareSizeUpperBound)") + cmd.Flags().StringVarP(&buildDirPath, "build-dir", "b", "", "directory containing binaries to include in the payload") + cmd.Flags().StringVarP(&appBinaryPath, "app-binary", "a", filepath.Join(gopath, "celestia-appd"), "app binary to include in the payload (assumes the binary is installed") + cmd.Flags().StringVarP(&nodeBinaryPath, "node-binary", "n", filepath.Join(gopath, "celestia"), "node binary to include in the payload (assumes the binary is installed") + cmd.Flags().StringVarP(&txsimBinaryPath, "txsim-binary", "t", filepath.Join(gopath, "txsim"), "txsim binary to include in the payload (assumes the binary is installed)") + cmd.Flags().StringVar(&latencyMonitorBinaryPath, "latency-monitor-binary", filepath.Join(gopath, "latency-monitor"), "latency monitor binary to include in the payload") + cmd.Flags().StringVar(&fibreBinaryPath, "fibre-binary", filepath.Join(gopath, "fibre"), "fibre server binary to include in the payload") + cmd.Flags().StringVar(&fibreTxsimBinaryPath, "fibre-txsim-binary", filepath.Join(gopath, "fibre-txsim"), "fibre-txsim binary to include in the payload") + cmd.Flags().StringVar(&observabilityDirPath, "observability-dir", "", "path to observability directory containing docker-compose, Prometheus config, and scripts (required if observability nodes are configured)") + cmd.Flags().BoolVarP(&useMainnetStakingDistribution, "mainnet-staking-distribution", "m", false, "replace the default uniform staking distribution with the actual mainnet distribution") + cmd.Flags().IntVar(&fibreAccounts, "fibre-accounts", 100, "number of pre-funded fibre accounts to create per validator") + cmd.Flags().IntVar(&encoderFibreAccounts, "encoder-fibre-accounts", 100, "number of pre-funded fibre accounts to create per encoder instance") + + return cmd +} + +// createPayload takes ips created by pulumi and the path to the payload directory +// to create the payload required for the experiment. 
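One aside before createPayload: the staging guards in the RunE body above form a small dependency chain (bridges need validators, evnodes need validators and bridges, loadgens need evnodes). Collected into one table they read as below. This is a sketch only, using the package's Config type and a hypothetical helper plus the errors import; the inline fmt.Errorf checks above behave the same way.

```go
// Sketch only: the prerequisite chain enforced inline above, written as a
// table so the dependency graph is visible at a glance.
func checkStagingPrereqs(cfg Config) error {
	type rule struct {
		requested bool
		satisfied bool
		msg       string
	}
	rules := []rule{
		{len(cfg.Bridges) > 0, len(cfg.Validators) > 0, "bridges configured but no validators"},
		{len(cfg.Evnodes) > 0, len(cfg.Validators) > 0, "evnodes configured but no validators"},
		{len(cfg.Evnodes) > 0, len(cfg.Bridges) > 0, "evnodes configured but no bridges"},
		{len(cfg.Loadgens) > 0, len(cfg.Evnodes) > 0, "loadgens configured but no evnodes"},
	}
	for _, r := range rules {
		if r.requested && !r.satisfied {
			return errors.New(r.msg)
		}
	}
	return nil
}
```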
+func createPayload(ips, encoders []Instance, chainID, ppath string, squareSize int, useMainnetDistribution bool, fibreAccounts, encoderFibreAccounts int, mods ...genesis.Modifier) error { + n, err := NewNetwork(chainID, squareSize, mods...) + if err != nil { + return err + } + + stake := int64(genesis.DefaultInitialBalance) / 2 + for index, info := range ips { + if useMainnetDistribution { + stake = getMainnetStake(index) + } + err = n.AddValidator( + info.Name, + info.PublicIP, + ppath, + info.Region, + stake, + fibreAccounts, + ) + if err != nil { + return err + } + } + + // Create encoder-payload directory and keyrings for each encoder. + // Encoder keyrings are stored in /../encoder-payload// + // so that a separate, lighter tar can be built during deploy. + encoderPayloadDir := filepath.Join(filepath.Dir(ppath), "encoder-payload") + if len(encoders) > 0 { + if err := os.MkdirAll(encoderPayloadDir, 0o755); err != nil { + return fmt.Errorf("failed to create encoder-payload dir: %w", err) + } + } + for _, enc := range encoders { + if err := n.AddEncoder(enc.Name, encoderPayloadDir, encoderFibreAccounts); err != nil { + return fmt.Errorf("failed to add encoder %s: %w", enc.Name, err) + } + } + + for _, val := range n.genesis.Validators() { + fmt.Println(val.Name, val.ConsensusKey.PubKey()) + } + + err = n.InitNodes(ppath) + if err != nil { + return err + } + + err = n.SaveAddressBook(ppath, n.Peers()) + if err != nil { + return err + } + + return nil +} + +// mainnetVotingPowers contains the current Celestia mainnet staking distribution for more realistic tests. +var mainnetVotingPowers []int + +func getMainnetStake(index int) int64 { + if index < 0 { + return 0 + } + if len(mainnetVotingPowers) == 0 { + // these figures reflect the exact staking values on 09/07/25. + mainnetVotingPowers = []int{ + 44706511, 44437002, 37932228, 37544929, 29421912, 27045838, 25722376, 25574864, 19573478, 17083572, + 14156979, 10990505, 10228508, 8017107, 7985256, 7465738, 7156557, 7000454, 6957695, 6816721, + 6497714, 6133878, 6061770, 6023778, 5837045, 5817421, 5788259, 5571126, 5504182, 5500773, + 5070168, 4672609, 4360060, 4326293, 3978439, 3894538, 3746172, 3608145, 3606324, 3606128, + 3600486, 3560552, 3538637, 3456887, 3449504, 3365860, 3330140, 3329077, 3242441, 3231836, + 3163103, 3162476, 3139329, 3132732, 3117200, 3071253, 3059325, 3043103, 3039694, 3038574, + 3038322, 3025332, 3025137, 3013047, 3011854, 3010337, 3004185, 3001607, 3000732, 3000592, + 3000433, 3000236, 3000215, 3000207, 3000142, 3000128, 3000126, 2689474, 2500012, 2329666, + 2242943, 2083890, 2038490, 1957574, 1619120, 1615290, 1482045, 1291544, 1286175, 1204480, + 1202416, 1156152, 1137365, 1101315, 1045017, 1000381, 977562, 948538, 820448, 445353, + } + } + if index >= len(mainnetVotingPowers) { + return int64(mainnetVotingPowers[len(mainnetVotingPowers)-1]) + } + return int64(mainnetVotingPowers[index]) +} + +// stageEncoderPayload copies the binaries (celestia-appd, fibre-txsim), genesis, +// vars.sh, and an encoder_init.sh script into the encoder-payload directory so +// that the deploy step can create a lightweight tar for encoder instances. 
+func stageEncoderPayload(rootDir, payloadDir, appBinaryPath, fibreTxsimBinaryPath, buildDirPath string) error { + encPayload := filepath.Join(rootDir, "encoder-payload") + + // Build directory with only the two binaries an encoder needs + encBuild := filepath.Join(encPayload, "build") + if err := os.MkdirAll(encBuild, 0o755); err != nil { + return err + } + + if buildDirPath != "" { + for _, name := range []string{"celestia-appd", "fibre-txsim"} { + src := filepath.Join(buildDirPath, name) + if err := copyFile(src, filepath.Join(encBuild, name), 0o755); err != nil { + return fmt.Errorf("copy %s from build dir: %w", name, err) + } + } + } else { + if err := copyFile(appBinaryPath, filepath.Join(encBuild, "celestia-appd"), 0o755); err != nil { + return fmt.Errorf("copy celestia-appd: %w", err) + } + if err := copyFile(fibreTxsimBinaryPath, filepath.Join(encBuild, "fibre-txsim"), 0o755); err != nil { + return fmt.Errorf("copy fibre-txsim: %w", err) + } + } + + // Copy genesis and vars.sh + if err := copyFile(filepath.Join(payloadDir, "genesis.json"), filepath.Join(encPayload, "genesis.json"), 0o644); err != nil { + return fmt.Errorf("copy genesis.json: %w", err) + } + if err := copyFile(filepath.Join(payloadDir, "vars.sh"), filepath.Join(encPayload, "vars.sh"), 0o755); err != nil { + return fmt.Errorf("copy vars.sh: %w", err) + } + + // Write the encoder init script + return writeEncoderInitScript(filepath.Join(encPayload, "encoder_init.sh")) +} + +// stageBridgePayload copies the celestia-node binary, the consensus +// chain's genesis.json, and a templated bridge_init.sh into a +// bridge-payload directory. Deploy uses this to ship a lightweight tar +// to each bridge instance. The first validator's public IP is baked +// into the init script as core.ip — bridges follow validator-0 for +// header / block sync. With a multi-validator chain, validator-0 is a +// fine choice since headers come from consensus regardless. +func stageBridgePayload(rootDir, payloadDir, nodeBinaryPath, buildDirPath string, cfg Config) error { + bridgePayload := filepath.Join(rootDir, "bridge-payload") + + if err := os.RemoveAll(bridgePayload); err != nil { + return fmt.Errorf("clean old bridge-payload: %w", err) + } + + bridgeBuild := filepath.Join(bridgePayload, "build") + if err := os.MkdirAll(bridgeBuild, 0o755); err != nil { + return err + } + + // celestia-node's binary is named "celestia". --build-dir wins over + // the per-binary path so a single packed dir can drive validator + + // bridge + ev-node deploys. 
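That precedence (packed build dir first, per-binary flag as fallback) repeats across the validator, encoder, bridge, ev-node, and loadgen staging paths. If it ever needs to change, a single shared helper would keep it in one place; a sketch only, with a hypothetical name, reusing the package's existing copyFile.

```go
// Sketch only: copy the named binary from buildDir when one was supplied,
// otherwise fall back to the individually flagged path. Mirrors the
// precedence described above; this change keeps the inline form.
func stageBinary(buildDir, fallbackPath, name, destDir string) error {
	src := fallbackPath
	if buildDir != "" {
		src = filepath.Join(buildDir, name)
	}
	if err := copyFile(src, filepath.Join(destDir, name), 0o755); err != nil {
		return fmt.Errorf("copy %s: %w", name, err)
	}
	return nil
}

// e.g. stageBinary(buildDirPath, nodeBinaryPath, "celestia", bridgeBuild)
```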
+ if buildDirPath != "" { + src := filepath.Join(buildDirPath, "celestia") + if err := copyFile(src, filepath.Join(bridgeBuild, "celestia"), 0o755); err != nil { + return fmt.Errorf("copy celestia from build dir: %w", err) + } + } else { + if err := copyFile(nodeBinaryPath, filepath.Join(bridgeBuild, "celestia"), 0o755); err != nil { + return fmt.Errorf("copy celestia binary: %w", err) + } + } + + if err := copyFile(filepath.Join(payloadDir, "genesis.json"), filepath.Join(bridgePayload, "genesis.json"), 0o644); err != nil { + return fmt.Errorf("copy genesis.json: %w", err) + } + if err := copyFile(filepath.Join(payloadDir, "vars.sh"), filepath.Join(bridgePayload, "vars.sh"), 0o755); err != nil { + return fmt.Errorf("copy vars.sh: %w", err) + } + + coreIP := cfg.Validators[0].PublicIP + if coreIP == "" || coreIP == "TBD" { + return fmt.Errorf("validator-0 has no public IP yet — run `talis up` before genesis") + } + + return writeBridgeInitScript(filepath.Join(bridgePayload, "bridge_init.sh"), coreIP) +} + +// writeBridgeInitScript writes the per-bridge init script. It runs +// `celestia bridge init`, points the bridge at validator-0's gRPC for +// state sync, generates an admin JWT (printed to a known file so +// downstream ev-node deploys can scp it), and starts the bridge in a +// detached tmux session. +// +// All values that change per-experiment are baked in at staging time. +// CHAIN_ID comes from sourced vars.sh; coreIP is templated literally +// since it's only known after talis up has populated config.json. +func writeBridgeInitScript(path string, coreIP string) error { + script := `#!/bin/bash +set -euo pipefail + +CELES_BRIDGE_HOME="$HOME/.celestia-bridge" +CORE_IP="` + coreIP + `" + +export DEBIAN_FRONTEND=noninteractive +apt-get update -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" +apt-get install curl jq chrony tmux --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" + +systemctl enable chrony +systemctl start chrony + +# TCP BBR — same tuning as validators / encoders. +modprobe tcp_bbr || true +sysctl -w net.core.default_qdisc=fq +sysctl -w net.ipv4.tcp_congestion_control=bbr + +# Install celestia-node binary +cp bridge-payload/build/celestia /bin/celestia +chmod +x /bin/celestia + +source bridge-payload/vars.sh +echo "Bridge bootstrap: chain_id=$CHAIN_ID core_ip=$CORE_IP" + +# Initialize node store. p2p.network is the chain id; celestia-node +# accepts it because vars.sh exported CELESTIA_CUSTOM=$CHAIN_ID, +# which registers the chain id as a custom network at startup. +if [ ! -f "$CELES_BRIDGE_HOME/config.toml" ]; then + celestia bridge init --p2p.network "$CHAIN_ID" --node.store "$CELES_BRIDGE_HOME" +fi + +# Drop the consensus chain's genesis next to the bridge config so +# anything that reads it (peer discovery, header validation) sees +# the same genesis as validators. +mkdir -p "$CELES_BRIDGE_HOME/config" +cp bridge-payload/genesis.json "$CELES_BRIDGE_HOME/genesis.json" + +# Generate the admin JWT and stash it where downstream consumers +# (ev-node deploy) can scp it. With CELESTIA_CUSTOM set, celestia +# prints a multi-line "WARNING: custom network..." banner to stdout +# alongside the token, so we grep for the JWT line specifically — +# otherwise downstream consumers send the warning text as the auth +# header and get a 401. 
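+# Illustrative output only (token truncated). A JWT's first segment is
+# base64url of a JSON header, and any JSON object starting with '{"'
+# encodes to text beginning "eyJ", so '^eyJ' reliably isolates the token:
+#   $ celestia bridge auth admin --node.store "$CELES_BRIDGE_HOME"
+#   WARNING: custom network ...              <- banner from CELESTIA_CUSTOM
+#   eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...  <- the line we keep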
+celestia bridge auth admin --node.store "$CELES_BRIDGE_HOME" 2>/dev/null \ + | grep -E '^eyJ' | tail -1 > /root/bridge-jwt.txt +echo "Wrote /root/bridge-jwt.txt ($(wc -c < /root/bridge-jwt.txt) bytes)" + +ufw allow 26658/tcp || true # RPC (admin API) +ufw allow 2121/tcp || true # P2P +ufw allow 2121/udp || true + +# Run in tmux so the SSH session can detach. RPC is exposed on +# 0.0.0.0:26658 (auth required via JWT). Core gRPC connection to +# validator-0 is plaintext for testnet. +tmux kill-session -t bridge 2>/dev/null || true +# tmux sessions inherit env from the tmux server, not the caller, so +# CELESTIA_CUSTOM has to be re-exported inside the inner command. +tmux new-session -d -s bridge "env CELESTIA_CUSTOM=${CHAIN_ID} celestia bridge start \ + --p2p.network ${CHAIN_ID} \ + --node.store ${CELES_BRIDGE_HOME} \ + --core.ip ${CORE_IP} \ + --core.port 9091 \ + --core.tls=false \ + --rpc.addr 0.0.0.0 \ + --rpc.port 26658 \ + --metrics 2>&1 | tee -a /root/bridge.log" + +echo "Bridge started in tmux session 'bridge' — attach with: tmux attach -t bridge" +` + return os.WriteFile(path, []byte(script), 0o755) +} + +// stageEvnodePayload copies the evnode-fibre binary + a templated init +// script into evnode-payload/ so the deploy step can build a small tar +// per ev-node. The init script poll-waits for /root/bridge-jwt.txt and +// /root/keyring-fibre/ to exist before starting — both are scp'd in by +// a separate bootstrap step (or manually) so that JWT + keyring don't +// need to be embedded in the payload. +func stageEvnodePayload(rootDir, payloadDir, buildDirPath string, cfg Config) error { + evPayload := filepath.Join(rootDir, "evnode-payload") + + if err := os.RemoveAll(evPayload); err != nil { + return fmt.Errorf("clean old evnode-payload: %w", err) + } + + evBuild := filepath.Join(evPayload, "build") + if err := os.MkdirAll(evBuild, 0o755); err != nil { + return err + } + + if buildDirPath == "" { + return fmt.Errorf("--build-dir is required when evnodes are configured (must contain `evnode` binary)") + } + src := filepath.Join(buildDirPath, "evnode") + if err := copyFile(src, filepath.Join(evBuild, "evnode"), 0o755); err != nil { + return fmt.Errorf("copy evnode from build dir: %w", err) + } + + if err := copyFile(filepath.Join(payloadDir, "vars.sh"), filepath.Join(evPayload, "vars.sh"), 0o755); err != nil { + return fmt.Errorf("copy vars.sh: %w", err) + } + + bridgeIP := cfg.Bridges[0].PublicIP + coreIP := cfg.Validators[0].PublicIP + if bridgeIP == "" || bridgeIP == "TBD" { + return fmt.Errorf("bridge-0 has no public IP yet — run `talis up` before genesis") + } + if coreIP == "" || coreIP == "TBD" { + return fmt.Errorf("validator-0 has no public IP yet — run `talis up` before genesis") + } + + return writeEvnodeInitScript(filepath.Join(evPayload, "evnode_init.sh"), bridgeIP, coreIP) +} + +// writeEvnodeInitScript writes the evnode aggregator init script. +// Templated values: BRIDGE_IP (bridge-0 RPC for blob.Subscribe / Submit) +// and CORE_GRPC_ADDR (validator-0 gRPC for state queries via +// celestia-node's submit path). CHAIN_ID flows through vars.sh. +// +// The script does NOT copy bridge-jwt.txt or the fibre keyring itself — +// those must already exist on the box (manually scp'd or pushed by a +// future `talis fibre-bootstrap-evnode` command). The poll loop makes +// the script restartable: re-running deploy after copying the missing +// pieces will cleanly start the daemon. 
+func writeEvnodeInitScript(path string, bridgeIP, coreIP string) error { + script := `#!/bin/bash +set -euo pipefail + +EVNODE_HOME="$HOME/.evnode-fibre" +# celestia-node API client requires a URL scheme on the bridge addr. +BRIDGE_ADDR="http://` + bridgeIP + `:26658" +# celestia-app exposes Tendermint RPC services on :9090 and the +# cosmos.* / celestia.* state services on :9091. The fiber adapter's +# submit path queries cosmos.base.tendermint.v1beta1.Service so it +# needs the :9091 endpoint. +CORE_GRPC_ADDR="` + coreIP + `:9091" +BRIDGE_JWT_FILE="/root/bridge-jwt.txt" +FIBRE_KEYRING_DIR="/root/keyring-fibre" + +export DEBIAN_FRONTEND=noninteractive +apt-get update -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" +apt-get install curl jq chrony tmux --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" + +systemctl enable chrony +systemctl start chrony + +modprobe tcp_bbr || true +sysctl -w net.core.default_qdisc=fq +sysctl -w net.ipv4.tcp_congestion_control=bbr + +cp evnode-payload/build/evnode /bin/evnode +chmod +x /bin/evnode + +source evnode-payload/vars.sh +echo "evnode bootstrap: chain_id=$CHAIN_ID bridge=$BRIDGE_ADDR core=$CORE_GRPC_ADDR" + +mkdir -p "$EVNODE_HOME" + +# Wait for the operator-supplied dependencies. These come from a +# separate step (manual scp or 'talis fibre-bootstrap-evnode'): +# 1. /root/bridge-jwt.txt admin JWT from the bridge +# 2. /root/keyring-fibre/keyring-test cosmos-sdk file keyring with +# a Fibre payment account +# Without them the daemon would crash immediately on startup. +echo "Waiting for $BRIDGE_JWT_FILE and $FIBRE_KEYRING_DIR..." +WAITED=0 +until [ -s "$BRIDGE_JWT_FILE" ] && [ -d "$FIBRE_KEYRING_DIR/keyring-test" ]; do + sleep 5 + WAITED=$((WAITED + 5)) + if [ $((WAITED % 60)) -eq 0 ]; then + echo " still waiting after ${WAITED}s..." + fi +done +echo "Dependencies present after ${WAITED}s" + +ufw allow 7777/tcp || true # tx-ingest HTTP +ufw allow 7331/tcp || true # ev-node RPC +ufw allow 7676/tcp || true # libp2p (idle when Fiber on) + +# A passphrase file keeps the file-signer reproducible across restarts +# without baking creds into the script. +mkdir -p "$EVNODE_HOME/.signer" +if [ ! -f "$EVNODE_HOME/.signer/passphrase" ]; then + echo "evnode-fibre-passphrase" > "$EVNODE_HOME/.signer/passphrase" + chmod 600 "$EVNODE_HOME/.signer/passphrase" +fi + +tmux kill-session -t evnode 2>/dev/null || true +# CELESTIA_CUSTOM has to be present in the env that evnode runs under, +# not the caller's: tmux sessions inherit from the tmux server, not the +# shell that issues new-session. Without it, celestia-node refuses to +# accept --core-network=$CHAIN_ID since it's not in the hard-coded +# networksList. +tmux new-session -d -s evnode "env CELESTIA_CUSTOM=${CHAIN_ID} evnode \ + --home ${EVNODE_HOME} \ + --chain-id ${CHAIN_ID} \ + --bridge-addr ${BRIDGE_ADDR} \ + --bridge-token-file ${BRIDGE_JWT_FILE} \ + --core-grpc-addr ${CORE_GRPC_ADDR} \ + --core-network ${CHAIN_ID} \ + --keyring-path ${FIBRE_KEYRING_DIR} \ + --key-name fibre-0 \ + --signer-passphrase-file ${EVNODE_HOME}/.signer/passphrase \ + --log-level info \ + 2>&1 | tee -a /root/evnode.log" + +echo "ev-node started in tmux session 'evnode' — attach with: tmux attach -t evnode" +` + return os.WriteFile(path, []byte(script), 0o755) +} + +// stageLoadgenPayload stages the evnode-txsim binary + a templated +// init script for each load-gen instance. 
The script bursts traffic at +// evnode-0's HTTP /tx endpoint for a fixed duration (override via the +// TXSIM_DURATION / TXSIM_CONCURRENCY / TXSIM_TX_SIZE env vars on the +// box). Final TXSIM: line lands in /root/txsim.log. +func stageLoadgenPayload(rootDir, payloadDir, buildDirPath string, cfg Config) error { + lgPayload := filepath.Join(rootDir, "loadgen-payload") + + if err := os.RemoveAll(lgPayload); err != nil { + return fmt.Errorf("clean old loadgen-payload: %w", err) + } + lgBuild := filepath.Join(lgPayload, "build") + if err := os.MkdirAll(lgBuild, 0o755); err != nil { + return err + } + + if buildDirPath == "" { + return fmt.Errorf("--build-dir is required when loadgens are configured (must contain `evnode-txsim` binary)") + } + src := filepath.Join(buildDirPath, "evnode-txsim") + if err := copyFile(src, filepath.Join(lgBuild, "evnode-txsim"), 0o755); err != nil { + return fmt.Errorf("copy evnode-txsim from build dir: %w", err) + } + if err := copyFile(filepath.Join(payloadDir, "vars.sh"), filepath.Join(lgPayload, "vars.sh"), 0o755); err != nil { + return fmt.Errorf("copy vars.sh: %w", err) + } + + evnodeIP := cfg.Evnodes[0].PublicIP + if evnodeIP == "" || evnodeIP == "TBD" { + return fmt.Errorf("evnode-0 has no public IP yet — run `talis up` before genesis") + } + + return writeLoadgenInitScript(filepath.Join(lgPayload, "loadgen_init.sh"), evnodeIP) +} + +// writeLoadgenInitScript writes the per-loadgen init script. evnode-0's +// HTTP endpoint is templated literally because it's only known after +// `talis up`. Tunables (duration, concurrency, tx size) come through +// env vars at start time so a single deploy can drive multiple +// experiments via SSH-set environment. +func writeLoadgenInitScript(path string, evnodeIP string) error { + script := `#!/bin/bash +set -euo pipefail + +EVNODE_IP="` + evnodeIP + `" +TARGET="${TXSIM_TARGET:-http://${EVNODE_IP}:7777/tx}" +DURATION="${TXSIM_DURATION:-30s}" +CONCURRENCY="${TXSIM_CONCURRENCY:-8}" +TX_SIZE="${TXSIM_TX_SIZE:-10240}" + +export DEBIAN_FRONTEND=noninteractive +apt-get update -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" +apt-get install curl chrony tmux --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" + +systemctl enable chrony +systemctl start chrony + +modprobe tcp_bbr || true +sysctl -w net.core.default_qdisc=fq +sysctl -w net.ipv4.tcp_congestion_control=bbr + +cp loadgen-payload/build/evnode-txsim /bin/evnode-txsim +chmod +x /bin/evnode-txsim + +source loadgen-payload/vars.sh +echo "loadgen bootstrap: target=$TARGET duration=$DURATION concurrency=$CONCURRENCY tx_size=$TX_SIZE chain_id=$CHAIN_ID" + +# Wait for ev-node's tx endpoint to come up (it will only start once +# bridge JWT + fibre keyring are scp'd in by the operator). +echo "Waiting for $TARGET to accept tx (testing /stats)..." +STATS_URL="${TARGET%/tx}/stats" +WAITED=0 +until curl --silent --max-time 2 --output /dev/null "$STATS_URL" 2>/dev/null; do + sleep 5 + WAITED=$((WAITED + 5)) + if [ $((WAITED % 60)) -eq 0 ]; then + echo " still waiting for ev-node after ${WAITED}s..." 
+ fi +done +echo "ev-node reachable after ${WAITED}s; starting txsim run" + +tmux kill-session -t txsim 2>/dev/null || true +tmux new-session -d -s txsim "evnode-txsim \ + --target $TARGET \ + --duration $DURATION \ + --concurrency $CONCURRENCY \ + --tx-size $TX_SIZE \ + 2>&1 | tee -a /root/txsim.log" + +echo "txsim started in tmux session 'txsim' — attach with: tmux attach -t txsim" +echo "Final summary lands at /root/txsim.log; grep TXSIM: for the machine-parseable line" +` + return os.WriteFile(path, []byte(script), 0o755) +} + +// writeEncoderInitScript creates a minimal init script for encoder instances. +// Encoders only need the fibre-txsim binary, celestia-appd (for escrow deposits), +// a keyring, and genesis. +func writeEncoderInitScript(path string) error { + script := `#!/bin/bash +set -euo pipefail + +CELES_HOME="$HOME/.celestia-app" + +export DEBIAN_FRONTEND=noninteractive +apt-get update -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" +apt-get install curl jq chrony --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" + +systemctl enable chrony +systemctl start chrony + +# TCP BBR +modprobe tcp_bbr || true +sysctl -w net.core.default_qdisc=fq +sysctl -w net.ipv4.tcp_congestion_control=bbr + +# Install binaries +cp encoder-payload/build/celestia-appd /bin/celestia-appd +cp encoder-payload/build/fibre-txsim /bin/fibre-txsim + +source encoder-payload/vars.sh + +# Determine this encoder's directory from hostname (e.g. "encoder-0") +hostname=$(hostname) +parsed_hostname=$(echo "$hostname" | awk -F'-' '{print $1 "-" $2}') + +# Set up celestia-app home with keyring + genesis +rm -rf "$CELES_HOME" +mkdir -p "$CELES_HOME/config" +cp encoder-payload/genesis.json "$CELES_HOME/config/genesis.json" +cp -r "encoder-payload/$parsed_hostname/keyring-test" "$CELES_HOME/" + +echo "Encoder $parsed_hostname initialized" +` + return os.WriteFile(path, []byte(script), 0o755) +} + +func writeAWSEnv(varsPath string, cfg Config) error { + f, err := os.OpenFile(varsPath, + os.O_APPEND|os.O_CREATE|os.O_WRONLY, + 0o755, + ) + if err != nil { + return fmt.Errorf("failed to open vars.sh for append: %w", err) + } + defer f.Close() + + exports := []string{ + fmt.Sprintf("export AWS_DEFAULT_REGION=%q\n", cfg.S3Config.Region), + fmt.Sprintf("export AWS_ACCESS_KEY_ID=%q\n", cfg.S3Config.AccessKeyID), + fmt.Sprintf("export AWS_SECRET_ACCESS_KEY=%q\n", cfg.S3Config.SecretAccessKey), + fmt.Sprintf("export AWS_S3_BUCKET=%q\n", cfg.S3Config.BucketName), + fmt.Sprintf("export AWS_S3_ENDPOINT=%q\n", cfg.S3Config.Endpoint), + fmt.Sprintf("export CHAIN_ID=%q\n", cfg.ChainID), + // celestia-node refuses any --p2p.network value that's not in + // its known networksList unless CELESTIA_CUSTOM registers a + // custom one. Format: :: + // — only netID is required. We use the chain id so bridge + + // evnode-fibre's Network identifier matches the consensus + // chain id and celestia-node's "wrong network in core.ip" + // validation passes. + fmt.Sprintf("export CELESTIA_CUSTOM=%q\n", cfg.ChainID), + } + + for _, line := range exports { + if _, err := f.WriteString(line); err != nil { + return fmt.Errorf("failed to append to vars.sh: %w", err) + } + } + + return nil +} diff --git a/tools/talis/go.mod b/tools/talis/go.mod new file mode 100644 index 0000000000..4478fce32e --- /dev/null +++ b/tools/talis/go.mod @@ -0,0 +1,289 @@ +module github.com/evstack/ev-node/tools/talis + +go 1.26.1 + +// Replace directives mirror celestia-app/feat/fibre-payments at +// fe8cb867. 
They are required because celestia-app's own go.mod uses +// celestia-forked SDK / cometbft / IBC modules, and Go's module system +// only honors `replace` directives in the main module — so any module +// that imports celestia-app must repeat them here. +replace ( + cosmossdk.io/api => github.com/celestiaorg/cosmos-sdk/api v0.7.6 + cosmossdk.io/log => github.com/celestiaorg/cosmos-sdk/log v1.3.0 + cosmossdk.io/store => github.com/celestiaorg/cosmos-sdk/store v1.1.3-celestia.1 + cosmossdk.io/x/tx => github.com/celestiaorg/cosmos-sdk/x/tx v0.13.9 + cosmossdk.io/x/upgrade => github.com/celestiaorg/cosmos-sdk/x/upgrade v0.2.0 + github.com/cometbft/cometbft => github.com/celestiaorg/celestia-core v0.40.2 + github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v0.52.3 + github.com/cosmos/ibc-go/v8 => github.com/celestiaorg/ibc-go/v8 v8.7.2 + github.com/cosmos/ledger-cosmos-go => github.com/cosmos/ledger-cosmos-go v0.16.0 + github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 + github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.55.0-tm-v0.34.35 +) + +require ( + cloud.google.com/go/compute v1.60.0 + cosmossdk.io/math v1.5.3 + github.com/aws/aws-sdk-go-v2 v1.41.6 + github.com/aws/aws-sdk-go-v2/config v1.32.14 + github.com/aws/aws-sdk-go-v2/credentials v1.19.14 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.1 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.297.1 + github.com/aws/aws-sdk-go-v2/service/s3 v1.99.1 + github.com/celestiaorg/celestia-app/v9 v9.0.0-20260427135040-fe8cb867259e + github.com/celestiaorg/go-square/v4 v4.0.0-rc4 + github.com/cometbft/cometbft v1.0.1 + github.com/cosmos/cosmos-sdk v0.50.13 + github.com/digitalocean/godo v1.186.0 + github.com/joho/godotenv v1.5.1 + github.com/spf13/cobra v1.10.2 + github.com/spf13/viper v1.21.0 + github.com/stretchr/testify v1.11.1 + golang.org/x/oauth2 v0.36.0 + google.golang.org/api v0.276.0 +) + +require ( + cel.dev/expr v0.25.1 // indirect + cloud.google.com/go v0.123.0 // indirect + cloud.google.com/go/auth v0.20.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect + cloud.google.com/go/iam v1.5.3 // indirect + cloud.google.com/go/monitoring v1.24.3 // indirect + cloud.google.com/go/storage v1.61.3 // indirect + cosmossdk.io/api v1.0.0 // indirect + cosmossdk.io/client/v2 v2.0.0-beta.8 // indirect + cosmossdk.io/collections v0.4.0 // indirect + cosmossdk.io/core v1.1.0 // indirect + cosmossdk.io/depinject v1.2.1 // indirect + cosmossdk.io/errors v1.0.2 // indirect + cosmossdk.io/log v1.6.0 // indirect + cosmossdk.io/store v1.1.2 // indirect + cosmossdk.io/x/circuit v0.1.1 // indirect + cosmossdk.io/x/evidence v0.1.1 // indirect + cosmossdk.io/x/feegrant v0.1.1 // indirect + cosmossdk.io/x/tx v0.13.8 // indirect + cosmossdk.io/x/upgrade v0.1.4 // indirect + filippo.io/edwards25519 v1.1.1 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.2 // indirect + github.com/DataDog/datadog-go v4.8.3+incompatible // indirect + github.com/DataDog/zstd v1.5.7 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + 
github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 // indirect + github.com/RaduBerinde/axisds v0.1.0 // indirect + github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.9 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.14 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.22 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.15 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.19 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.10 // indirect + github.com/aws/smithy-go v1.25.0 // indirect + github.com/bcp-innovations/hyperlane-cosmos v1.1.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect + github.com/bgentry/speakeasy v0.2.0 // indirect + github.com/bits-and-blooms/bitset v1.24.0 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/celestiaorg/go-square/v2 v2.3.3 // indirect + github.com/celestiaorg/go-square/v3 v3.0.2 // indirect + github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 // indirect + github.com/celestiaorg/nmt v0.24.3 // indirect + github.com/celestiaorg/rsmt2d v0.15.2 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chzyer/readline v1.5.1 // indirect + github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect + github.com/cockroachdb/apd/v2 v2.0.2 // indirect + github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b // indirect + github.com/cockroachdb/errors v1.12.0 // indirect + github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 // indirect + github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect + github.com/cockroachdb/pebble v1.1.5 // indirect + github.com/cockroachdb/pebble/v2 v2.1.4 // indirect + github.com/cockroachdb/redact v1.1.6 // indirect + github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20250429170803-42689b6311bb // indirect + github.com/cometbft/cometbft-db v1.0.4 // indirect + github.com/consensys/gnark v0.14.0 // indirect + github.com/consensys/gnark-crypto v0.19.2 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/cosmos-db v1.1.3 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect + github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/gogogateway v1.2.0 // indirect + github.com/cosmos/gogoproto v1.7.2 // indirect + github.com/cosmos/iavl v1.2.8 // indirect + github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8 v8.2.0 // indirect + github.com/cosmos/ibc-go/modules/capability v1.0.1 // indirect + github.com/cosmos/ibc-go/v8 v8.7.0 // indirect + github.com/cosmos/ics23/go v0.11.0 // 
indirect + github.com/cosmos/ledger-cosmos-go v0.15.0 // indirect + github.com/danieljoos/wincred v1.2.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/desertbit/timer v1.0.1 // indirect + github.com/dgraph-io/badger/v4 v4.5.1 // indirect + github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/dvsekhvalnov/jose2go v1.7.0 // indirect + github.com/emicklei/dot v1.6.2 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect + github.com/ethereum/go-ethereum v1.17.0 // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/filecoin-project/go-clock v0.1.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/getsentry/sentry-go v0.42.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.4 // indirect + github.com/go-kit/kit v0.13.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.1 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.5.0 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gogo/googleapis v1.4.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v1.0.0 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/flatbuffers v25.1.24+incompatible // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/orderedcode v0.0.1 // indirect + github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.14 // indirect + github.com/googleapis/gax-go/v2 v2.21.0 // indirect + github.com/gorilla/handlers v1.5.2 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect + github.com/grafana/otel-profiling-go v0.5.1 // indirect + github.com/grafana/pyroscope-go v1.2.8 // indirect + github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.72 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-getter v1.8.6 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-metrics v0.5.4 // indirect + github.com/hashicorp/go-plugin v1.6.3 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-version v1.8.0 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/yamux v0.1.2 // indirect + github.com/hdevalence/ed25519consensus 
v0.2.0 // indirect + github.com/holiman/uint256 v1.3.2 // indirect + github.com/huandu/skiplist v1.2.1 // indirect + github.com/iancoleman/orderedmap v0.3.0 // indirect + github.com/iancoleman/strcase v0.3.0 // indirect + github.com/improbable-eng/grpc-web v0.15.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ingonyama-zk/icicle-gnark/v3 v3.2.2 // indirect + github.com/jmhodges/levigo v1.0.0 // indirect + github.com/klauspost/compress v1.18.5 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/klauspost/reedsolomon v1.13.4-0.20260420101718-f7e5efe6123a // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/lib/pq v1.12.3 // indirect + github.com/linxGnu/grocksdb v1.9.8 // indirect + github.com/manifoldco/promptui v0.9.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/minio/highwayhash v1.0.4 // indirect + github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.3.0 // indirect + github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.67.5 // indirect + github.com/prometheus/procfs v0.19.2 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/ronanh/intcomp v1.1.1 // indirect + github.com/rs/cors v1.11.1 // indirect + github.com/rs/zerolog v1.35.0 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/sasha-s/go-deadlock v0.3.9 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tendermint/go-amino v0.16.0 // indirect + github.com/tidwall/btree v1.7.0 // indirect + github.com/ulikunitz/xz v0.5.15 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/zondax/golem v0.27.0 // indirect + github.com/zondax/hid v0.9.2 // indirect + github.com/zondax/ledger-go v1.0.1 // indirect + go.etcd.io/bbolt v1.4.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.68.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect + go.opentelemetry.io/otel v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 // indirect + 
go.opentelemetry.io/otel/metric v1.43.0 // indirect + go.opentelemetry.io/otel/sdk v1.43.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect + go.opentelemetry.io/otel/trace v1.43.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.1 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.50.0 // indirect + golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect + golang.org/x/net v0.52.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/sys v0.43.0 // indirect + golang.org/x/term v0.42.0 // indirect + golang.org/x/text v0.36.0 // indirect + golang.org/x/time v0.15.0 // indirect + google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260406210006-6f92a3bedf2d // indirect + google.golang.org/grpc v1.80.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/v3 v3.5.2 // indirect + nhooyr.io/websocket v1.8.17 // indirect + pgregory.net/rapid v1.2.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/tools/talis/go.sum b/tools/talis/go.sum new file mode 100644 index 0000000000..27ab17b16b --- /dev/null +++ b/tools/talis/go.sum @@ -0,0 +1,1248 @@ +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= +cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= +cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= +cloud.google.com/go/auth v0.20.0 h1:kXTssoVb4azsVDoUiF8KvxAqrsQcQtB53DcSgta74CA= +cloud.google.com/go/auth v0.20.0/go.mod h1:942/yi/itH1SsmpyrbnTMDgGfdy2BUqIKyd0cyYLc5Q= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute v1.60.0 h1:CqGt23ysz990ZZe1vq/9aDPKKnmwM6kcC7Y1Q05H2kI= +cloud.google.com/go/compute v1.60.0/go.mod h1:Xm6PbsLgBpAg4va77ljbBdpMjzuU+uPp5Ze2dnZq7lw= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= +cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/logging v1.13.2 h1:qqlHCBvieJT9Cdq4QqYx1KPadCQ2noD4FK02eNqHAjA= +cloud.google.com/go/logging v1.13.2/go.mod h1:zaybliM3yun1J8mU2dVQ1/qDzjbOqEijZCn6hSBtKak= +cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8= +cloud.google.com/go/longrunning v0.8.0/go.mod h1:UmErU2Onzi+fKDg2gR7dusz11Pe26aknR4kHmJJqIfk= +cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE= +cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI= +cloud.google.com/go/storage v1.61.3 h1:VS//ZfBuPGDvakfD9xyPW1RGF1Vy3BWUoVZXgW1KMOg= +cloud.google.com/go/storage v1.61.3/go.mod 
h1:JtqK8BBB7TWv0HVGHubtUdzYYrakOQIsMLffZ2Z/HWk= +cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= +cloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s= +cosmossdk.io/client/v2 v2.0.0-beta.8 h1:RXMJdA4V9H1H3/3BfMD6dAW3lF8W9DpNPPYnKD+ArxY= +cosmossdk.io/client/v2 v2.0.0-beta.8/go.mod h1:x+E2eji+ToMtUIqKzoJ5mJIhat+Zak47xZ8jOYjJQBA= +cosmossdk.io/collections v0.4.0 h1:PFmwj2W8szgpD5nOd8GWH6AbYNi1f2J6akWXJ7P5t9s= +cosmossdk.io/collections v0.4.0/go.mod h1:oa5lUING2dP+gdDquow+QjlF45eL1t4TJDypgGd+tv0= +cosmossdk.io/core v1.1.0 h1:iJ7j2DjNsFzg4/z4ImNQYzy2D4LfMCsaQ8Lrz1KCmxk= +cosmossdk.io/core v1.1.0/go.mod h1:qGmJxBFHobvG1k4bROQnueslotBU5MIKZLC57xVBYYI= +cosmossdk.io/depinject v1.2.1 h1:eD6FxkIjlVaNZT+dXTQuwQTKZrFZ4UrfCq1RKgzyhMw= +cosmossdk.io/depinject v1.2.1/go.mod h1:lqQEycz0H2JXqvOgVwTsjEdMI0plswI7p6KX+MVqFOM= +cosmossdk.io/errors v1.0.2 h1:wcYiJz08HThbWxd/L4jObeLaLySopyyuUFB5w4AGpCo= +cosmossdk.io/errors v1.0.2/go.mod h1:0rjgiHkftRYPj//3DrD6y8hcm40HcPv/dR4R/4efr0k= +cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= +cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= +cosmossdk.io/x/circuit v0.1.1 h1:KPJCnLChWrxD4jLwUiuQaf5mFD/1m7Omyo7oooefBVQ= +cosmossdk.io/x/circuit v0.1.1/go.mod h1:B6f/urRuQH8gjt4eLIXfZJucrbreuYrKh5CSjaOxr+Q= +cosmossdk.io/x/evidence v0.1.1 h1:Ks+BLTa3uftFpElLTDp9L76t2b58htjVbSZ86aoK/E4= +cosmossdk.io/x/evidence v0.1.1/go.mod h1:OoDsWlbtuyqS70LY51aX8FBTvguQqvFrt78qL7UzeNc= +cosmossdk.io/x/feegrant v0.1.1 h1:EKFWOeo/pup0yF0svDisWWKAA9Zags6Zd0P3nRvVvw8= +cosmossdk.io/x/feegrant v0.1.1/go.mod h1:2GjVVxX6G2fta8LWj7pC/ytHjryA6MHAJroBWHFNiEQ= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.1.1 h1:YpjwWWlNmGIDyXOn8zLzqiD+9TyIlPhGFG96P39uBpw= +filippo.io/edwards25519 v1.1.1/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= +github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= +github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= +github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 h1:DHa2U07rk8syqvCge0QIGMCE1WxGj9njT44GH7zNJLQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0/go.mod 
h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 h1:UnDZ/zFfG1JhH/DqxIZYU/1CUAlTUScoXD/LcM2Ykk8= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0/go.mod h1:IA1C1U7jO/ENqm/vhi7V9YYpBsp+IMyqNrEN94N7tVc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0 h1:7t/qx5Ost0s0wbA/VDrByOooURhp+ikYwv20i9Y07TQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0/go.mod h1:vB2GH9GAYYJTO3mEn8oYwzEdhlayZIdQz6zdzgUIRvA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 h1:0s6TxfCu2KHkkZPnBfsQ2y5qia0jl3MMrmBhu3nCOYk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI= +github.com/RaduBerinde/axisds v0.1.0 h1:YItk/RmU5nvlsv/awo2Fjx97Mfpt4JfgtEVAGPrLdz8= +github.com/RaduBerinde/axisds v0.1.0/go.mod h1:UHGJonU9z4YYGKJxSaC6/TNcLOBptpmM5m2Cksbnw0Y= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 h1:bsU8Tzxr/PNz75ayvCnxKZWEYdLMPDkUgticP4a4Bvk= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54/go.mod h1:0tr7FllbE9gJkHq7CVeeDDFAFKQVy5RnCSSNBOvdqbc= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= +github.com/adlio/schema v1.4.0 h1:dekxG6P0my/bPvlyWzMULelR2Xej8RGErlnJcoY5ddw= +github.com/adlio/schema v1.4.0/go.mod h1:3/ojUldWBCWp4e+6VN9ets6unG5WdqbjF7vyzM0zTVQ= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units 
v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.41.6 h1:1AX0AthnBQzMx1vbmir3Y4WsnJgiydmnJjiLu+LvXOg= +github.com/aws/aws-sdk-go-v2 v1.41.6/go.mod h1:dy0UzBIfwSeot4grGvY1AqFWN5zgziMmWGzysDnHFcQ= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.9 h1:adBsCIIpLbLmYnkQU+nAChU5yhVTvu5PerROm+/Kq2A= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.9/go.mod h1:uOYhgfgThm/ZyAuJGNQ5YgNyOlYfqnGpTHXvk3cpykg= +github.com/aws/aws-sdk-go-v2/config v1.32.14 h1:opVIRo/ZbbI8OIqSOKmpFaY7IwfFUOCCXBsUpJOwDdI= +github.com/aws/aws-sdk-go-v2/config v1.32.14/go.mod h1:U4/V0uKxh0Tl5sxmCBZ3AecYny4UNlVmObYjKuuaiOo= +github.com/aws/aws-sdk-go-v2/credentials v1.19.14 h1:n+UcGWAIZHkXzYt87uMFBv/l8THYELoX6gVcUvgl6fI= +github.com/aws/aws-sdk-go-v2/credentials v1.19.14/go.mod h1:cJKuyWB59Mqi0jM3nFYQRmnHVQIcgoxjEMAbLkpr62w= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21 h1:NUS3K4BTDArQqNu2ih7yeDLaS3bmHD0YndtA6UP884g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21/go.mod h1:YWNWJQNjKigKY1RHVJCuupeWDrrHjRqHm0N9rdrWzYI= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.1 h1:1hWFp+52Vq8Fevy/KUhbW/1MEApMz7uitCF/PQXRJpk= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.1/go.mod h1:sIec8j802/rCkCKgZV678HFR0s7lhQUYXT77tIvlaa4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22 h1:GmLa5Kw1ESqtFpXsx5MmC84QWa/ZrLZvlJGa2y+4kcQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22/go.mod h1:6sW9iWm9DK9YRpRGga/qzrzNLgKpT2cIxb7Vo2eNOp0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22 h1:dY4kWZiSaXIzxnKlj17nHnBcXXBfac6UlsAx2qL6XrU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22/go.mod h1:KIpEUx0JuRZLO7U6cbV204cWAEco2iC3l061IxlwLtI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 h1:qYQ4pzQ2Oz6WpQ8T3HvGHnZydA72MnLuFK9tJwmrbHw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23 h1:FPXsW9+gMuIeKmz7j6ENWcWtBGTe1kH8r9thNt5Uxx4= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23/go.mod h1:7J8iGMdRKk6lw2C+cMIphgAnT8uTwBwNOsGkyOCm80U= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.297.1 h1:9nfacm+uWgbdPaOplvJjxN50qgthexb7GOR/97ygc5o= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.297.1/go.mod h1:E1pnYwWFZ8N3REmeN9Fe/Zipbpps4HJj8DQGNnLUMYc= 
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 h1:HtOTYcbVcGABLOVuPYaIihj6IlkqubBwFj10K5fxRek= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8/go.mod h1:VsK9abqQeGlzPgUr+isNWzPlK2vKe9INMLWnY65f5Xs= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.14 h1:xnvDEnw+pnj5mctWiYuFbigrEzSm35x7k4KS/ZkCANg= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.14/go.mod h1:yS5rNogD8e0Wu9+l3MUwr6eENBzEeGejvINpN5PAYfY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 h1:PUmZeJU6Y1Lbvt9WFuJ0ugUK2xn6hIWUBBbKuOWF30s= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22/go.mod h1:nO6egFBoAaoXze24a2C0NjQCvdpk8OueRoYimvEB9jo= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.22 h1:SE+aQ4DEqG53RRCAIHlCf//B2ycxGH7jFkpnAh/kKPM= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.22/go.mod h1:ES3ynECd7fYeJIL6+oax+uIEljmfps0S70BaQzbMd/o= +github.com/aws/aws-sdk-go-v2/service/s3 v1.99.1 h1:kU/eBN5+MWNo/LcbNa4hWDdN76hdcd7hocU5kvu7IsU= +github.com/aws/aws-sdk-go-v2/service/s3 v1.99.1/go.mod h1:Fw9aqhJicIVee1VytBBjH+l+5ov6/PhbtIK/u3rt/ls= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.9 h1:QKZH0S178gCmFEgst8hN0mCX1KxLgHBKKY/CLqwP8lg= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.9/go.mod h1:7yuQJoT+OoH8aqIxw9vwF+8KpvLZ8AWmvmUWHsGQZvI= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.15 h1:lFd1+ZSEYJZYvv9d6kXzhkZu07si3f+GQ1AaYwa2LUM= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.15/go.mod h1:WSvS1NLr7JaPunCXqpJnWk1Bjo7IxzZXrZi1QQCkuqM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.19 h1:dzztQ1YmfPrxdrOiuZRMF6fuOwWlWpD2StNLTceKpys= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.19/go.mod h1:YO8TrYtFdl5w/4vmjL8zaBSsiNp3w0L1FfKVKenZT7w= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.10 h1:p8ogvvLugcR/zLBXTXrTkj0RYBUdErbMnAFFp12Lm/U= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.10/go.mod h1:60dv0eZJfeVXfbT1tFJinbHrDfSJ2GZl4Q//OSSNAVw= +github.com/aws/smithy-go v1.25.0 h1:Sz/XJ64rwuiKtB6j98nDIPyYrV1nVNJ4YU74gttcl5U= +github.com/aws/smithy-go v1.25.0/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/bcp-innovations/hyperlane-cosmos v1.1.0 h1:WXt+WrKv2DG/xVIkLvggDRbi/2law104Vj6AWZGxHNw= +github.com/bcp-innovations/hyperlane-cosmos v1.1.0/go.mod h1:NP59yKAk2qFaT7+FSCh7kkoKKLlTxXNdIlxMstAJ5no= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bgentry/speakeasy v0.2.0 h1:tgObeVOf8WAvtuAX6DhJ4xks4CFNwPDZiqzGqIHE51E= +github.com/bgentry/speakeasy v0.2.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.24.0 h1:H4x4TuulnokZKvHLfzVRTHJfFfnHEeSYJizujEZvmAM= +github.com/bits-and-blooms/bitset v1.24.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/blang/semver/v4 v4.0.0 
h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/btcsuite/btcd/btcec/v2 v2.3.5 h1:dpAlnAwmT1yIBm3exhT1/8iUSD98RDJM5vqJVQDQLiU= +github.com/btcsuite/btcd/btcec/v2 v2.3.5/go.mod h1:m22FrOAiuxl/tht9wIqAoGHcbnCCaPWyauO8y2LGGtQ= +github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c= +github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= +github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= +github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/celestiaorg/celestia-app/v9 v9.0.0-20260427135040-fe8cb867259e h1:0UJZ/CA2iPKdXP/tAK1qG35NPVnuXl7iwBDLOpbt2As= +github.com/celestiaorg/celestia-app/v9 v9.0.0-20260427135040-fe8cb867259e/go.mod h1:HKqFxEeuWopDU87dCOkLktn6P4N+wEeVm1FdSEESvSQ= +github.com/celestiaorg/celestia-core v0.40.2 h1:+8D3anWx0mn0Wyp/Hahml/1ZiyDc5yGbIR/k4iFtqms= +github.com/celestiaorg/celestia-core v0.40.2/go.mod h1:ZCrmRE1UQzgZfho4Og6tAHtH1KY6s8Jpri5+EKobV5c= +github.com/celestiaorg/cosmos-sdk v0.52.3 h1:YPMFCycTw77P7tn+HQHTmmdBwXWNMDOrZ6/xVPK9nvM= +github.com/celestiaorg/cosmos-sdk v0.52.3/go.mod h1:2N4NRio08+WQsB7hsKo/ELXCQSWl78GiYdd9M1H6MpQ= +github.com/celestiaorg/cosmos-sdk/api v0.7.6 h1:81in9Zk+noz0ko+hZFSSK8L1aawFN8/CmdcQAUhbiUU= +github.com/celestiaorg/cosmos-sdk/api v0.7.6/go.mod h1:1BgQSufu6ZQkst3YBIHDCo/TPUrhfU4fV7tOI0ftql8= +github.com/celestiaorg/cosmos-sdk/log v1.3.0 h1:DfckA2UihWckeKHBQU3UXkF2G/qEmsPxd3LtGYB9HeM= +github.com/celestiaorg/cosmos-sdk/log v1.3.0/go.mod h1:lQTBplaW3HQLKQdPaQq+ElW6zASAoo9r3bJ7pOr8SWo= +github.com/celestiaorg/cosmos-sdk/store v1.1.3-celestia.1 h1:lEP9DjBMA5frZy/B1IYhAdbJrEwutwGQ+EiTOs4Lm8M= +github.com/celestiaorg/cosmos-sdk/store v1.1.3-celestia.1/go.mod h1:7+G078fe9GK42pXdYGncWm820tEJkzk+jc6K333Q7aI= +github.com/celestiaorg/cosmos-sdk/x/tx v0.13.9 h1:YELTe9/1YksoqSd+Hm1uDZ6auHFNhyJrk5jvli0lbT4= +github.com/celestiaorg/cosmos-sdk/x/tx v0.13.9/go.mod h1:V6DImnwJMTq5qFjeGWpXNiT/fjgE4HtmclRmTqRVM3w= +github.com/celestiaorg/cosmos-sdk/x/upgrade v0.2.0 h1:GyDYfK8dLETlUI7F+w+3QYQgAszUegMXgB6cTbDm7CA= +github.com/celestiaorg/cosmos-sdk/x/upgrade v0.2.0/go.mod h1:T4K9O18zQNKNpt4YvTL3lcUt4aKOEU05ZIFWVdQi3Ak= +github.com/celestiaorg/go-square/v2 v2.3.3 h1:vhu6Lt39km19Q/Jk4nS3r2cuWJq6jFg+/1+iG8YGftY= +github.com/celestiaorg/go-square/v2 v2.3.3/go.mod h1:vY5RRv+qRmEVjPF6dAdr0dyLwKmTTDHHffENPQw8pUA= +github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= +github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/go-square/v4 v4.0.0-rc4 h1:bh7rney5lLq4Z9OpaSg9ckY9bt6BZUW0VYnFOi1RPwQ= +github.com/celestiaorg/go-square/v4 v4.0.0-rc4/go.mod h1:7Vc4H3u3gvcfLFp84EqyMVT/9r0ZGUgZP4aYMOYXVsw= +github.com/celestiaorg/ibc-go/v8 v8.7.2 h1:AWae851fdX7pJWlGnUBKlKJzpr4c2t5m4TLs6vDfmAY= +github.com/celestiaorg/ibc-go/v8 v8.7.2/go.mod h1:E3WTax+cfyDIehNRpwEI96/0E8GBtU1g9XWr18qUGZ8= +github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= +github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= +github.com/celestiaorg/nmt v0.24.3 h1:ylQnRlXkVoTtq36CxtCyXYZX4JISBsHgKlAAUAnf7ig= 
+github.com/celestiaorg/nmt v0.24.3/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= +github.com/celestiaorg/rsmt2d v0.15.2 h1:wHqNqaBboSX5e8Czm4FnBnys4RPp5gSNm4CAcsXAyTU= +github.com/celestiaorg/rsmt2d v0.15.2/go.mod h1:1NyWG9hj7veHbLmpQUKg+77teLuVgq0kpv3FS9nEtL4= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= +github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= +github.com/cockroachdb/apd/v2 v2.0.2/go.mod 
h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b h1:SHlYZ/bMx7frnmeqCu+xm0TCxXLzX3jQIVuFbnFGtFU= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5 h1:UycK/E0TkisVrQbSoxvU827FwgBBcZ95nRRmpj/12QI= +github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5/go.mod h1:jsaKMvD3RBCATk1/jbUZM8C9idWBJME9+VRZ5+Liq1g= +github.com/cockroachdb/errors v1.12.0 h1:d7oCs6vuIMUQRVbi6jWWWEJZahLCfJpnJSVobd1/sUo= +github.com/cockroachdb/errors v1.12.0/go.mod h1:SvzfYNNBshAVbZ8wzNc/UPK3w1vf0dKDUP41ucAIf7g= +github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 h1:pU88SPhIFid6/k0egdR5V6eALQYq2qbSmukrkgIh/0A= +github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 h1:ASDL+UJcILMqgNeV5jiqR4j+sTuvQNHdf2chuKj1M5k= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506/go.mod h1:Mw7HqKr2kdtu6aYGn3tPmAftiP3QPX63LdK/zcariIo= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= +github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= +github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= +github.com/cockroachdb/pebble/v2 v2.1.4 h1:j9wPgMDbkErFdAKYFGhsoCcvzcjR+6zrJ4jhKtJ6bOk= +github.com/cockroachdb/pebble/v2 v2.1.4/go.mod h1:Reo1RTniv1UjVTAu/Fv74y5i3kJ5gmVrPhO9UtFiKn8= +github.com/cockroachdb/redact v1.1.6 h1:zXJBwDZ84xJNlHl1rMyCojqyIxv+7YUpQiJLQ7n4314= +github.com/cockroachdb/redact v1.1.6/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b h1:VXvSNzmr8hMj8XTuY0PT9Ane9qZGul/p67vGYwl9BFI= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= +github.com/cockroachdb/tokenbucket v0.0.0-20250429170803-42689b6311bb h1:3bCgBvB8PbJVMX1ouCcSIxvsqKPYM7gs72o0zC76n9g= +github.com/cockroachdb/tokenbucket v0.0.0-20250429170803-42689b6311bb/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/cometbft/cometbft-db v1.0.4 h1:cezb8yx/ZWcF124wqUtAFjAuDksS1y1yXedvtprUFxs= +github.com/cometbft/cometbft-db v1.0.4/go.mod h1:M+BtHAGU2XLrpUxo3Nn1nOCcnVCiLM9yx5OuT0u5SCA= +github.com/consensys/gnark v0.14.0 h1:RG+8WxRanFSFBSlmCDRJnYMYYKpH3Ncs5SMzg24B5HQ= +github.com/consensys/gnark v0.14.0/go.mod h1:1IBpDPB/Rdyh55bQRR4b0z1WvfHQN1e0020jCvKP2Gk= +github.com/consensys/gnark-crypto v0.19.2 h1:qrEAIXq3T4egxqiliFFoNrepkIWVEeIYwt3UL0fvS80= +github.com/consensys/gnark-crypto v0.19.2/go.mod h1:rT23F0XSZqE0mUA0+pRtnL56IbPxs6gp4CeRsBk4XS0= +github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= +github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd 
v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-db v1.1.3 h1:7QNT77+vkefostcKkhrzDK9uoIEryzFrU9eoMeaQOPY= +github.com/cosmos/cosmos-db v1.1.3/go.mod h1:kN+wGsnwUJZYn8Sy5Q2O0vCYA99MJllkKASbs6Unb9U= +github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= +github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= +github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= +github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU= +github.com/cosmos/gogoproto v1.7.2 h1:5G25McIraOC0mRFv9TVO139Uh3OklV2hczr13KKVHCA= +github.com/cosmos/gogoproto v1.7.2/go.mod h1:8S7w53P1Y1cHwND64o0BnArT6RmdgIvsBuco6uTllsk= +github.com/cosmos/iavl v1.2.8 h1:55F96BGUJ7KT7h+Ky/cEqS+pEvhFqsU4O8Th3F0N1js= +github.com/cosmos/iavl v1.2.8/go.mod h1:FRHN4tO+6crf0p2zsqye+nAbsMgiwdkxpWm18DyP6+Y= +github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8 v8.2.0 h1:rM+S14DFiqmu6Rc3PuhvWqwywPsnt/CbIslSnBftPFs= +github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8 v8.2.0/go.mod h1:O5H9Ic3Pe6cmJn1eqlj5N48sLb8WQ1VWmDP4/11g/4E= +github.com/cosmos/ibc-go/modules/capability v1.0.1 h1:ibwhrpJ3SftEEZRxCRkH0fQZ9svjthrX2+oXdZvzgGI= +github.com/cosmos/ibc-go/modules/capability v1.0.1/go.mod h1:rquyOV262nGJplkumH+/LeYs04P3eV8oB7ZM4Ygqk4E= +github.com/cosmos/ics23/go v0.11.0 h1:jk5skjT0TqX5e5QJbEnwXIS2yI2vnmLOgpQPeM5RtnU= +github.com/cosmos/ics23/go v0.11.0/go.mod h1:A8OjxPE67hHST4Icw94hOxxFEJMBG031xIGF/JHNIY0= +github.com/cosmos/ledger-cosmos-go v0.16.0 h1:YKlWPG9NnGZIEUb2bEfZ6zhON1CHlNTg0QKRRGcNEd0= +github.com/cosmos/ledger-cosmos-go v0.16.0/go.mod h1:WrM2xEa8koYoH2DgeIuZXNarF7FGuZl3mrIOnp3Dp0o= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= +github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= 
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/desertbit/timer v1.0.1 h1:yRpYNn5Vaaj6QXecdLMPMJsW81JLiI1eokUft5nBmeo= +github.com/desertbit/timer v1.0.1/go.mod h1:htRrYeY5V/t4iu1xCJ5XsQvp4xve8QulXXctAzxqcwE= +github.com/dgraph-io/badger/v4 v4.5.1 h1:7DCIXrQjo1LKmM96YD+hLVJ2EEsyyoWxJfpdd56HLps= +github.com/dgraph-io/badger/v4 v4.5.1/go.mod h1:qn3Be0j3TfV4kPbVoK0arXCD1/nr1ftth6sbL5jxdoA= +github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I= +github.com/dgraph-io/ristretto/v2 v2.1.0/go.mod h1:uejeqfYXpUomfse0+lO+13ATz4TypQYLJZzBSAemuB4= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/digitalocean/godo v1.186.0 h1:aEYwSumR47vD1tX5mdPdznHrR72DBfHcmh0v9MxCwCw= +github.com/digitalocean/godo v1.186.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.7.0 h1:bnQc8+GMnidJZA8zc6lLEAb4xNrIqHwO+9TzqvtQZPo= +github.com/dvsekhvalnov/jose2go v1.7.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane 
v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA= +github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU= +github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g= +github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= +github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= +github.com/ethereum/go-ethereum v1.17.0 h1:2D+1Fe23CwZ5tQoAS5DfwKFNI1HGcTwi65/kRlAVxes= +github.com/ethereum/go-ethereum v1.17.0/go.mod h1:2W3msvdosS/MCWytpqTcqgFiRYbTH59FxDJzqah120o= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= +github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/getsentry/sentry-go v0.42.0 h1:eeFMACuZTbUQf90RE8dE4tXeSe4CZyfvR1MBL7RLEt8= +github.com/getsentry/sentry-go v0.42.0/go.mod h1:eRXCoh3uvmjQLY6qu63BjUZnaBu5L5WhMV1RwYO8W5s= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.1.4 h1:moDMcTHmvE6Groj34emNPLs/qtYXRVcd6S7NHbHz3kA= +github.com/go-jose/go-jose/v4 v4.1.4/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= +github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.6.1 h1:4hvbpePJKnIzH1B+8OR/JPbTx37NktoI9LE2QZBBkvE= +github.com/go-logfmt/logfmt v0.6.1/go.mod h1:EV2pOAQoZaT1ZXZbqDl5hrymndi4SY9ED9/z6CO0XAk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro= +github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod 
h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1-0.20201022092350-68b0159b7869/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod 
h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/flatbuffers v25.1.24+incompatible h1:4wPqL3K7GzBd1CwyhSd3usxLKOaJN/AC6puCca6Jm7o= +github.com/google/flatbuffers v25.1.24+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= 
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8= +github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg= +github.com/googleapis/gax-go/v2 v2.21.0 h1:h45NjjzEO3faG9Lg/cFrBh2PgegVVgzqKzuZl/wMbiI= +github.com/googleapis/gax-go/v2 v2.21.0/go.mod h1:But/NJU6TnZsrLai/xBAQLLz+Hc7fHZJt/hsCz3Fih4= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8= +github.com/grafana/otel-profiling-go v0.5.1/go.mod h1:ftN/t5A/4gQI19/8MoWurBEtC6gFw8Dns1sJZ9W4Tls= +github.com/grafana/pyroscope-go v1.2.8 h1:UvCwIhlx9DeV7F6TW/z8q1Mi4PIm3vuUJ2ZlCEvmA4M= +github.com/grafana/pyroscope-go v1.2.8/go.mod h1:SSi59eQ1/zmKoY/BKwa5rSFsJaq+242Bcrr4wPix1g8= +github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og= +github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= 
+github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.72 h1:vTCWu1wbdYo7PEZFem/rlr01+Un+wwVmI7wiegFdRLk= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.72/go.mod h1:Vn+BBgKQHVQYdVQ4NZDICE1Brb+JfaONyDHr3q07oQc= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter v1.8.6 h1:9sQboWULaydVphxc4S64oAI4YqpuCk7nPmvbk131ebY= +github.com/hashicorp/go-getter v1.8.6/go.mod h1:nVH12eOV2P58dIiL3rsU6Fh3wLeJEKBOJzhMmzlSWoo= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= +github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= +github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= +github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= +github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= +github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= +github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= +github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= +github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= +github.com/huandu/skiplist v1.2.1 h1:dTi93MgjwErA/8idWTzIw4Y1kZsMWx35fmI2c8Rij7w= +github.com/huandu/skiplist v1.2.1/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/orderedmap v0.3.0 h1:5cbR2grmZR/DiVt+VJopEhtVs9YGInGIxAoMJn+Ichc= +github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJDkXXS7VoV7XGE= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/ingonyama-zk/icicle-gnark/v3 v3.2.2 h1:B+aWVgAx+GlFLhtYjIaF0uGjU3rzpl99Wf9wZWt+Mq8= +github.com/ingonyama-zk/icicle-gnark/v3 v3.2.2/go.mod h1:CH/cwcr21pPWH+9GtK/PFaa4OGTv4CtfkCKro6GpbRE= +github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= +github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/joho/godotenv v1.5.1 
h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= +github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/reedsolomon v1.13.4-0.20260420101718-f7e5efe6123a h1:aP94idRf0yhG07gBSIyW3sy/cd+XNLWnghSp11y0oIc= +github.com/klauspost/reedsolomon v1.13.4-0.20260420101718-f7e5efe6123a/go.mod h1:yjqqjgMTQkBUHSG97/rm4zipffCNbCiZcB3kTqr++sQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= +github.com/leanovate/gopter v0.2.11/go.mod 
h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lib/pq v1.12.3 h1:tTWxr2YLKwIvK90ZXEw8GP7UFHtcbTtty8zsI+YjrfQ= +github.com/lib/pq v1.12.3/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linxGnu/grocksdb v1.9.8 h1:vOIKv9/+HKiqJAElJIEYv3ZLcihRxyP7Suu/Mu8Dxjs= +github.com/linxGnu/grocksdb v1.9.8/go.mod h1:C3CNe9UYc9hlEM2pC82AqiGS3LRW537u9LFV4wIZuHk= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/minio/highwayhash v1.0.4 h1:asJizugGgchQod2ja9NJlGOWq4s7KsAWr5XUc9Clgl4= +github.com/minio/highwayhash v1.0.4/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 h1:0lgqHvJWHLGW5TuObJrfyEi6+ASTKDBWikGvPqy9Yiw= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= 
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/grpc-proxy v0.0.0-20181017164139-0f1106ef9c76/go.mod h1:x5OoJHDHqxHS801UIuhqGl6QdSAEJvtausosHSdazIo= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a h1:dlRvE5fWabOchtH7znfiFCcOvmIYgOeAS5ifBXBlh9Q= +github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo 
v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow= +github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= +github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runc v1.2.8 h1:RnEICeDReapbZ5lZEgHvj7E9Q3Eex9toYmaGBsbvU5Q= +github.com/opencontainers/runc v1.2.8/go.mod h1:cC0YkmZcuvr+rtBZ6T7NBoVbMGNAdLa/21vIElJDOzI= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml/v2 v2.3.0 h1:k59bC/lIZREW0/iVaQR8nDHxVq8OVlIzYCOJf421CaM= +github.com/pelletier/go-toml/v2 v2.3.0/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe 
h1:vHpqOnPlnkba8iSxU4j/CvDSS9J4+F4473esQsYLGoE= +github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod 
h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/ronanh/intcomp v1.1.1 h1:+1bGV/wEBiHI0FvzS7RHgzqOpfbBJzLIxkqMJ9e6yxY= +github.com/ronanh/intcomp v1.1.1/go.mod h1:7FOLy3P3Zj3er/kVrU/pl+Ql7JFZj7bwliMGketo0IU= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/zerolog v1.35.0 h1:VD0ykx7HMiMJytqINBsKcbLS+BJ4WYjz+05us+LRTdI= +github.com/rs/zerolog v1.35.0/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod 
h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sasha-s/go-deadlock v0.3.9 h1:fiaT9rB7g5sr5ddNZvlwheclN9IP86eFW9WgqlEQV+w= +github.com/sasha-s/go-deadlock v0.3.9/go.mod h1:KuZj51ZFmx42q/mPaYbRk0P1xcwe697zsJKE03vD4/Y= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy 
v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= +github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ulikunitz/xz v0.5.15 
h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zondax/golem v0.27.0 h1:IbBjGIXF3SoGOZHsILJvIM/F/ylwJzMcHAcggiqniPw= +github.com/zondax/golem v0.27.0/go.mod h1:AmorCgJPt00L8xN1VrMBe13PSifoZksnQ1Ge906bu4A= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v1.0.1 h1:Ks/2tz/dOF+dbRynfZ0dEhcdL1lqw43Sa0zMXHpQ3aQ= +github.com/zondax/ledger-go v1.0.1/go.mod h1:j7IgMY39f30apthJYMd1YsHZRqdyu4KbVmUp0nU78X0= +gitlab.com/NebulousLabs/errors v0.0.0-20171229012116-7ead97ef90b8/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975 h1:L/ENs/Ar1bFzUeKx6m3XjlmBgIUlykX9dzvp5k9NGxc= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= +gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40 h1:dizWJqTWjwyD8KGcMOwgrkqu1JIkofYgKkmDeNE7oAs= +gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40/go.mod h1:rOnSnoRyxMI3fe/7KIbVcsHRGxe30OONv8dEgo+vCfA= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk= +go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.68.0 h1:0Qx7VGBacMm9ZENQ7TnNObTYI4ShC+lHI16seduaxZo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.68.0/go.mod h1:Sje3i3MjSPKTSPvVWCaL8ugBzJwik3u4smCjUeuupqg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod 
h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 h1:ZrPRak/kS4xI3AVXy8F7pipuDXmDsrO8Lg+yQjBLjw0= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0/go.mod h1:3y6kQCWztq6hyW8Z9YxQDDm0Je9AJoFar2G0yDcmhRk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 h1:mS47AX77OtFfKG4vtp+84kuGSFZHTyxtXIN269vChY0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0/go.mod h1:PJnsC41lAGncJlPUniSwM81gc80GkgWJWr3cu2nKEtU= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg= +go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw= +go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 
h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= +golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text 
v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U= +golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= +gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.276.0 h1:nVArUtfLEihtW+b0DdcqRGK1xoEm2+ltAihyztq7MKY= +google.golang.org/api v0.276.0/go.mod h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 h1:XzmzkmB14QhVhgnawEVsOn6OFsnpyxNPRY9QV01dNB0= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:L43LFes82YgSonw6iTXTxXUX1OlULt4AQtkik4ULL/I= +google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA= +google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260406210006-6f92a3bedf2d h1:wT2n40TBqFY6wiwazVK9/iTWbsQrgk5ZfCSVFLO9LQA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260406210006-6f92a3bedf2d/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= +google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod 
h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +nhooyr.io/websocket v1.8.17 h1:KEVeLJkUywCKVsnLIDlD/5gtayKp8VoCkksHCGGfT9Y= +nhooyr.io/websocket v1.8.17/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= +pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/tools/talis/google_cloud.go b/tools/talis/google_cloud.go new file mode 100644 index 0000000000..943089b2fa --- /dev/null +++ b/tools/talis/google_cloud.go @@ -0,0 +1,825 @@ +package main + +import ( + "context" + "errors" + "fmt" + "log" + "math/rand" + "os" + "strings" + "sync" + "time" + + compute "cloud.google.com/go/compute/apiv1" + "cloud.google.com/go/compute/apiv1/computepb" + "google.golang.org/api/option" +) + +const ( + GCDefaultValidatorMachineType = "c3d-highcpu-16" + GCDefaultEncoderMachineType = "c3d-highcpu-8" + GCDefaultBridgeMachineType = "c3d-highcpu-8" + 
GCDefaultEvnodeMachineType = "c3d-highcpu-8" + GCDefaultLoadgenMachineType = "c3d-highcpu-8" + GCDefaultObservabilityMachineType = "e2-medium" + GCDefaultImage = "projects/ubuntu-os-cloud/global/images/family/ubuntu-2404-lts-amd64" + GCDefaultDiskSizeGB = 400 +) + +var ( + protoTCP = "tcp" + protoUDP = "udp" + protoICMP = "icmp" + dirIngress = computepb.Firewall_INGRESS.String() + boolTrue = true + externalNAT = "External NAT" + natType = computepb.AccessConfig_ONE_TO_ONE_NAT.String() + sshKeysLabel = "ssh-keys" + diskSizeGB = int64(GCDefaultDiskSizeGB) + + GCRegions = []string{ + "us-central1", "us-east1", "us-east4", "asia-southeast1", "europe-west1", "asia-east1", + } + GCZones = map[string][]string{ + "us-central1": {"us-central1-a", "us-central1-b", "us-central1-c"}, + "us-east1": {"us-east1-b", "us-east1-c", "us-east1-d"}, + "us-east4": {"us-east4-a", "us-east4-b", "us-east4-c"}, + "asia-southeast1": {"asia-southeast1-a", "asia-southeast1-b", "asia-southeast1-c"}, + "europe-west1": {"europe-west1-b", "europe-west1-c", "europe-west1-d"}, + "asia-east1": {"asia-east1-a", "asia-east1-b", "asia-east1-c"}, + } +) + +type GCClient struct { + ClientInfo + project string +} + +func NewGCClient(cfg Config) (*GCClient, error) { + if cfg.GoogleCloudProject == "" { + return nil, errors.New("google cloud project is required") + } + + sshKey, err := os.ReadFile(cfg.SSHPubKeyPath) + if err != nil { + return nil, fmt.Errorf("failed to read SSH public key at: %s %w", cfg.SSHPubKeyPath, err) + } + + return &GCClient{ + ClientInfo: ClientInfo{ + sshKey: sshKey, + cfg: cfg, + }, + project: cfg.GoogleCloudProject, + }, nil +} + +func (c *GCClient) Up(ctx context.Context, workers int) error { + insts := make([]Instance, 0) + allInstances := append(append(append(append(append(c.cfg.Validators, c.cfg.Observability...), c.cfg.Encoders...), c.cfg.Bridges...), c.cfg.Evnodes...), c.cfg.Loadgens...) + for _, v := range allInstances { + if v.Provider != GoogleCloud { + continue + } + + if v.Region == "" || v.Region == RandomRegion { + v.Region = RandomGCRegion() + } + + insts = append(insts, v) + } + + if len(insts) == 0 { + return fmt.Errorf("no instances to create") + } + + opts, err := gcClientOptions(c.cfg) + if err != nil { + return fmt.Errorf("failed to create client options: %w", err) + } + + insts, err = CreateGCInstances(ctx, c.project, insts, string(c.sshKey), opts, workers) + if err != nil { + return fmt.Errorf("failed to create instances: %w", err) + } + + for _, inst := range insts { + cfg, err := c.cfg.UpdateInstance(inst.Name, inst.PublicIP, inst.PrivateIP) + if err != nil { + return fmt.Errorf("failed to update config with instance %s: %w", inst.Name, err) + } + c.cfg = cfg + } + + return nil +} + +func (c *GCClient) Down(ctx context.Context, workers int) error { + insts := make([]Instance, 0) + allInstances := append(append(append(append(append(c.cfg.Validators, c.cfg.Observability...), c.cfg.Encoders...), c.cfg.Bridges...), c.cfg.Evnodes...), c.cfg.Loadgens...) 
+ for _, v := range allInstances { + if v.Provider != GoogleCloud { + continue + } + if v.Region == "" || v.Region == RandomRegion { + v.Region = RandomGCRegion() + } + insts = append(insts, v) + } + + if len(insts) == 0 { + return fmt.Errorf("no instances to destroy") + } + + opts, err := gcClientOptions(c.cfg) + if err != nil { + return fmt.Errorf("failed to create client options: %w", err) + } + + _, err = DestroyGCInstances(ctx, c.project, insts, opts, workers) + return err +} + +func (c *GCClient) List(ctx context.Context) error { + opts, err := gcClientOptions(c.cfg) + if err != nil { + return fmt.Errorf("failed to create client options: %w", err) + } + + client, err := compute.NewInstancesRESTClient(ctx, opts...) + if err != nil { + return fmt.Errorf("failed to create compute client: %w", err) + } + defer client.Close() + + cnt := 0 + for _, region := range GCRegions { + zones := GCZones[region] + for _, zone := range zones { + req := &computepb.ListInstancesRequest{ + Project: c.project, + Zone: zone, + } + it := client.List(ctx, req) + for { + instance, err := it.Next() + if err != nil { + break + } + + if instance.Labels != nil { + if _, hasTalis := instance.Labels["talis"]; hasTalis { + publicIP := "" + if len(instance.NetworkInterfaces) > 0 { + ni := instance.NetworkInterfaces[0] + if len(ni.AccessConfigs) > 0 && ni.AccessConfigs[0].NatIP != nil { + publicIP = *ni.AccessConfigs[0].NatIP + } + } + + if cnt == 0 { + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", "Name", "Status", "Zone", "Public IP", "Created") + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", "----", "------", "------", "---------", "-------") + } + + status := "UNKNOWN" + if instance.Status != nil { + status = *instance.Status + } + name := "" + if instance.Name != nil { + name = *instance.Name + } + created := "" + if instance.CreationTimestamp != nil { + created = *instance.CreationTimestamp + } + + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", + name, + status, + zone, + publicIP, + created) + cnt++ + } + } + } + } + } + + fmt.Println("Total number of talis instances:", cnt) + return nil +} + +func (c *GCClient) GetConfig() Config { + return c.cfg +} + +func NewGoogleCloudValidator(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomGCRegion() + } + i := NewBaseInstance(Validator) + i.Provider = GoogleCloud + i.Slug = GCDefaultValidatorMachineType + i.Region = region + return i +} + +func NewGoogleCloudEncoder(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomGCRegion() + } + i := NewBaseInstance(Encoder) + i.Provider = GoogleCloud + i.Slug = GCDefaultEncoderMachineType + i.Region = region + return i +} + +func NewGoogleCloudBridge(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomGCRegion() + } + i := NewBaseInstance(Bridge) + i.Provider = GoogleCloud + i.Slug = GCDefaultBridgeMachineType + i.Region = region + return i +} + +func NewGoogleCloudEvnode(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomGCRegion() + } + i := NewBaseInstance(Evnode) + i.Provider = GoogleCloud + i.Slug = GCDefaultEvnodeMachineType + i.Region = region + return i +} + +func NewGoogleCloudLoadgen(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomGCRegion() + } + i := NewBaseInstance(Loadgen) + i.Provider = GoogleCloud + i.Slug = GCDefaultLoadgenMachineType + i.Region = region + return i +} + +func NewGoogleCloudObservability(region string) Instance { + if 
region == "" || region == RandomRegion { + region = RandomGCRegion() + } + i := NewBaseInstance(Observability) + i.Provider = GoogleCloud + i.Slug = GCDefaultObservabilityMachineType + i.Region = region + return i +} + +func RandomGCRegion() string { + return GCRegions[rand.Intn(len(GCRegions))] +} + +func gcClientOptions(cfg Config) ([]option.ClientOption, error) { + var opts []option.ClientOption + if cfg.GoogleCloudKeyJSONPath != "" { + keyJSON, err := os.ReadFile(cfg.GoogleCloudKeyJSONPath) + if err != nil { + return nil, fmt.Errorf("failed to read Google Cloud key file at %s: %w", cfg.GoogleCloudKeyJSONPath, err) + } + opts = append(opts, option.WithAuthCredentialsJSON(option.ServiceAccount, keyJSON)) + } + return opts, nil +} + +func RandomGCZone(region string) string { + zones, ok := GCZones[region] + if !ok || len(zones) == 0 { + return region + "-a" + } + return zones[rand.Intn(len(zones))] +} + +func ensureGCFirewallRule(ctx context.Context, project string, opts []option.ClientOption) error { + client, err := compute.NewFirewallsRESTClient(ctx, opts...) + if err != nil { + return fmt.Errorf("failed to create firewall client: %w", err) + } + defer client.Close() + + firewallName := "talis-allow-all-ports" + + // Check if firewall rule already exists + getReq := &computepb.GetFirewallRequest{ + Project: project, + Firewall: firewallName, + } + _, err = client.Get(ctx, getReq) + if err == nil { + // Firewall rule already exists + log.Println("Firewall rule", firewallName, "already exists") + return nil + } + + // Create firewall rule to allow all incoming traffic + log.Println("Creating firewall rule", firewallName, "to allow all incoming traffic") + + firewall := &computepb.Firewall{ + Name: &firewallName, + Allowed: []*computepb.Allowed{ + { + IPProtocol: &protoTCP, + Ports: []string{"0-65535"}, + }, + { + IPProtocol: &protoUDP, + Ports: []string{"0-65535"}, + }, + { + IPProtocol: &protoICMP, + }, + }, + Direction: &dirIngress, + SourceRanges: []string{"0.0.0.0/0"}, + TargetTags: []string{"talis-allow-all"}, + } + + insertReq := &computepb.InsertFirewallRequest{ + Project: project, + FirewallResource: firewall, + } + + op, err := client.Insert(ctx, insertReq) + if err != nil { + return fmt.Errorf("failed to insert firewall rule: %w", err) + } + + if err := op.Wait(ctx); err != nil { + return fmt.Errorf("failed to wait for firewall rule creation: %w", err) + } + + log.Println("Firewall rule", firewallName, "created successfully") + return nil +} + +func CreateGCInstances(ctx context.Context, project string, insts []Instance, sshKey string, opts []option.ClientOption, workers int) ([]Instance, error) { + total := len(insts) + + type result struct { + inst Instance + err error + timeRequired time.Duration + } + + insts, existing, err := filterExistingGCInstances(ctx, project, insts, opts) + if err != nil { + return nil, err + } + + if len(existing) > 0 { + log.Println("Existing instances found, so they are not being created.") + for _, v := range existing { + log.Println("Skipping", v.Name, v.PublicIP, v.Tags) + } + } + + // Ensure a firewall rule exists to allow all ports + if err := ensureGCFirewallRule(ctx, project, opts); err != nil { + return nil, fmt.Errorf("failed to ensure firewall rule: %w", err) + } + + results := make(chan result, total) + workerChan := make(chan struct{}, workers) + var wg sync.WaitGroup + wg.Add(len(insts)) + + for _, v := range insts { + go func(inst Instance) { + workerChan <- struct{}{} + defer func() { + <-workerChan + wg.Done() + }() + + ctx, cancel 
:= context.WithTimeout(ctx, 7*time.Minute) + defer cancel() + + start := time.Now() + log.Println("Creating instance", inst.Name, "in region", inst.Region, start.Format(time.RFC3339)) + + zone := RandomGCZone(inst.Region) + pubIP, privIP, err := createGCInstance(ctx, project, inst, zone, sshKey, opts) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("create %s: %w", inst.Name, err)} + return + } + + inst.PublicIP = pubIP + inst.PrivateIP = privIP + results <- result{inst: inst, err: nil, timeRequired: time.Since(start)} + }(v) + } + + go func() { + wg.Wait() + close(results) + }() + + var created []Instance + for res := range results { + if res.err != nil { + fmt.Printf("❌ %s failed after %v %v\n", res.inst.Name, res.timeRequired, res.err) + } else { + created = append(created, res.inst) + fmt.Printf("✅ %s is up (public=%s) in %v\n", + res.inst.Name, res.inst.PublicIP, res.timeRequired) + } + fmt.Printf("---- Progress: %d/%d\n", len(created), total) + } + + return created, nil +} + +func createGCInstance(ctx context.Context, project string, inst Instance, zone string, sshKey string, opts []option.ClientOption) (string, string, error) { + client, err := compute.NewInstancesRESTClient(ctx, opts...) + if err != nil { + return "", "", fmt.Errorf("failed to create compute client: %w", err) + } + defer client.Close() + + labels := make(map[string]string) + for _, tag := range inst.Tags { + labels[strings.ReplaceAll(tag, "-", "_")] = "true" + } + + username := "root" + sshKeyMetadata := fmt.Sprintf("%s:%s", username, strings.TrimSpace(sshKey)) + + machineType := fmt.Sprintf("zones/%s/machineTypes/%s", zone, inst.Slug) + sourceImage := GCDefaultImage + + req := &computepb.InsertInstanceRequest{ + Project: project, + Zone: zone, + InstanceResource: &computepb.Instance{ + Name: &inst.Name, + MachineType: &machineType, + Labels: labels, + Tags: &computepb.Tags{ + Items: []string{"talis-allow-all"}, + }, + Disks: []*computepb.AttachedDisk{ + { + Boot: &boolTrue, + AutoDelete: &boolTrue, + InitializeParams: &computepb.AttachedDiskInitializeParams{ + SourceImage: &sourceImage, + DiskSizeGb: &diskSizeGB, + }, + }, + }, + NetworkInterfaces: []*computepb.NetworkInterface{ + { + AccessConfigs: []*computepb.AccessConfig{ + { + Name: &externalNAT, + Type: &natType, + }, + }, + }, + }, + Metadata: &computepb.Metadata{ + Items: []*computepb.Items{ + { + Key: &sshKeysLabel, + Value: &sshKeyMetadata, + }, + }, + }, + }, + } + + op, err := client.Insert(ctx, req) + if err != nil { + return "", "", fmt.Errorf("failed to insert instance: %w", err) + } + + if err := op.Wait(ctx); err != nil { + return "", "", fmt.Errorf("failed to wait for instance creation: %w", err) + } + + pubIP, privIP, err := waitForGCNetworkIP(ctx, client, project, zone, inst.Name) + if err != nil { + return "", "", fmt.Errorf("failed to get IPs: %w", err) + } + + return pubIP, privIP, nil +} + +func waitForGCNetworkIP(ctx context.Context, client *compute.InstancesClient, project, zone, name string) (string, string, error) { + ticker := time.NewTicker(4 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return "", "", ctx.Err() + case <-ticker.C: + req := &computepb.GetInstanceRequest{ + Project: project, + Zone: zone, + Instance: name, + } + instance, err := client.Get(ctx, req) + if err != nil { + return "", "", err + } + + var pubIP, privIP string + if len(instance.NetworkInterfaces) > 0 { + ni := instance.NetworkInterfaces[0] + if ni.NetworkIP != nil { + privIP = *ni.NetworkIP + } + if 
len(ni.AccessConfigs) > 0 && ni.AccessConfigs[0].NatIP != nil { + pubIP = *ni.AccessConfigs[0].NatIP + } + } + + if pubIP != "" && privIP != "" { + return pubIP, privIP, nil + } + } + } +} + +func filterExistingGCInstances(ctx context.Context, project string, insts []Instance, opts []option.ClientOption) ([]Instance, []Instance, error) { + client, err := compute.NewInstancesRESTClient(ctx, opts...) + if err != nil { + return nil, nil, fmt.Errorf("failed to create compute client: %w", err) + } + defer client.Close() + + existingTags := make(map[string]bool) + for _, region := range GCRegions { + zones := GCZones[region] + for _, zone := range zones { + req := &computepb.ListInstancesRequest{ + Project: project, + Zone: zone, + } + it := client.List(ctx, req) + for { + instance, err := it.Next() + if err != nil { + break + } + if instance.Labels != nil { + for label := range instance.Labels { + existingTags[strings.ReplaceAll(label, "_", "-")] = true + } + } + } + } + } + + var newInsts, existing []Instance + for _, inst := range insts { + experimentTag := GetExperimentTag(inst.Tags) + if experimentTag == "" || !existingTags[experimentTag] { + newInsts = append(newInsts, inst) + } else { + existing = append(existing, inst) + } + } + + return newInsts, existing, nil +} + +func DestroyGCInstances(ctx context.Context, project string, insts []Instance, opts []option.ClientOption, workers int) ([]Instance, error) { + return destroyGCInstancesInternal(ctx, project, insts, opts, workers) +} + +func findGCInstanceZone(ctx context.Context, project, instanceName, region string, opts []option.ClientOption) (string, error) { + client, err := compute.NewInstancesRESTClient(ctx, opts...) + if err != nil { + return "", fmt.Errorf("failed to create compute client: %w", err) + } + defer client.Close() + + zones := GCZones[region] + if len(zones) == 0 { + zones = []string{region + "-a", region + "-b", region + "-c"} + } + + for _, zone := range zones { + req := &computepb.GetInstanceRequest{ + Project: project, + Zone: zone, + Instance: instanceName, + } + _, err := client.Get(ctx, req) + if err == nil { + return zone, nil + } + } + + return "", fmt.Errorf("instance %s not found in any zone of region %s", instanceName, region) +} + +func deleteGCInstance(ctx context.Context, project, zone, name string, opts []option.ClientOption) error { + client, err := compute.NewInstancesRESTClient(ctx, opts...) + if err != nil { + return fmt.Errorf("failed to create compute client: %w", err) + } + defer client.Close() + + req := &computepb.DeleteInstanceRequest{ + Project: project, + Zone: zone, + Instance: name, + } + + op, err := client.Delete(ctx, req) + if err != nil { + return fmt.Errorf("failed to delete instance: %w", err) + } + + if err := op.Wait(ctx); err != nil { + return fmt.Errorf("failed to wait for deletion: %w", err) + } + + return nil +} + +func checkForRunningGCExperiments(ctx context.Context, project string, opts []option.ClientOption, experimentID, chainID string) (bool, error) { + if project == "" { + return false, nil + } + + client, err := compute.NewInstancesRESTClient(ctx, opts...) 
+ if err != nil { + return false, fmt.Errorf("failed to create compute client: %w", err) + } + defer client.Close() + + for _, region := range GCRegions { + zones := GCZones[region] + for _, zone := range zones { + req := &computepb.ListInstancesRequest{ + Project: project, + Zone: zone, + } + it := client.List(ctx, req) + for { + instance, err := it.Next() + if err != nil { + break + } + if instance.Labels != nil { + if _, hasTalis := instance.Labels["talis"]; hasTalis { + for label := range instance.Labels { + if hasGCExperimentLabel(label, experimentID, chainID) { + return true, nil + } + } + } + } + } + } + } + + return false, nil +} + +func hasGCExperimentLabel(label, experimentID, chainID string) bool { + if !strings.HasPrefix(label, "validator_") && !strings.HasPrefix(label, "bridge_") && !strings.HasPrefix(label, "light_") && !strings.HasPrefix(label, "encoder_") { + return false + } + experimentIDLabel := strings.ReplaceAll(experimentID, "-", "_") + chainIDLabel := strings.ReplaceAll(chainID, "-", "_") + return strings.Contains(label, experimentIDLabel) && strings.Contains(label, chainIDLabel) +} + +func destroyAllTalisGCInstances(ctx context.Context, project string, opts []option.ClientOption, workers int) ([]Instance, error) { + client, err := compute.NewInstancesRESTClient(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create compute client: %w", err) + } + defer client.Close() + + var talisInstances []Instance + for _, region := range GCRegions { + zones := GCZones[region] + for _, zone := range zones { + req := &computepb.ListInstancesRequest{ + Project: project, + Zone: zone, + } + it := client.List(ctx, req) + for { + instance, err := it.Next() + if err != nil { + break + } + if instance.Labels != nil { + if _, hasTalis := instance.Labels["talis"]; hasTalis { + publicIP := "" + if len(instance.NetworkInterfaces) > 0 { + ni := instance.NetworkInterfaces[0] + if len(ni.AccessConfigs) > 0 && ni.AccessConfigs[0].NatIP != nil { + publicIP = *ni.AccessConfigs[0].NatIP + } + } + name := "" + if instance.Name != nil { + name = *instance.Name + } + talisInstances = append(talisInstances, Instance{ + Name: name, + PublicIP: publicIP, + Region: region, + }) + } + } + } + } + } + + if len(talisInstances) == 0 { + log.Println("No talis instances found to destroy") + return nil, nil + } + + return destroyGCInstancesInternal(ctx, project, talisInstances, opts, workers) +} + +func destroyGCInstancesInternal(ctx context.Context, project string, insts []Instance, opts []option.ClientOption, workers int) ([]Instance, error) { + type result struct { + inst Instance + err error + timeRequired time.Duration + } + + results := make(chan result, len(insts)) + workerChan := make(chan struct{}, workers) + var wg sync.WaitGroup + wg.Add(len(insts)) + + for _, inst := range insts { + go func(inst Instance) { + workerChan <- struct{}{} + defer func() { + <-workerChan + wg.Done() + }() + start := time.Now() + + fmt.Println("⏳ Deleting instance", inst.Name, inst.PublicIP) + + delCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + + zone, err := findGCInstanceZone(delCtx, project, inst.Name, inst.Region, opts) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("find zone for %s: %w", inst.Name, err)} + return + } + + if err := deleteGCInstance(delCtx, project, zone, inst.Name, opts); err != nil { + results <- result{inst: inst, err: fmt.Errorf("delete %s: %w", inst.Name, err)} + return + } + + results <- result{inst: inst, err: nil, timeRequired: 
time.Since(start)} + }(inst) + } + + go func() { + wg.Wait() + close(results) + }() + + var removed []Instance + var failed []result + for res := range results { + if res.err != nil { + fmt.Printf("❌ %s failed to delete after %v: %v\n", + res.inst.Name, res.timeRequired, res.err) + failed = append(failed, res) + } else { + removed = append(removed, res.inst) + fmt.Printf("✅ %s deleted (took %v)\n", res.inst.Name, res.timeRequired) + } + fmt.Printf("---- Progress: %d/%d\n", len(removed)+len(failed), len(insts)) + } + + return removed, nil +} diff --git a/tools/talis/init.go b/tools/talis/init.go new file mode 100644 index 0000000000..96b34a1d45 --- /dev/null +++ b/tools/talis/init.go @@ -0,0 +1,314 @@ +package main + +import ( + "fmt" + "io" + "log" + "os" + "os/exec" + "os/user" + "path/filepath" + "strings" + + "github.com/celestiaorg/celestia-app/v9/app" + cmtconfig "github.com/cometbft/cometbft/config" + serverconfig "github.com/cosmos/cosmos-sdk/server/config" + "github.com/joho/godotenv" + "github.com/spf13/cobra" +) + +const ( + EnvVarSSHKeyName = "TALIS_SSH_KEY_NAME" + EnvVarPubSSHKeyPath = "TALIS_SSH_PUB_KEY_PATH" + EnvVarSSHKeyPath = "TALIS_SSH_KEY_PATH" + EnvVarDigitalOceanToken = "DIGITALOCEAN_TOKEN" + EnvVarGoogleCloudProject = "GOOGLE_CLOUD_PROJECT" + EnvVarGoogleCloudKeyJSONPath = "GOOGLE_CLOUD_KEY_JSON_PATH" + EnvVarAWSAccessKeyID = "AWS_ACCESS_KEY_ID" + EnvVarAWSSecretAccessKey = "AWS_SECRET_ACCESS_KEY" + EnvVarAWSRegion = "AWS_DEFAULT_REGION" + EnvVarS3Bucket = "AWS_S3_BUCKET" + EnvVarS3Endpoint = "AWS_S3_ENDPOINT" + EnvVarChainID = "CHAIN_ID" + mebibyte = 1_048_576 +) + +func initCmd() *cobra.Command { + var ( + rootDir string + srcRoot string + chainID string + experiment string + SSHPubKeyPath string + SSHKeyName string + tables []string + withObservability bool + provider string + observabilityRegion string + observabilitySlug string + awsZone string + ) + + cmd := &cobra.Command{ + Use: "init", + Short: "Initialize the Talis network", + Long: "Initialize the Talis network with the provided configuration.", + RunE: func(cmd *cobra.Command, args []string) error { + // Set default provider + if provider == "" { + provider = "digitalocean" + } + + // Load .env if it exists otherwise ignore + envPath := filepath.Join(rootDir, ".env") + err := godotenv.Load(envPath) + + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to load .env: %w", err) + } + + // Validate provider if .env was loaded + if err == nil { + fmt.Println("✅ Loaded environment variables from .env") + envProvider := os.Getenv("PROVIDER") + if envProvider != "" && envProvider != provider { + return fmt.Errorf("provider mismatch: .env has PROVIDER=%s but --provider=%s\nRegenerate with: talis init-env --provider %s", + envProvider, provider, provider) + } + + // Override SSH config from env vars if they exist + if envSSHKeyPath := os.Getenv(EnvVarSSHKeyPath); envSSHKeyPath != "" { + SSHPubKeyPath = envSSHKeyPath + } + if envSSHKeyName := os.Getenv(EnvVarSSHKeyName); envSSHKeyName != "" { + SSHKeyName = envSSHKeyName + } + } + + if err := initDirs(rootDir); err != nil { + return fmt.Errorf("failed to initialize directories: %w", err) + } + + if err := CopyTalisScripts(rootDir, srcRoot); err != nil { + return fmt.Errorf("failed to copy scripts: %w", err) + } + + // todo: use the number of validators, bridges, and lights to create the config + cfg := NewConfig(experiment, chainID). + WithSSHPubKeyPath(SSHPubKeyPath). 
+ WithSSHKeyName(SSHKeyName) + + // If --with-observability is set, add a observability node and enable prometheus + enablePrometheus := false + if withObservability { + switch provider { + case "digitalocean": + cfg = cfg.WithDigitalOceanObservability(observabilityRegion). + WithDigitalOceanToken(os.Getenv(EnvVarDigitalOceanToken)) + case "googlecloud": + cfg = cfg.WithGoogleCloudObservability(observabilityRegion). + WithGoogleCloudProject(os.Getenv(EnvVarGoogleCloudProject)). + WithGoogleCloudKeyJSONPath(os.Getenv(EnvVarGoogleCloudKeyJSONPath)) + case "aws": + cfg = cfg.WithAWSObservability(observabilityRegion). + WithAWSRegion(awsRegionFromEnv()). + WithAWSZone(resolveAWSZone(awsZone)) + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + enablePrometheus = true + + if observabilitySlug != "" && len(cfg.Observability) > 0 { + cfg.Observability[0].Slug = observabilitySlug + } + } else if provider == "aws" { + // Stamp AWSRegion / AWSZone so NewClient later routes to + // AWSClient even when the user doesn't want an obs node. + cfg = cfg.WithAWSRegion(awsRegionFromEnv()). + WithAWSZone(resolveAWSZone(awsZone)) + } + + if err := cfg.Save(rootDir); err != nil { + return fmt.Errorf("failed to save init config: %w", err) + } + + // write the default config files that will be copied to the payload + // for each validator unless otherwise overridden + consensusConfig := app.DefaultConsensusConfig() + consConfig := DefaultConfigProfile(consensusConfig, tables, enablePrometheus) + cmtconfig.WriteConfigFile(filepath.Join(rootDir, "config.toml"), consConfig) + + // the sdk requires a global template be set just to save a toml file without panicking + serverconfig.SetConfigTemplate(serverconfig.DefaultConfigTemplate) + + appconfig := app.DefaultAppConfig() + appconfig.GRPC.Enable = true + appconfig.GRPC.Address = "0.0.0.0:9091" + + // Enable app telemetry when observability is enabled + if enablePrometheus { + appconfig.Telemetry.Enabled = true + appconfig.Telemetry.PrometheusRetentionTime = 60 + // Expose /metrics on the API server for Prometheus scraping. 
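+				// (The appTelemetryPort constant in observability_payload.go
+				// assumes this 1317 listener when it generates the
+				// app-telemetry scrape targets.)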
+ appconfig.API.Enable = true + appconfig.API.Address = "tcp://0.0.0.0:1317" + } + + serverconfig.WriteConfigFile(filepath.Join(rootDir, "app.toml"), appconfig) + + return nil + }, + } + + homeDir, err := os.UserHomeDir() + if err != nil { + log.Fatalf("failed to get user home directory: %v", err) + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVarP(&srcRoot, "src-root", "r", homeDir, "directory which is a repo root or home directory for celestia app") + cmd.Flags().StringVarP(&chainID, "chainID", "c", "", "Chain ID (required)") + _ = cmd.MarkFlagRequired("chainID") + cmd.Flags().StringVarP(&experiment, "experiment", "e", "test", "the name of the experiment (required)") + _ = cmd.MarkFlagRequired("experiment") + cmd.Flags().StringArrayVarP(&tables, "tables", "t", []string{"consensus_round_state", "consensus_block", "mempool_tx"}, "the traces that will be collected") + cmd.Flags().BoolVar(&withObservability, "with-observability", false, "add a observability node and enable Prometheus on validators") + cmd.Flags().StringVarP(&provider, "provider", "p", "digitalocean", "provider for observability node when --with-observability is set (digitalocean, googlecloud, aws)") + cmd.Flags().StringVar(&observabilityRegion, "observability-region", "random", "region for the observability node — set to match your validator region to reduce scrape latency") + cmd.Flags().StringVar(&observabilitySlug, "observability-slug", "", "instance size for the observability node (default: provider's default — "+DODefaultObservabilitySlug+" for DigitalOcean, "+GCDefaultObservabilityMachineType+" for Google Cloud, "+AWSDefaultObservabilityInstanceType+" for AWS)") + cmd.Flags().StringVar(&awsZone, "aws-zone", "", "availability zone for AWS instances (default: "+AWSDefaultZone+"). All AWS instances share this AZ + a cluster placement group for free intra-AZ traffic and low latency.") + + defaultKeyPath := filepath.Join(homeDir, ".ssh", "id_ed25519.pub") + cmd.Flags().StringVarP(&SSHPubKeyPath, "ssh-pub-key-path", "s", defaultKeyPath, "path to the user's SSH public key") + + user, err := user.Current() + if err != nil { + log.Fatalf("failed to get current user: %v", err) + } + defaultKeyName := user.Username + cmd.Flags().StringVarP(&SSHKeyName, "ssh-key-name", "n", defaultKeyName, "name for the SSH key") + + return cmd +} + +func DefaultConfigProfile(cfg *cmtconfig.Config, tables []string, enablePrometheus bool) *cmtconfig.Config { + cfg.Instrumentation.TracingTables = strings.Join(tables, ",") + cfg.Instrumentation.TraceType = "local" + cfg.Instrumentation.Prometheus = enablePrometheus + cfg.Instrumentation.PrometheusListenAddr = ":26660" + cfg.P2P.SendRate = 100 * mebibyte + cfg.P2P.RecvRate = 110 * mebibyte + cfg.RPC.ListenAddress = "tcp://0.0.0.0:26657" + cfg.RPC.GRPCListenAddress = "tcp://0.0.0.0:9090" + return cfg +} + +func initDirs(rootDir string) error { + // 1) create the sub‑directories + for _, d := range []string{"payload", "data", "scripts"} { + dir := filepath.Join(rootDir, d) + if err := os.MkdirAll(dir, 0o755); err != nil { + return fmt.Errorf("failed to create %s: %w", dir, err) + } + } + + return nil +} + +// CopyTalisScripts copies the talis scripts directory into destDir. +// It checks multiple possible locations for the scripts. 
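+// The candidate locations are tried in order: <root>/tools/talis/scripts,
+// <root>/celestia-app/tools/talis/scripts, and
+// <root>/src/celestia-app/tools/talis/scripts. If none exists, the scripts
+// are fetched with a shallow git clone of celestiaorg/celestia-app.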
+func CopyTalisScripts(destDir string, root string) error { + candidates := []string{ + // here root can have different meanings + // repo root: + filepath.Join(root, "tools", "talis", "scripts"), + // root of all repos: + filepath.Join(root, "celestia-app", "tools", "talis", "scripts"), + // legacy root with src: + filepath.Join(root, "src", "celestia-app", "tools", "talis", "scripts"), + } + + var src string + for _, candidate := range candidates { + if fi, err := os.Stat(candidate); err == nil && fi.IsDir() { + src = candidate + break + } + } + + // Fallback to git clone if not found locally + if src == "" { + tmp, err := os.MkdirTemp("", "celestia-scripts-*") + if err != nil { + return fmt.Errorf("mktemp: %w", err) + } + defer os.RemoveAll(tmp) + + repo := "https://github.com/celestiaorg/celestia-app.git" + cmd := exec.Command("git", "clone", "--depth=1", repo, tmp) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("git clone failed: %w", err) + } + + src = filepath.Join(tmp, "tools", "talis", "scripts") + } + + // copy directory tree including subdirectories + return copyDir(src, filepath.Join(destDir, "scripts")) +} + +// copyDir recursively copies a directory tree, attempting to preserve permissions. +func copyDir(src string, dest string) error { + // walk through source + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + rel, err := filepath.Rel(src, path) + if err != nil { + return err + } + + target := filepath.Join(dest, rel) + + if info.IsDir() { + // create directory + if err := os.MkdirAll(target, 0o755); err != nil { + return err + } + return nil + } + + // it's a file; copy it + return copyFile(path, target, info.Mode()) + }) +} + +// copyFile copies a single file from src to dest, preserving permissions and creating parent directories if needed. +func copyFile(srcFile, destFile string, perm os.FileMode) error { + destDir := filepath.Dir(destFile) + if err := os.MkdirAll(destDir, 0o755); err != nil { + return fmt.Errorf("failed to create parent directory %s: %w", destDir, err) + } + + src, err := os.Open(srcFile) + if err != nil { + return fmt.Errorf("failed to open source file %s: %w", srcFile, err) + } + defer src.Close() + + dest, err := os.OpenFile(destFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, perm) + if err != nil { + return fmt.Errorf("failed to open destination file %s: %w", destFile, err) + } + defer dest.Close() + + if _, err = io.Copy(dest, src); err != nil { + return fmt.Errorf("failed to copy data: %w", err) + } + + return nil +} diff --git a/tools/talis/kpi_reproduction_steps.md b/tools/talis/kpi_reproduction_steps.md new file mode 100644 index 0000000000..ba96260b36 --- /dev/null +++ b/tools/talis/kpi_reproduction_steps.md @@ -0,0 +1,210 @@ +# Celestia KPI Reproduction Steps + +This document provides instructions for reproducing the core-app KPIs. These KPIs measure transaction submission performance and sync to tip duration. + +## Prerequisites + +1. **Verify block time configuration for 32MB/3sec blocks:** + + Modify `app_consts.go` and set `DelayedPrecommitTimeout = time.Millisecond * 2800` for 3s block time. + +2. **Install celestia-app and dependencies:** + + ```bash + # Build all necessary binaries (must be done after modifying DelayedPrecommitTimeout) + make build-talis-bins + + # Install talis + go install ./tools/talis/ + ``` + +3. **Set up cloud provider credentials:** + + Google Cloud is recommended for high-throughput tests. 
Ask the DevOps team for access to Celestia's Google Cloud fibreda workspace.
+
+   ```bash
+   # Create a .env file
+   talis init-env --provider googlecloud
+
+   # Fill in the .env file with your credentials:
+   GOOGLE_CLOUD_PROJECT="fibreda"
+   GOOGLE_CLOUD_KEY_JSON_PATH="/path/to/service-account-key.json"
+   ```
+
+4. **An SSH key is required for running experiments:**
+
+   Create a new SSH key or use an existing one. For Google Cloud, the SSH key is automatically added to instance metadata by talis.
+
+   Configure these variables in `.env`:
+
+   ```bash
+   TALIS_SSH_KEY_PATH=your-key-path
+   TALIS_SSH_KEY_NAME=your-key-name
+   ```
+
+5. **S3 bucket for faster deployment (optional):**
+
+   For faster deployments using S3 upload instead of direct payload upload, configure an S3 bucket:
+
+   ```bash
+   AWS_ACCESS_KEY_ID=your-access-key
+   AWS_SECRET_ACCESS_KEY=your-secret-key
+   AWS_DEFAULT_REGION=fra1
+   AWS_S3_ENDPOINT=https://fra1.digitaloceanspaces.com
+   AWS_S3_BUCKET=your-bucket-name
+   ```
+
+## Talis Network Deployment
+
+1. **Initialize Talis Network**
+
+   ```bash
+   # Initialize with observability for metrics collection
+   talis init -c kpi-test-chain -e tx-kpi --with-observability --provider googlecloud
+
+   # Add validator nodes (50-100 validators recommended for a realistic network)
+   talis add -t validator -c 50 --provider googlecloud
+   ```
+
+2. **Deploy Network**
+
+   ```bash
+   # Spin up cloud instances (specify SSH key if not using defaults)
+   talis up --provider googlecloud --workers 20
+
+   # Create genesis with an appropriate square size
+   # Square size 256 allows for ~32MB blocks
+   talis genesis -s 256 -b ./build
+
+   # Deploy the network (specify SSH key if needed)
+   # Note: For faster deployment, use S3 upload (see the S3 bucket prerequisite)
+   # instead of --direct-payload-upload:
+   talis deploy --workers 20
+
+   # After deployment completes, talis prints the Grafana access
+   # information (URL and credentials).
+
+   # Wait for the network to start and optionally confirm all validators are online
+   talis status
+   ```
+
+## Transaction Submission KPIs
+
+**NOTE:** Reset the network between KPI experiments so each run starts from a fresh state and produces accurate results.
+
+```bash
+talis reset
+talis deploy --workers 20
+```
+
+### KPI 1: 8MB/1sec (Single Submitter)
+
+**Target:** One latency monitor submitting 8MB blobs every second
+
+```bash
+talis latency-monitor -i 1 -b 8000000 -z 8000000 -s 1000ms
+```
+
+**Expected Results:**
+
+- Success rate: >=99.9%
+- Average user latency: 6-8 seconds
+- No evictions
+
+### KPI 2: Load Shedding (Two Submitters, 8MB/1sec each)
+
+**Target:** Two latency monitors submitting 8MB blobs every second (total 16MB/1sec)
+
+```bash
+talis latency-monitor -i 2 -b 8000000 -z 8000000 -s 1000ms
+```
+
+**Expected Observations:**
+
+- Gas price increases under load
+- Some broadcast failures due to a full mempool
+- Higher latency due to eviction timeouts
+- Sequence mismatch errors from resubmission race conditions
+- The network sheds load by evicting low-fee transactions
+
+### Test 3: Parallel Submission (Multiple Workers)
+
+**Target:** A single latency monitor with multiple parallel workers attempting to saturate throughput.
+
+```bash
+# example: 15 workers submitting 2-8MB txs every 100ms
+talis latency-monitor --instances 1 -w 15 -b 8000000 -z 2000000 --submission-delay 100ms
+```
+
+**Expected Results:**
+
+- Consistent throughput >9MB/1sec
+- Good mempool distribution
+
+### Test 4: No Eviction (Optimal Conditions)
+
+This is already measured in the first experiment, but if you need to re-run it:
+
+```bash
+talis latency-monitor -i 1 -b 8000000 -z 8000000 -s 1000ms
+```
+
+**Expected Results:**
+
+- Transactions included with zero evictions
+
+## Collect Metrics and Results
+
+### From Grafana
+
+At `http://<observability-node-ip>:3000` (as displayed during `talis deploy`):
+
+- Access the Celestia Grafana dashboards displaying network data
+- Access the latency monitor dashboards displaying submission statistics and latency monitor logs
+
+## Cleanup
+
+```bash
+# Destroy cloud instances
+talis down --workers 20
+```
+
+## Sync to Tip KPIs
+
+These KPIs measure how quickly a new node can sync to the network tip using state sync and block sync.
+
+**Target:** Total sync time <10 minutes (state sync + block sync)
+
+### Running Sync Tests
+
+#### Option 1: Local node (Mocha Testnet)
+
+The `mocha-measure-tip-sync.sh` script runs one or more iterations and provides statistical analysis:
+
+```bash
+# Single iteration
+./scripts/mocha-measure-tip-sync.sh
+
+# Multiple iterations (20 iterations with 30s cooldown)
+./scripts/mocha-measure-tip-sync.sh --iterations 20 --cooldown 30
+```
+
+#### Option 2: Cloud Testing on DigitalOcean
+
+Use the `measure-tip-sync` tool, which automates droplet creation, node setup, and sync measurement:
+
+1. **Install the tool**
+
+   ```bash
+   go install ./tools/measure-tip-sync
+   ```
+
+2. **Run the tests:**
+
+   ```bash
+   # Multiple iterations (20 iterations with 30s cooldown between each)
+   measure-tip-sync -k ~/.ssh/id_ed25519 -n 20 -c 30
+   ```
+
+### Analyzing Sync Results
+
+The combined sync (state + block sync) must take less than 10 minutes.
diff --git a/tools/talis/latency_monitor.go b/tools/talis/latency_monitor.go
new file mode 100644
index 0000000000..13f964bf37
--- /dev/null
+++ b/tools/talis/latency_monitor.go
@@ -0,0 +1,382 @@
+package main
+
+import (
+	"context"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/spf13/cobra"
+)
+
+const (
+	LatencyMonitorSessionName = "latency-monitor"
+)
+
+// startLatencyMonitorCmd creates a cobra command for starting the latency monitor on remote instances.
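+//
+// Typical invocations, taken from kpi_reproduction_steps.md in this change
+// (the flag values are examples, not defaults):
+//
+//	talis latency-monitor -i 1 -b 8000000 -z 8000000 -s 1000ms
+//	talis latency-monitor --instances 1 -w 15 -b 8000000 -z 2000000 --submission-delay 100ms
+//
+// Pass --stop to tear the tmux session down again.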
+func startLatencyMonitorCmd() *cobra.Command { + var ( + instances int + blobSize int + blobSizeMin int + submissionDelay string + namespace string + observabilityPort int + promtailConfig string + rootDir string + SSHKeyPath string + stop bool + workers int + ) + + cmd := &cobra.Command{ + Use: "latency-monitor", + Short: "Starts or stops the latency monitor on remote validators", + Long: "Connects to remote validators and starts/stops the latency monitor in a detached tmux session.", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + if promtailConfig == "" { + promtailConfig = filepath.Join(rootDir, "observability", "promtail", "promtail-config.yml") + } + + resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + // Only operate on the number of instances that were specified + insts := []Instance{} + for i, val := range cfg.Validators { + if i >= instances || i >= len(cfg.Validators) { + break + } + insts = append(insts, val) + } + + if stop { + fmt.Printf("Stopping latency monitor on %d instance(s)...\n", len(insts)) + return stopTmuxSession(insts, resolvedSSHKeyPath, LatencyMonitorSessionName, time.Minute*5) + } + + // Derive Loki URL from observability public IP + var lokiURL string + if len(cfg.Observability) > 0 { + if err := updateLatencyTargets(cfg, cfg.Observability[0], resolvedSSHKeyPath, insts); err != nil { + return err + } + + if cfg.Observability[0].PublicIP != "" { + lokiURL = fmt.Sprintf("http://%s:3100", cfg.Observability[0].PublicIP) + fmt.Printf("Using Loki URL from observability node: %s\n", lokiURL) + } + } + + latencyMonitorCmd := fmt.Sprintf( + "stdbuf -oL latency-monitor -k .celestia-app -a txsim -e localhost:9091 -b %d -z %d -d %s -n %s --observability-port %d -w %d 2>&1 | tee -a /root/latency-monitor-logs", + blobSize, + blobSizeMin, + submissionDelay, + namespace, + observabilityPort, + workers, + ) + + latencyMonitorScript := latencyMonitorCmd + if lokiURL != "" { + script, err := promtailScript(rootDir, promtailConfig, lokiURL, latencyMonitorCmd) + if err != nil { + return err + } + latencyMonitorScript = script + } + + fmt.Printf("Starting latency monitor on %d instance(s)...\n", len(insts)) + + if err := runScriptInTMux(insts, resolvedSSHKeyPath, latencyMonitorScript, LatencyMonitorSessionName, time.Minute*5); err != nil { + return err + } + return verifyLatencyMonitorStart(insts, resolvedSSHKeyPath, lokiURL != "", 30*time.Second) + }, + } + + // Define flags for the command + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to the user's SSH key (overrides environment variable and default)") + cmd.Flags().IntVarP(&instances, "instances", "i", 1, "the number of instances of latency monitor, each ran on its own validator") + cmd.Flags().IntVarP(&blobSize, "blob-size", "b", 1024, "the max number of bytes in each blob") + cmd.Flags().IntVarP(&blobSizeMin, "blob-size-min", "z", 1024, "the min number of bytes in each blob") + cmd.Flags().StringVarP(&submissionDelay, "submission-delay", "s", "4000ms", "delay between transaction submissions") + cmd.Flags().StringVarP(&namespace, "namespace", "n", "test", "namespace for blob submission") + cmd.Flags().IntVarP(&observabilityPort, 
"observability-port", "m", 9464, "port for Prometheus observability HTTP server (0 to disable)") + cmd.Flags().StringVar(&promtailConfig, "promtail-config", "", "path to promtail config template (defaults to ./observability/promtail/promtail-config.yml)") + cmd.Flags().BoolVar(&stop, "stop", false, "stop the latency monitor instead of starting it") + cmd.Flags().IntVarP(&workers, "workers", "w", 1, "number of parallel worker accounts for submission (1 = sequential, >1 = parallel)") + _ = cmd.MarkFlagRequired("instances") + + return cmd +} + +func promtailScript(rootDir, promtailConfigPath, lokiURL, latencyMonitorCmd string) (string, error) { + configBytes, err := os.ReadFile(promtailConfigPath) + if err != nil { + return "", fmt.Errorf("failed to read promtail config %q: %w", promtailConfigPath, err) + } + + normalizedLokiURL := normalizeLokiURL(strings.TrimRight(lokiURL, "/")) + configIncludesPushPath := strings.Contains(string(configBytes), "__LOKI_URL__/loki/api/v1/push") + normalizedLokiURL = ensureLokiPushURL(normalizedLokiURL, configIncludesPushPath) + renderedConfig := strings.ReplaceAll(string(configBytes), "__LOKI_URL__", normalizedLokiURL) + configB64 := base64.StdEncoding.EncodeToString([]byte(renderedConfig)) + + scriptPath := filepath.Join(rootDir, "tools", "talis", "scripts", "promtail.sh") + scriptBytes, err := os.ReadFile(scriptPath) + if err != nil { + return "", fmt.Errorf("failed to read promtail script template %q: %w", scriptPath, err) + } + + renderedScript := strings.NewReplacer( + "__PROMTAIL_CONFIG_B64__", configB64, + "__LATENCY_MONITOR_CMD__", latencyMonitorCmd, + ).Replace(string(scriptBytes)) + + return renderedScript, nil +} + +func normalizeLokiURL(raw string) string { + if strings.HasPrefix(raw, "http:/") && !strings.HasPrefix(raw, "http://") { + return "http://" + strings.TrimPrefix(raw, "http:/") + } + if strings.HasPrefix(raw, "https:/") && !strings.HasPrefix(raw, "https://") { + return "https://" + strings.TrimPrefix(raw, "https:/") + } + return raw +} + +func ensureLokiPushURL(lokiURL string, configIncludesPushPath bool) string { + if configIncludesPushPath { + return strings.TrimSuffix(lokiURL, "/loki/api/v1/push") + } + if strings.HasSuffix(lokiURL, "/loki/api/v1/push") { + return lokiURL + } + return lokiURL + "/loki/api/v1/push" +} + +// updateLatencyTargets updates the latency monitor targets on the observability monitoring node. It shows the nodes that are currently running the latency monitor. 
+func updateLatencyTargets(cfg Config, observabilityNode Instance, sshKeyPath string, instances []Instance) error { + groups, skipped, err := buildObservabilityTargetsForInstances(instances, cfg, latencyMonitorMetricsPort, "public", "validator") + if err != nil { + return err + } + + payload, err := marshalTargets(groups, true) + if err != nil { + return err + } + + if skipped > 0 { + log.Printf("skipped %d nodes for latency monitor targets (missing IP)", skipped) + } + + encoded := base64.StdEncoding.EncodeToString(payload) + remotePath := "/root/observability/docker/targets/latency_targets.json" + writeCmd := fmt.Sprintf("printf '%%s' %q | base64 -d > %s", encoded, remotePath) + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*2) + defer cancel() + + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", observabilityNode.PublicIP), + writeCmd, + ) + if out, err := ssh.CombinedOutput(); err != nil { + return fmt.Errorf("failed to update latency targets on %s: %w\n%s", observabilityNode.PublicIP, err, out) + } + + log.Printf("updated latency monitor targets on observability node %s (%d entries)", observabilityNode.PublicIP, len(groups)) + return nil +} + +func verifyLatencyMonitorStart(instances []Instance, sshKeyPath string, expectPromtail bool, timeout time.Duration) error { + var wg sync.WaitGroup + errCh := make(chan error, len(instances)) + + for _, inst := range instances { + wg.Add(1) + go func(inst Instance) { + defer wg.Done() + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + runSSH := func(cmd string) ([]byte, error) { + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + cmd, + ) + return ssh.CombinedOutput() + } + + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + if _, err := runSSH("pgrep -a latency-monitor"); err == nil { + if !expectPromtail { + return + } + if _, err := runSSH("pgrep -a promtail"); err == nil { + return + } + } + time.Sleep(2 * time.Second) + } + + promtailOut, _ := runSSH("tail -200 /root/promtail.log 2>/dev/null || true") + latmonOut, _ := runSSH("tail -200 /root/latency-monitor-logs 2>/dev/null || true") + errCh <- fmt.Errorf( + "[%s:%s] latency-monitor did not start within %s\n-- promtail.log --\n%s\n-- latency-monitor-logs --\n%s", + inst.Name, + inst.PublicIP, + timeout, + strings.TrimSpace(string(promtailOut)), + strings.TrimSpace(string(latmonOut)), + ) + }(inst) + } + + wg.Wait() + close(errCh) + + var errs []error //nolint:prealloc + for e := range errCh { + errs = append(errs, e) + } + if len(errs) > 0 { + sb := strings.Builder{} + sb.WriteString("latency-monitor failed to start on one or more hosts:\n") + for _, e := range errs { + sb.WriteString("- ") + sb.WriteString(e.Error()) + sb.WriteByte('\n') + } + return errors.New(sb.String()) + } + return nil +} + +const ( + gracefulShutdownPollInterval = 5 * time.Second + gracefulShutdownTimeout = 60 * time.Second +) + +// stopTmuxSession SSHes into each remote host in parallel and gracefully stops the tmux session. +// It sends Ctrl+C to initiate graceful shutdown, polls for session termination, and falls back +// to force-killing the session if it doesn't stop within the timeout. 
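+//
+// A minimal sketch of a caller, mirroring the latency-monitor command above:
+//
+//	if stop {
+//		return stopTmuxSession(insts, resolvedSSHKeyPath, LatencyMonitorSessionName, time.Minute*5)
+//	}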
+func stopTmuxSession(
+	instances []Instance,
+	sshKeyPath string,
+	sessionName string,
+	timeout time.Duration,
+) error {
+	var wg sync.WaitGroup
+	errCh := make(chan error, len(instances))
+	counter := atomic.Uint32{}
+
+	for _, inst := range instances {
+		wg.Add(1)
+		go func(inst Instance) {
+			defer wg.Done()
+
+			ctx, cancel := context.WithTimeout(context.Background(), timeout)
+			defer cancel()
+
+			// Helper to run SSH commands
+			runSSH := func(cmd string) ([]byte, error) {
+				ssh := exec.CommandContext(ctx,
+					"ssh",
+					"-i", sshKeyPath,
+					"-o", "StrictHostKeyChecking=no",
+					"-o", "UserKnownHostsFile=/dev/null",
+					fmt.Sprintf("root@%s", inst.PublicIP),
+					cmd,
+				)
+				return ssh.CombinedOutput()
+			}
+
+			// Check if session exists first
+			if _, err := runSSH(fmt.Sprintf("tmux has-session -t %s 2>/dev/null", sessionName)); err != nil {
+				log.Printf("[%s] no %s session found, nothing to stop\n", inst.Name, sessionName)
+				counter.Add(1)
+				return
+			}
+
+			// Send Ctrl+C to initiate graceful shutdown
+			log.Printf("[%s] sending Ctrl+C to %s session...\n", inst.Name, sessionName)
+			if _, err := runSSH(fmt.Sprintf("tmux send-keys -t %s C-c", sessionName)); err != nil {
+				errCh <- fmt.Errorf("[%s:%s] failed to send Ctrl+C: %v", inst.Name, inst.PublicIP, err)
+				return
+			}
+
+			// Poll for session termination
+			deadline := time.Now().Add(gracefulShutdownTimeout)
+			for time.Now().Before(deadline) {
+				time.Sleep(gracefulShutdownPollInterval)
+
+				// Check if session still exists
+				if _, err := runSSH(fmt.Sprintf("tmux has-session -t %s 2>/dev/null", sessionName)); err != nil {
+					// Session no longer exists - graceful shutdown succeeded
+					log.Printf("[%s] %s session gracefully stopped ✓ – %d/%d\n",
+						inst.Name, sessionName, counter.Add(1), len(instances))
+					return
+				}
+
+				log.Printf("[%s] %s session still running, waiting...\n", inst.Name, sessionName)
+			}
+
+			// Timeout reached - force kill the session
+			log.Printf("[%s] timeout reached, force killing %s session...\n", inst.Name, sessionName)
+			if out, err := runSSH(fmt.Sprintf("tmux kill-session -t %s 2>/dev/null || true", sessionName)); err != nil {
+				errCh <- fmt.Errorf("[%s:%s] failed to force kill session: %v\n%s",
+					inst.Name, inst.PublicIP, err, out)
+				return
+			}
+
+			log.Printf("[%s] %s session force killed ⚠️ – %d/%d\n",
+				inst.Name, sessionName, counter.Add(1), len(instances))
+		}(inst)
+	}
+
+	wg.Wait()
+	close(errCh)
+
+	errs := make([]error, 0, len(instances))
+	for err := range errCh {
+		errs = append(errs, err)
+	}
+
+	if len(errs) == 0 {
+		return nil
+	}
+
+	return fmt.Errorf("errors stopping tmux session:\n%w", errors.Join(errs...))
+}
diff --git a/tools/talis/main.go b/tools/talis/main.go
new file mode 100644
index 0000000000..016b1ee340
--- /dev/null
+++ b/tools/talis/main.go
@@ -0,0 +1,48 @@
+package main
+
+import (
+	"log"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	rootCmd := &cobra.Command{
+		Use:   "talis",
+		Short: "Talis CLI",
+		Long:  "Talis CLI is a command line interface for running performance experiments.",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return cmd.Help()
+		},
+	}
+
+	rootCmd.AddCommand(
+		downloadCmd(),
+		generateCmd(),
+		initCmd(),
+		initEnvCmd(),
+		statusCmd(),
+		listCmd(),
+		upCmd(),
+		downCmd(),
+		deployCmd(),
+		addCmd(),
+		startTxsimCmd(),
+		startLatencyMonitorCmd(),
+		uploadDataCmd(),
+		killTmuxSessionCmd(),
+		resetCmd(),
+		setupFibreCmd(),
+		startFibreCmd(),
+		fibreTxsimCmd(),
+		fibreThroughputCmd(),
+		fibreBootstrapEvnodeCmd(),
+		resourceMonitorCmd(),
+		downloadResourcesCmd(),
+
syncNodeCmd(), + ) + + if err := rootCmd.Execute(); err != nil { + log.Fatal(err) + } +} diff --git a/tools/talis/monitor.go b/tools/talis/monitor.go new file mode 100644 index 0000000000..09a4a8939e --- /dev/null +++ b/tools/talis/monitor.go @@ -0,0 +1,98 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/spf13/cobra" +) + +const ResourceMonitorSessionName = "monitor" + +func resourceMonitorCmd() *cobra.Command { + var ( + rootDir string + SSHKeyPath string + nodes string + interval int + stop bool + ) + + cmd := &cobra.Command{ + Use: "resource-monitor", + Short: "Start network and CPU monitoring on remote validators", + Long: `Deploys a monitoring script to remote validators that records per-port +network bandwidth (iptables accounting for ports 9091, 26656, 26657) and +per-process CPU/memory usage (celestia-appd, fibre-txsim, txsim). + +Output is written to /root/monitor.jsonl on each validator. Use +'talis download-resources' to collect the results.`, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + validators, err := filterMatchingInstances(cfg.Validators, nodes) + if err != nil { + return fmt.Errorf("failed to filter nodes: %w", err) + } + if len(validators) == 0 { + return fmt.Errorf("no matching validators found for pattern %q", nodes) + } + + if stop { + fmt.Printf("Stopping monitor on %d validator(s)...\n", len(validators)) + return stopTmuxSession(validators, resolvedSSHKeyPath, ResourceMonitorSessionName, 5*time.Minute) + } + + // Read the monitor.sh script from the scripts directory. + scriptPath := filepath.Join(rootDir, "tools", "talis", "scripts", "monitor.sh") + scriptBytes, err := os.ReadFile(scriptPath) + if err != nil { + return fmt.Errorf("failed to read monitor script %q: %w", scriptPath, err) + } + + // Prepend the interval env var so the script picks it up. 
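+			// For example, running `talis resource-monitor --interval 5`
+			// makes the remote tmux session effectively execute:
+			//
+			//	export MONITOR_INTERVAL=5
+			//	<contents of scripts/monitor.sh>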
+ script := fmt.Sprintf("export MONITOR_INTERVAL=%d\n%s", interval, string(scriptBytes)) + + fmt.Printf("Starting monitor on %d validator(s)...\n", len(validators)) + + if err := runScriptInTMux(validators, resolvedSSHKeyPath, script, ResourceMonitorSessionName, 5*time.Minute); err != nil { + return fmt.Errorf("failed to start monitor sessions: %w", err) + } + + fmt.Println() + fmt.Println("=== monitor sessions started ===") + fmt.Printf(" tmux session: %s\n", ResourceMonitorSessionName) + fmt.Printf(" output file: /root/monitor.jsonl\n") + fmt.Printf(" log file: /root/talis-%s.log\n", ResourceMonitorSessionName) + fmt.Println(" validators:") + for _, val := range validators { + fmt.Printf(" - %s (%s)\n", val.Name, val.PublicIP) + } + fmt.Println() + fmt.Printf(" To stop: talis resource-monitor --stop\n") + fmt.Printf(" To kill: talis kill-session -s %s\n", ResourceMonitorSessionName) + fmt.Printf(" To download: talis download-resources\n") + + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory (for config.json)") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to SSH private key (overrides env/default)") + cmd.Flags().StringVarP(&nodes, "nodes", "n", "validator-*", "glob pattern for which validators to monitor") + cmd.Flags().IntVar(&interval, "interval", 1, "sampling interval in seconds") + cmd.Flags().BoolVar(&stop, "stop", false, "stop monitoring instead of starting it") + + return cmd +} diff --git a/tools/talis/network.go b/tools/talis/network.go new file mode 100644 index 0000000000..c55bc4d47b --- /dev/null +++ b/tools/talis/network.go @@ -0,0 +1,354 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + sdkmath "cosmossdk.io/math" + "github.com/celestiaorg/celestia-app/v9/app" + "github.com/celestiaorg/celestia-app/v9/app/encoding" + "github.com/celestiaorg/celestia-app/v9/test/util/genesis" + blobtypes "github.com/celestiaorg/celestia-app/v9/x/blob/types" + minfeetypes "github.com/celestiaorg/celestia-app/v9/x/minfee/types" + "github.com/celestiaorg/go-square/v4/share" + cmtconfig "github.com/cometbft/cometbft/config" + cmtjson "github.com/cometbft/cometbft/libs/json" + cmtos "github.com/cometbft/cometbft/libs/os" + "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/p2p/pex" + "github.com/cometbft/cometbft/privval" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + serverconfig "github.com/cosmos/cosmos-sdk/server/config" + "github.com/spf13/viper" +) + +// NodeInfo is a struct that contains the name, IP address, and network address +// of a node. +type NodeInfo struct { + Name string `json:"name"` + IP string `json:"ip"` + NetworkAddress string `json:"network_address"` + Region string `json:"region"` +} + +func (n NodeInfo) PeerID() string { + return fmt.Sprintf("%s@%s:26656", n.NetworkAddress, n.IP) +} + +// Network maintains the initial state of the network. This includes the +// genesis, all relevant validators included in the genesis, and all accounts. +type Network struct { + genesis *genesis.Genesis + ecfg encoding.Config + + validators map[string]NodeInfo + accounts []string +} + +func NewNetwork(chainID string, squareSize int, mods ...genesis.Modifier) (*Network, error) { + codec := encoding.MakeConfig(app.ModuleEncodingRegisters...) 
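+	// Size the consensus params to the requested square: MaxBytes below is an
+	// upper bound of squareSize^2 shares times the per-share sparse payload
+	// size, which the KPI doc treats as roughly a 32MB block at square size 256.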
+ blobParams := blobtypes.DefaultParams() + blobParams.GovMaxSquareSize = uint64(squareSize) + cparams := app.DefaultConsensusParams() + cparams.Block.MaxBytes = int64(squareSize * squareSize * share.ContinuationSparseShareContentSize) + + mods = append(mods, genesis.ImmediateProposals(codec.Codec)) + mods = append(mods, genesis.SetBlobParams(codec.Codec, blobParams)) + + g := genesis.NewDefaultGenesis(). + WithChainID(chainID). + WithModifiers(mods...). + WithConsensusParams(cparams) + + return &Network{ + genesis: g, + validators: make(map[string]NodeInfo), + ecfg: codec, + }, nil +} + +func SetMinFee(codec codec.Codec, minFee float64) genesis.Modifier { + return func(state map[string]json.RawMessage) map[string]json.RawMessage { + minFeeGenState := minfeetypes.DefaultGenesis() + gasPrice, err := sdkmath.LegacyNewDecFromStr(fmt.Sprintf("%f", minFee)) + if err != nil { + panic(err) + } + minFeeGenState.NetworkMinGasPrice = gasPrice + state[minfeetypes.ModuleName] = codec.MustMarshalJSON(minFeeGenState) + return state + } +} + +// AddValidator adds a validator to the network. The validator is identified by +// its name which is assigned by pulumi as hardware is allocated. An additional +// account and keyring are saved to the payload directory that can be used by +// txsim. Pre-funded fibre accounts are also created for each validator. +// if the stake is set to 0, a default value is used. +func (n *Network) AddValidator(name, ip, payLoadRoot, region string, stake int64, fibreAccounts int) error { + n.validators[name] = NodeInfo{ + Name: name, + IP: ip, + Region: region, + } + + val := genesis.NewDefaultValidator(name) + if stake != 0 { + val.Stake = stake + } + err := n.genesis.NewValidator(val) + if err != nil { + return err + } + + // add a txsim key and keyring to each validator + kr, err := keyring.New(app.Name, keyring.BackendTest, + filepath.Join(payLoadRoot, name), nil, n.ecfg.Codec) + if err != nil { + return err + } + + // import the validator's state key to its own personal keyring + gkr := n.genesis.Keyring() + + valPriv, err := gkr.ExportPrivKeyArmor(name, "congest") + if err != nil { + return err + } + + err = kr.ImportPrivKey("validator", valPriv, "congest") + if err != nil { + return err + } + + if err := addFundedAccount(kr, n.genesis, "txsim"); err != nil { + return err + } + + fmt.Printf("creating %d fibre accounts for validator %s\n", fibreAccounts, name) + for i := range fibreAccounts { + if err := addFundedAccount(kr, n.genesis, fmt.Sprintf("fibre-%d", i)); err != nil { + return err + } + } + + return nil +} + +// AddEncoder creates a keyring for a dedicated encoder instance with uniquely +// prefixed fibre accounts (enc0-0, enc0-1, ...) so that multiple encoders can +// each fund their own escrow without blocking one another. +func (n *Network) AddEncoder(name, payLoadRoot string, fibreAccounts int) error { + kr, err := keyring.New(app.Name, keyring.BackendTest, + filepath.Join(payLoadRoot, name), nil, n.ecfg.Codec) + if err != nil { + return err + } + + index := extractIndexFromName(name) + keyPrefix := fmt.Sprintf("enc%d", index) + + fmt.Printf("creating %d fibre accounts for encoder %s (prefix=%s)\n", fibreAccounts, name, keyPrefix) + for i := range fibreAccounts { + if err := addFundedAccount(kr, n.genesis, fmt.Sprintf("%s-%d", keyPrefix, i)); err != nil { + return err + } + } + + return nil +} + +// addFundedAccount creates a new key in the local keyring and registers it as a +// funded account in genesis. 
The key lives in the validator's keyring so the +// binary (txsim, fibre-txsim) can sign transactions at runtime. +func addFundedAccount(kr keyring.Keyring, g *genesis.Genesis, name string) error { + key, _, err := kr.NewMnemonic(name, keyring.English, "", "", hd.Secp256k1) + if err != nil { + return err + } + pk, err := key.GetPubKey() + if err != nil { + return err + } + return g.AddAccount(genesis.Account{ + PubKey: pk, + Balance: 9999999999999999, + Name: name, + }) +} + +func (n *Network) Peers() []string { + var peers []string //nolint:prealloc + for _, v := range n.validators { + if v.IP == "" { + continue + } + peers = append(peers, v.PeerID()) + } + return peers +} + +func (n *Network) InitNodes(rootDir string) error { + if len(n.accounts) != 0 { + n.genesis.WithKeyringAccounts(genesis.NewKeyringAccounts(genesis.DefaultInitialBalance, n.accounts...)...) + } + + // save the genesis file + genesisPath := filepath.Join(rootDir, "genesis.json") + + genDoc, err := n.genesis.Export() + if err != nil { + return err + } + + genBytes, err := cmtjson.MarshalIndent(genDoc, "", " ") + if err != nil { + return err + } + + // save the genesis file as configured + err = cmtos.WriteFile(genesisPath, genBytes, 0o644) + if err != nil { + return err + } + + fmt.Println("genesis file saved to", genesisPath, "with", len(n.validators), "validators") + + vals := n.genesis.Validators() + + // Pass 1: write per-validator node_key.json + priv_validator files, and + // stamp NetworkAddress into n.validators so pass 2 can build a complete + // persistent_peers list. + for _, v := range vals { + valPath := filepath.Join(rootDir, v.Name) + nodeKeyFile := filepath.Join(valPath, "node_key.json") + if err := cmtos.EnsureDir(filepath.Dir(nodeKeyFile), 0o777); err != nil { + return err + } + + // add the network key assigned by the genesis to that validator's payload + nodeKey := &p2p.NodeKey{ + PrivKey: v.NetworkKey, + } + if err := nodeKey.SaveAs(nodeKeyFile); err != nil { + return err + } + ninfo, has := n.validators[v.Name] + if !has { + return fmt.Errorf("no validator found %s", v.Name) + } + ninfo.NetworkAddress = string(nodeKey.ID()) + n.validators[v.Name] = ninfo + + // generate remaining private key file using the assigned consensus key + pvStateFile := filepath.Join(valPath, "priv_validator_state.json") + if err := cmtos.EnsureDir(filepath.Dir(pvStateFile), 0o777); err != nil { + return err + } + pvKeyFile := filepath.Join(valPath, "priv_validator_key.json") + if err := cmtos.EnsureDir(filepath.Dir(pvKeyFile), 0o777); err != nil { + return err + } + filePV := privval.NewFilePV(v.ConsensusKey, pvKeyFile, pvStateFile) + filePV.Save() + } + + // Pass 2: now that every validator's NetworkAddress is known, write + // config.toml with a populated persistent_peers list. Without this the + // chain has no bootstrap mechanism — addrbook alone is not enough — and + // validators come up with zero peers and never reach quorum. + // + // Use the templated config.toml that `talis init` wrote one level up + // (built from app.DefaultConsensusConfig + DefaultConfigProfile, see + // init.go:137-139). That carries the celestia-specific overrides + // AND the talis profile bits (TracingTables, Prometheus enable/listen + // addr, RPC.GRPCListenAddress=0.0.0.0:9090). Falling back to + // app.DefaultConsensusConfig directly would silently drop the talis + // profile — observability would break on --with-observability runs. 
+ baseCfgPath := filepath.Join(filepath.Dir(rootDir), "config.toml") + v := viper.New() + v.SetConfigFile(baseCfgPath) + if err := v.ReadInConfig(); err != nil { + return fmt.Errorf("failed to read base config %q: %w", baseCfgPath, err) + } + + for _, val := range vals { + selfInfo := n.validators[val.Name] + selfID := selfInfo.NetworkAddress + var peers []string + for _, peer := range n.validators { + if peer.NetworkAddress == "" || peer.NetworkAddress == selfID || peer.IP == "" || peer.IP == "TBD" { + continue + } + peers = append(peers, peer.PeerID()) + } + + // Start from app.DefaultConsensusConfig so any field absent from the + // templated TOML still inherits celestia defaults, then layer the + // templated values on top. + cmtcfg := app.DefaultConsensusConfig() + if err := v.Unmarshal(cmtcfg); err != nil { + return fmt.Errorf("failed to unmarshal base config: %w", err) + } + + // Without persistent_peers the chain has no bootstrap mechanism on + // a fresh testnet — addrbook alone is not enough — and validators + // come up with zero peers and never reach quorum. + cmtcfg.P2P.PersistentPeers = strings.Join(peers, ",") + // Enable the priv-validator gRPC endpoint that fibre needs to fetch + // the validator's public key for shard-assignment verification. + cmtcfg.PrivValidatorGRPCListenAddr = "127.0.0.1:26659" + cmtconfig.WriteConfigFile(filepath.Join(rootDir, val.Name, "config.toml"), cmtcfg) + + appcfg := app.DefaultAppConfig() + serverconfig.WriteConfigFile(filepath.Join(rootDir, val.Name, "app.toml"), appcfg) + } + + return nil +} + +// SaveValidatorsToFile saves the validators map as a JSON to the given file. +func (n *Network) SaveValidatorsToFile(filename string) error { + // Open the file for writing. Create it if it doesn't exist. + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + + // Encode the validators map to JSON and write it to the file. + encoder := json.NewEncoder(file) + encoder.SetIndent("", " ") // Optional: format the JSON with indentation + err = encoder.Encode(n.validators) + if err != nil { + return err + } + + return nil +} + +func (n *Network) SaveAddressBook(payloadRoot string, peers []string) error { + addrBookFile := filepath.Join(payloadRoot, "addrbook.json") + return WriteAddressBook(peers, addrBookFile) +} + +func WriteAddressBook(peers []string, file string) error { + book := pex.NewAddrBook(file, false) + for _, peer := range peers { + addr, err := p2p.NewNetAddressString(peer) + if err != nil { + return fmt.Errorf("parsing peer address %s: %w", peer, err) + } + err = book.AddAddress(addr, addr) + if err != nil { + return fmt.Errorf("adding peer address %s: %w", peer, err) + } + } + book.Save() + return nil +} diff --git a/tools/talis/observability_payload.go b/tools/talis/observability_payload.go new file mode 100644 index 0000000000..225a4e8ed0 --- /dev/null +++ b/tools/talis/observability_payload.go @@ -0,0 +1,143 @@ +package main + +import ( + "crypto/rand" + "fmt" + "log" + "math/big" + "os" + "path/filepath" +) + +const ( + defaultMetricsPort = 26660 + appTelemetryPort = 1317 + latencyMonitorMetricsPort = 9464 + grafanaPasswordLength = 16 +) + +// generateGrafanaPassword generates a random alphanumeric password. 
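+// Each character is drawn with crypto/rand rather than a seeded math/rand
+// source, so the generated credentials are not predictable across runs.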
+func generateGrafanaPassword() (string, error) { + const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + password := make([]byte, grafanaPasswordLength) + for i := range password { + n, err := rand.Int(rand.Reader, big.NewInt(int64(len(charset)))) + if err != nil { + return "", fmt.Errorf("failed to generate random number: %w", err) + } + password[i] = charset[n.Int64()] + } + return string(password), nil +} + +// stageObservabilityPayload copies the observability directory (docker-compose, Prometheus config, +// Grafana dashboards, and setup scripts) into the payload directory and generates +// the targets.json file from the config. +// +// If no observability monitoring nodes are configured, this function does nothing. +// If observability monitoring nodes are configured but observabilitySrcDir is empty, it returns an error. +func stageObservabilityPayload(cfg Config, observabilitySrcDir, payloadDir string) error { + // Skip if no observability monitoring nodes configured + if len(cfg.Observability) == 0 { + return nil + } + + // Error if observability monitoring nodes configured but no observability directory provided + if observabilitySrcDir == "" { + return fmt.Errorf("observability monitoring nodes are configured but --observability-dir flag not provided") + } + + // Validate source directory exists + if fi, err := os.Stat(observabilitySrcDir); err != nil || !fi.IsDir() { + return fmt.Errorf("observability directory %q does not exist or is not a directory", observabilitySrcDir) + } + + dockerSrc := filepath.Join(observabilitySrcDir, "docker") + observabilityDest := filepath.Join(payloadDir, "observability") + dockerDest := filepath.Join(observabilityDest, "docker") + + if err := copyDir(dockerSrc, dockerDest); err != nil { + return fmt.Errorf("failed to copy observability docker assets: %w", err) + } + + for _, script := range []string{"install_metrics.sh", "start_metrics.sh"} { + src := filepath.Join(observabilitySrcDir, script) + dest := filepath.Join(observabilityDest, script) + if err := copyFile(src, dest, 0o755); err != nil { + return fmt.Errorf("failed to copy observability script %s: %w", script, err) + } + } + + // Generate validator observability targets (CometBFT on port 26660) + groups, skipped, err := buildObservabilityTargets(cfg, defaultMetricsPort, "public") + if err != nil { + return err + } + + payload, err := marshalTargets(groups, true) + if err != nil { + return err + } + + targetsDir := filepath.Join(dockerDest, "targets") + if err := os.MkdirAll(targetsDir, 0o755); err != nil { + return fmt.Errorf("failed to create targets directory: %w", err) + } + + targetsPath := filepath.Join(targetsDir, "targets.json") + if err := os.WriteFile(targetsPath, payload, 0o644); err != nil { + return fmt.Errorf("failed to write targets file: %w", err) + } + + // Generate latency monitor targets (same validators, port 9464) + latencyGroups, _, err := buildObservabilityTargets(cfg, latencyMonitorMetricsPort, "public") + if err != nil { + return err + } + + latencyPayload, err := marshalTargets(latencyGroups, true) + if err != nil { + return err + } + + latencyTargetsPath := filepath.Join(targetsDir, "latency_targets.json") + if err := os.WriteFile(latencyTargetsPath, latencyPayload, 0o644); err != nil { + return fmt.Errorf("failed to write latency targets file: %w", err) + } + + // Generate app telemetry targets (same validators, port 1317) + appGroups, _, err := buildObservabilityTargets(cfg, appTelemetryPort, "public") + if err != nil { + return err 
+ } + + appPayload, err := marshalTargets(appGroups, true) + if err != nil { + return err + } + + appTargetsPath := filepath.Join(targetsDir, "app_targets.json") + if err := os.WriteFile(appTargetsPath, appPayload, 0o644); err != nil { + return fmt.Errorf("failed to write app targets file: %w", err) + } + + // Fibre metrics are pushed via OTel Collector (OTLP), not scraped directly. + + // Generate random Grafana password and write .env file + grafanaPassword, err := generateGrafanaPassword() + if err != nil { + return fmt.Errorf("failed to generate Grafana password: %w", err) + } + envContent := fmt.Sprintf("GRAFANA_PASSWORD=%s\n", grafanaPassword) + envPath := filepath.Join(dockerDest, ".env") + if err := os.WriteFile(envPath, []byte(envContent), 0o644); err != nil { + return fmt.Errorf("failed to write .env file: %w", err) + } + + log.Printf("staged observability payload with %d targets", len(groups)) + if skipped > 0 { + log.Printf("⚠️ skipped %d nodes for observability targets (missing private/public IP)", skipped) + } + + return nil +} diff --git a/tools/talis/observability_targets.go b/tools/talis/observability_targets.go new file mode 100644 index 0000000000..5a3d3aa404 --- /dev/null +++ b/tools/talis/observability_targets.go @@ -0,0 +1,108 @@ +package main + +import ( + "encoding/json" + "fmt" +) + +type targetGroup struct { + Targets []string `json:"targets"` + Labels map[string]string `json:"labels,omitempty"` +} + +func buildObservabilityTargets(cfg Config, port int, addressSource string) ([]targetGroup, int, error) { + if addressSource != "public" && addressSource != "private" { + return nil, 0, fmt.Errorf("invalid address source %q (use public or private)", addressSource) + } + + var groups []targetGroup //nolint:prealloc + var skipped int + + appendTargets := func(nodes []Instance, role string) { + for _, node := range nodes { + address, ok := nodeAddress(node, port, addressSource) + if !ok { + skipped++ + continue + } + + groups = append(groups, targetGroup{ + Targets: []string{address}, + Labels: map[string]string{ + "chain_id": cfg.ChainID, + "experiment": cfg.Experiment, + "role": role, + "region": node.Region, + "provider": string(node.Provider), + "node_id": node.Name, + }, + }) + } + } + + appendTargets(cfg.Validators, "validator") + appendTargets(cfg.Bridges, "bridge") + appendTargets(cfg.Lights, "light") + + return groups, skipped, nil +} + +func buildObservabilityTargetsForInstances(instances []Instance, cfg Config, port int, addressSource, role string) ([]targetGroup, int, error) { + if addressSource != "public" && addressSource != "private" { + return nil, 0, fmt.Errorf("invalid address source %q (use public or private)", addressSource) + } + + var groups []targetGroup //nolint:prealloc + var skipped int + + for _, node := range instances { + address, ok := nodeAddress(node, port, addressSource) + if !ok { + skipped++ + continue + } + + groups = append(groups, targetGroup{ + Targets: []string{address}, + Labels: map[string]string{ + "chain_id": cfg.ChainID, + "experiment": cfg.Experiment, + "role": role, + "region": node.Region, + "provider": string(node.Provider), + "node_id": node.Name, + }, + }) + } + + return groups, skipped, nil +} + +func marshalTargets(groups []targetGroup, pretty bool) ([]byte, error) { + if pretty { + return json.MarshalIndent(groups, "", " ") + } + return json.Marshal(groups) +} + +func nodeAddress(node Instance, port int, source string) (string, bool) { + var ip string + switch source { + case "public": + ip = node.PublicIP + if ip == 
"" || ip == "TBD" { + ip = node.PrivateIP + } + case "private": + ip = node.PrivateIP + if ip == "" || ip == "TBD" { + ip = node.PublicIP + } + } + + if ip == "" || ip == "TBD" { + return "", false + } + + return fmt.Sprintf("%s:%d", ip, port), true +} diff --git a/tools/talis/reset.go b/tools/talis/reset.go new file mode 100644 index 0000000000..28c2ca4d69 --- /dev/null +++ b/tools/talis/reset.go @@ -0,0 +1,141 @@ +package main + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/spf13/cobra" +) + +func resetCmd() *cobra.Command { + var ( + rootDir string + cfgPath string + SSHKeyPath string + validators []string + workers int + ) + + cmd := &cobra.Command{ + Use: "reset", + Short: "Reset the specified validators or all validators", + Long: "Stops the running services and removes files created by the deploy command for specified validators or all validators", + RunE: func(cmd *cobra.Command, args []string) error { + // Load config + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + resolvedKey := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + // Filter validators if specific ones were requested + targetValidators := cfg.Validators + if len(validators) > 0 { + targetValidators = make([]Instance, 0) + for _, v := range cfg.Validators { + for _, requested := range validators { + if strings.Contains(v.Name, requested) { + targetValidators = append(targetValidators, v) + break + } + } + } + if len(targetValidators) == 0 { + return fmt.Errorf("no matching validators found") + } + } + + cleanupScript := ` + tmux kill-session -t app 2>/dev/null || true + tmux kill-session -t txsim 2>/dev/null || true + tmux kill-session -t latency-monitor 2>/dev/null || true + tmux kill-session -t fibre 2>/dev/null || true + tmux kill-session -t fibre-txsim 2>/dev/null || true + rm -rf .celestia-app .celestia-fibre logs payload payload.tar.gz /bin/celestia* /bin/txsim /bin/fibre /bin/fibre-txsim + ` + // Run cleanup on each validator + var wg sync.WaitGroup + workerChan := make(chan struct{}, workers) + for _, val := range targetValidators { + wg.Add(1) + go func(v Instance) { + defer wg.Done() + workerChan <- struct{}{} + defer func() { <-workerChan }() + fmt.Printf("Resetting validator %s...\n", v.Name) + if err := runScriptInTMux([]Instance{v}, resolvedKey, cleanupScript, "cleanup", time.Minute*5); err != nil { + fmt.Printf("Warning: error while cleaning up %s: %v\n", v.Name, err) + } + }(val) + } + wg.Wait() + + // Clean up encoder instances. 
+ if len(cfg.Encoders) > 0 { + encoderCleanup := ` + tmux kill-session -t app 2>/dev/null || true + tmux kill-session -t fibre-txsim 2>/dev/null || true + tmux kill-session -t setup-fibre 2>/dev/null || true + rm -rf .celestia-app encoder-payload encoder-payload.tar.gz /bin/celestia* /bin/fibre-txsim + ` + var encWG sync.WaitGroup + encWorkerChan := make(chan struct{}, workers) + for _, enc := range cfg.Encoders { + encWG.Add(1) + go func(e Instance) { + defer encWG.Done() + encWorkerChan <- struct{}{} + defer func() { <-encWorkerChan }() + fmt.Printf("Resetting encoder %s...\n", e.Name) + if err := runScriptInTMux([]Instance{e}, resolvedKey, encoderCleanup, "cleanup", time.Minute*5); err != nil { + fmt.Printf("Warning: error while cleaning up %s: %v\n", e.Name, err) + } + }(enc) + } + encWG.Wait() + } + + // Clean up observability stack (Grafana/Prometheus/Loki) if configured. + if len(cfg.Observability) > 0 { + observabilityCleanup := ` + if [ -d /root/observability/docker ]; then + cd /root/observability/docker && docker compose down -v + fi + rm -rf /root/observability /root/observability-payload.tar.gz + ` + var obsWG sync.WaitGroup + obsWorkerChan := make(chan struct{}, workers) + for _, obs := range cfg.Observability { + obsWG.Add(1) + go func(o Instance) { + defer obsWG.Done() + obsWorkerChan <- struct{}{} + defer func() { <-obsWorkerChan }() + fmt.Printf("Resetting observability node %s...\n", o.Name) + if err := runScriptInTMux([]Instance{o}, resolvedKey, observabilityCleanup, "obs-cleanup", time.Minute*5); err != nil { + fmt.Printf("Warning: error while cleaning up %s: %v\n", o.Name, err) + } + }(obs) + } + obsWG.Wait() + } + + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory to load config from") + cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "config file name") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "override path to your SSH private key") + cmd.Flags().StringSliceVarP(&validators, "validators", "v", []string{}, "optional list of validator names to reset (e.g. validator-0,validator-1)") + cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of concurrent workers for parallel operations (should be > 0)") + + return cmd +} diff --git a/tools/talis/s3.go b/tools/talis/s3.go new file mode 100644 index 0000000000..7bb65f47cb --- /dev/null +++ b/tools/talis/s3.go @@ -0,0 +1,168 @@ +package main + +import ( + "context" + "fmt" + "log" + "os" + "path/filepath" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/spf13/cobra" +) + +type S3Config struct { + Region string `json:"region"` + AccessKeyID string `json:"access_key_id"` + SecretAccessKey string `json:"secret_access_key"` + BucketName string `json:"bucket_name"` + Endpoint string `json:"endpoint"` +} + +// downloadS3DataCmd creates a cobra command for downloading a chain's data from S3. 
+func downloadS3DataCmd() *cobra.Command {
+	var (
+		rootDir string
+		cfgPath string
+		outDir  string
+		chainID string
+	)
+
+	cmd := &cobra.Command{
+		Use:   "s3",
+		Short: "Download all files from S3 under <chain-id>/ into a local directory",
+		Long: `Loads the network config, instantiates an AWS S3 client using the
+credentials in it, then recursively downloads everything under
+"<bucket>/<chain-id>/" into the output directory you specify.`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			cfg, err := LoadConfig(rootDir)
+			if err != nil {
+				return fmt.Errorf("failed to load config: %w", err)
+			}
+
+			client, err := createS3Client(cmd.Context(), cfg)
+			if err != nil {
+				return fmt.Errorf("failed to create S3 client: %w", err)
+			}
+			if chainID != "" {
+				cfg.ChainID = chainID
+			}
+
+			// Compute the prefix and download everything under it
+			prefix := cfg.ChainID + "/"
+			if err := downloadS3Directory(cmd.Context(), client, cfg.S3Config.BucketName, prefix, outDir); err != nil {
+				return fmt.Errorf("failed to download S3 objects: %w", err)
+			}
+
+			return nil
+		},
+	}
+
+	cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to find your config.json")
+	cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "name of the config file (under the directory)")
+	cmd.Flags().StringVarP(&outDir, "out", "o", "./data", "local directory into which to download the S3 objects")
+	cmd.Flags().StringVarP(&chainID, "chain-id", "i", "", "override the chain-id in the config")
+
+	return cmd
+}
+
+// downloadS3Directory lists and downloads all objects under the given prefix.
+func downloadS3Directory(ctx context.Context, client *s3.Client, bucket, prefix, dest string) error {
+	paginator := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
+		Bucket: aws.String(bucket),
+		Prefix: aws.String(prefix),
+	})
+
+	downloader := manager.NewDownloader(client)
+
+	for paginator.HasMorePages() {
+		page, err := paginator.NextPage(ctx)
+		if err != nil {
+			return err
+		}
+
+		for _, obj := range page.Contents {
+			// compute local file path: strip the prefix
+			relPath := strings.TrimPrefix(*obj.Key, prefix)
+			if relPath == "" {
+				// skip the "directory" marker itself
+				continue
+			}
+			localPath := filepath.Join(dest, relPath)
+			if err := os.MkdirAll(filepath.Dir(localPath), 0o755); err != nil {
+				return err
+			}
+
+			// create the local file and stream the object into it
+			f, err := os.Create(localPath)
+			if err != nil {
+				return err
+			}
+
+			_, err = downloader.Download(ctx, f,
+				&s3.GetObjectInput{Bucket: aws.String(bucket), Key: obj.Key},
+			)
+			closeErr := f.Close()
+			if err != nil {
+				return fmt.Errorf("download %s: %w", *obj.Key, err)
+			}
+			if closeErr != nil {
+				return fmt.Errorf("close %s: %w", localPath, closeErr)
+			}
+
+			log.Println("Downloaded", *obj.Key)
+		}
+	}
+
+	return nil
+}
+
+func createS3Client(ctx context.Context, cfg Config) (*s3.Client, error) {
+	s3cfg := cfg.S3Config
+
+	opts := []func(*config.LoadOptions) error{config.WithRegion(s3cfg.Region)}
+
+	// If static creds are provided in config (typical for DO Spaces), use
+	// them. Otherwise fall back to the SDK default credential chain — env
+	// vars, AWS_PROFILE in ~/.aws/credentials, IAM role, etc. — so the AWS
+	// compute path works with named profiles.
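+	// Hypothetical config.json snippet for DO Spaces (keys follow the
+	// S3Config json tags above; the parent key name and every value here
+	// are illustrative only, not taken from a real config):
+	//
+	//	"s3_config": {
+	//	  "region":            "nyc3",
+	//	  "access_key_id":     "DO00EXAMPLE",
+	//	  "secret_access_key": "...",
+	//	  "bucket_name":       "talis-traces",
+	//	  "endpoint":          "https://nyc3.digitaloceanspaces.com"
+	//	}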
+ if s3cfg.AccessKeyID != "" && s3cfg.SecretAccessKey != "" { + opts = append(opts, config.WithCredentialsProvider( + aws.NewCredentialsCache( + credentials.NewStaticCredentialsProvider( + s3cfg.AccessKeyID, + s3cfg.SecretAccessKey, + "", + ), + ), + )) + } + + if s3cfg.Endpoint != "" { + customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...any) (aws.Endpoint, error) { //nolint:staticcheck + return aws.Endpoint{ //nolint:staticcheck + URL: s3cfg.Endpoint, + SigningRegion: region, + }, nil + }) + opts = append(opts, config.WithEndpointResolverWithOptions(customResolver)) //nolint:staticcheck + } + + awsCfg, err := config.LoadDefaultConfig(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("failed to build AWS config: %w", err) + } + return s3.NewFromConfig(awsCfg), nil +} diff --git a/tools/talis/scripts/monitor.sh b/tools/talis/scripts/monitor.sh new file mode 100644 index 0000000000..8403a53be0 --- /dev/null +++ b/tools/talis/scripts/monitor.sh @@ -0,0 +1,215 @@ +#!/usr/bin/env bash +# monitor.sh — per-port network bandwidth + per-process CPU/memory monitoring. +# Writes JSONL to /root/monitor.jsonl (one sample per INTERVAL seconds). +# Designed to run on Linux validators with iptables and /proc. + +set -euo pipefail + +INTERVAL="${MONITOR_INTERVAL:-1}" +OUTPUT="/root/monitor.jsonl" +PORTS="9091 26656 26657" +PROCESS_NAMES="celestia-appd fibre-txsim txsim" + +# ---------- iptables accounting setup ---------- + +setup_iptables() { + iptables -N MONITOR_IN 2>/dev/null || true + iptables -N MONITOR_OUT 2>/dev/null || true + + # Remove old jump rules (ignore errors if absent) + iptables -D INPUT -j MONITOR_IN 2>/dev/null || true + iptables -D OUTPUT -j MONITOR_OUT 2>/dev/null || true + + # Flush any previous per-port rules + iptables -F MONITOR_IN + iptables -F MONITOR_OUT + + # Insert jump rules at the top of INPUT/OUTPUT + iptables -I INPUT 1 -j MONITOR_IN + iptables -I OUTPUT 1 -j MONITOR_OUT + + # Add per-port accounting rules + for port in $PORTS; do + iptables -A MONITOR_IN -p tcp --dport "$port" + iptables -A MONITOR_OUT -p tcp --sport "$port" + done +} + +cleanup_iptables() { + iptables -D INPUT -j MONITOR_IN 2>/dev/null || true + iptables -D OUTPUT -j MONITOR_OUT 2>/dev/null || true + iptables -F MONITOR_IN 2>/dev/null || true + iptables -F MONITOR_OUT 2>/dev/null || true + iptables -X MONITOR_IN 2>/dev/null || true + iptables -X MONITOR_OUT 2>/dev/null || true +} + +trap cleanup_iptables EXIT +setup_iptables + +# ---------- helpers ---------- + +# read_iptables_bytes +# Outputs one line per rule: " " +read_iptables_bytes() { + local chain="$1" + iptables -L "$chain" -v -n -x 2>/dev/null | awk ' + /tcp/ { + # Find the port: look for dpt: or spt: field + for (i = 1; i <= NF; i++) { + if ($i ~ /^[ds]pt:/) { + split($i, a, ":") + print a[2], $2 # port, bytes + } + } + } + ' +} + +# get_proc_stat — outputs "utime stime num_threads" from /proc//stat +get_proc_stat() { + local pid="$1" + # Fields: pid (comm) state ... 
(field 14=utime, 15=stime, 20=num_threads) + awk '{print $14, $15, $20}' "/proc/$pid/stat" 2>/dev/null || echo "0 0 0" +} + +# get_proc_rss_mb — outputs VmRSS in MB from /proc//status +get_proc_rss_mb() { + local pid="$1" + awk '/^VmRSS:/ {printf "%.1f", $2/1024}' "/proc/$pid/status" 2>/dev/null || echo "0" +} + +# get_total_cpu_ticks — sum of all fields from first line of /proc/stat +get_total_cpu_ticks() { + awk '/^cpu / {sum=0; for(i=2;i<=NF;i++) sum+=$i; print sum}' /proc/stat +} + +# get_system_mem — outputs "used_mb total_mb" +get_system_mem() { + awk ' + /^MemTotal:/ { total=$2 } + /^MemAvailable:/ { avail=$2 } + END { printf "%.0f %.0f", (total-avail)/1024, total/1024 } + ' /proc/meminfo +} + +# ---------- initial snapshot ---------- + +declare -A prev_in_bytes +declare -A prev_out_bytes +declare -A prev_proc_ticks + +# Seed network counters +while IFS=' ' read -r port bytes; do + prev_in_bytes["$port"]="$bytes" +done < <(read_iptables_bytes MONITOR_IN) + +while IFS=' ' read -r port bytes; do + prev_out_bytes["$port"]="$bytes" +done < <(read_iptables_bytes MONITOR_OUT) + +# Seed CPU counters +prev_total_ticks=$(get_total_cpu_ticks) +for name in $PROCESS_NAMES; do + pid=$(pgrep -x "$name" 2>/dev/null | head -1 || true) + if [ -n "$pid" ]; then + read -r ut st _threads <<< "$(get_proc_stat "$pid")" + prev_proc_ticks["$name"]=$((ut + st)) + else + prev_proc_ticks["$name"]=0 + fi +done + +sleep "$INTERVAL" + +# ---------- main loop ---------- + +while true; do + ts=$(date -u +"%Y-%m-%dT%H:%M:%SZ") + + # --- network deltas --- + net_json="{" + first=true + while IFS=' ' read -r port bytes; do + prev=${prev_in_bytes["$port"]:-0} + delta=$(( (bytes - prev) / INTERVAL )) + prev_in_bytes["$port"]="$bytes" + if [ "$first" = true ]; then first=false; else net_json+=","; fi + net_json+="\"in_${port}_bytes_sec\":${delta}" + done < <(read_iptables_bytes MONITOR_IN) + + while IFS=' ' read -r port bytes; do + prev=${prev_out_bytes["$port"]:-0} + delta=$(( (bytes - prev) / INTERVAL )) + prev_out_bytes["$port"]="$bytes" + net_json+=",\"out_${port}_bytes_sec\":${delta}" + done < <(read_iptables_bytes MONITOR_OUT) + net_json+="}" + + # --- per-process CPU + memory --- + cur_total_ticks=$(get_total_cpu_ticks) + total_delta=$((cur_total_ticks - prev_total_ticks)) + prev_total_ticks=$cur_total_ticks + + proc_json="{" + first=true + for name in $PROCESS_NAMES; do + pid=$(pgrep -x "$name" 2>/dev/null | head -1 || true) + if [ -n "$pid" ]; then + read -r ut st threads <<< "$(get_proc_stat "$pid")" + cur_ticks=$((ut + st)) + prev_t=${prev_proc_ticks["$name"]:-0} + if [ "$total_delta" -gt 0 ]; then + # cpu_pct with one decimal: (proc_delta * 10000 / total_delta) then insert decimal + raw=$(( (cur_ticks - prev_t) * 10000 / total_delta )) + cpu_pct="$((raw / 10)).$((raw % 10))" + else + cpu_pct="0.0" + fi + prev_proc_ticks["$name"]=$cur_ticks + rss_mb=$(get_proc_rss_mb "$pid") + else + cpu_pct="0.0" + rss_mb="0" + threads="0" + prev_proc_ticks["$name"]=0 + fi + + if [ "$first" = true ]; then first=false; else proc_json+=","; fi + proc_json+="\"${name}\":{\"cpu_pct\":${cpu_pct},\"rss_mb\":${rss_mb},\"threads\":${threads}}" + done + proc_json+="}" + + # --- system-wide stats --- + if [ "$total_delta" -gt 0 ]; then + # System idle ticks are field 5 of /proc/stat cpu line + idle_ticks=$(awk '/^cpu / {print $5}' /proc/stat) + # We need current and previous idle, but for simplicity compute from total usage. + # Instead, use load average which is readily available. 
+ : + fi + read -r mem_used mem_total <<< "$(get_system_mem)" + load_1m=$(awk '{print $1}' /proc/loadavg) + # System CPU%: from /proc/stat, compute as (1 - idle_delta/total_delta) * 100 + idle_now=$(awk '/^cpu / {print $5}' /proc/stat) + # We need the previous idle, store it + if [ -z "${prev_idle:-}" ]; then + prev_idle=$idle_now + fi + idle_delta=$((idle_now - prev_idle)) + prev_idle=$idle_now + if [ "$total_delta" -gt 0 ]; then + busy_delta=$((total_delta - idle_delta)) + raw=$(( busy_delta * 1000 / total_delta )) + sys_cpu="$((raw / 10)).$((raw % 10))" + else + sys_cpu="0.0" + fi + + sys_json="{\"cpu_pct\":${sys_cpu},\"load_1m\":${load_1m},\"mem_used_mb\":${mem_used},\"mem_total_mb\":${mem_total}}" + + # --- emit JSONL line --- + echo "{\"ts\":\"${ts}\",\"net\":${net_json},\"proc\":${proc_json},\"sys\":${sys_json}}" >> "$OUTPUT" + + sleep "$INTERVAL" +done diff --git a/tools/talis/scripts/promtail.sh b/tools/talis/scripts/promtail.sh new file mode 100644 index 0000000000..e200168505 --- /dev/null +++ b/tools/talis/scripts/promtail.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euo pipefail + +export HOSTNAME=$(hostname) +PROMTAIL_CONFIG=/root/promtail-config.yml +printf "%s" "__PROMTAIL_CONFIG_B64__" | base64 -d > "$PROMTAIL_CONFIG" + +if ! command -v promtail >/dev/null 2>&1; then + arch=$(uname -m) + if [ "$arch" = "x86_64" ] || [ "$arch" = "amd64" ]; then arch=amd64; + elif [ "$arch" = "aarch64" ] || [ "$arch" = "arm64" ]; then arch=arm64; + else echo "unsupported arch: $arch" >&2; exit 1; fi + apt-get update -y >/dev/null + apt-get install -y curl unzip >/dev/null + tmpdir=$(mktemp -d) + curl -fsSL -o "$tmpdir/promtail.zip" "https://github.com/grafana/loki/releases/download/v2.9.3/promtail-linux-$arch.zip" + unzip -o "$tmpdir/promtail.zip" -d "$tmpdir" >/dev/null + install -m 0755 "$tmpdir/promtail-linux-$arch" /usr/local/bin/promtail +fi + +promtail -config.file="$PROMTAIL_CONFIG" -config.expand-env -server.http-listen-port=9080 > /root/promtail.log 2>&1 & +sleep 1 +pgrep -a promtail >/dev/null 2>&1 || (echo "promtail failed to start:" >&2; tail -200 /root/promtail.log >&2; exit 1) + +__LATENCY_MONITOR_CMD__ diff --git a/tools/talis/scripts/upload_traces.sh b/tools/talis/scripts/upload_traces.sh new file mode 100644 index 0000000000..1a587da1ac --- /dev/null +++ b/tools/talis/scripts/upload_traces.sh @@ -0,0 +1,45 @@ +#!/bin/bash +export DEBIAN_FRONTEND=noninteractive +export NEEDRESTART_MODE=a + +apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" apt-transport-https ca-certificates gnupg curl -y + +# ensure that the env vars are exported here +source /root/payload/vars.sh +echo "CHAIN_ID after sourcing vars.sh: $CHAIN_ID" + +# Set environment variables +PROJECT_ID="numeric-mile-433416-e9" +DATASET_ID="traces" + +CHAIN_ID=$CHAIN_ID + +LOCAL_DIR="/root/.celestia-app/data/traces" + +tmux kill-session -t app + +# Get the hostname +hostname=$(hostname) + +# Parse the first part of the hostname +nodeID=$(echo $hostname | awk -F'-' '{print $1 "-" $2}') + +source_dir="/root/.celestia-app/data/traces" +logs_path="/root/logs" + +# clean the data by removing the last line +find $source_dir -type f -name "*.jsonl" -exec sed -i '$d' {} \; + +AWS_DEFAULT_REGION="us-east-2" +S3_BUCKET_NAME="block-prop-traces-ef" +echo "All files loaded." 
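+
+# Resulting S3 layout (illustrative; the actual trace file names depend on the
+# tracing tables enabled on the node):
+#   s3://block-prop-traces-ef/<CHAIN_ID>/<nodeID>/<table>.jsonl
+#   s3://block-prop-traces-ef/<CHAIN_ID>/<nodeID>/logs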
+ +snap install aws-cli --classic +destination_file="/tmp/${CHAIN_ID}_${nodeID}_traces.tar.gz" + +# Set the base S3 path +base_s3_path="s3://${S3_BUCKET_NAME}/${CHAIN_ID}/${nodeID}/" + +# Upload the directory structure to S3 +aws s3 cp "$source_dir" "$base_s3_path" --recursive --region $AWS_DEFAULT_REGION +aws s3 cp "$logs_path" "$base_s3_path" --region $AWS_DEFAULT_REGION diff --git a/tools/talis/scripts/validator_init.sh b/tools/talis/scripts/validator_init.sh new file mode 100644 index 0000000000..15345f9ea4 --- /dev/null +++ b/tools/talis/scripts/validator_init.sh @@ -0,0 +1,173 @@ +#!/bin/bash +CELES_HOME=".celestia-app" +MONIKER="validator" +ARCHIVE_NAME="payload.tar.gz" + +export DEBIAN_FRONTEND=noninteractive +apt-get update -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" +apt-get install git build-essential ufw curl jq chrony snapd btop nethogs unzip --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" + +ufw allow 26657/tcp +ufw allow 26656/tcp +ufw allow 26657/udp +ufw allow 26656/udp + +systemctl enable chrony +systemctl start chrony + +# Ensure the script is run as root +if [ "$(id -u)" -ne 0 ]; then + echo "This script must be run as root. Please run with sudo or as root." + exit 1 +fi + +# Load the BBR module +echo "Loading BBR module..." +modprobe tcp_bbr + +# Verify if the BBR module is loaded +if lsmod | grep -q "tcp_bbr"; then + echo "BBR module loaded successfully." +else + echo "Failed to load BBR module." + exit 1 +fi + +# Add BBR to the list of available congestion control algorithms +echo "Updating sysctl settings..." +sysctl -w net.core.default_qdisc=fq +sysctl -w net.ipv4.tcp_congestion_control=bbr + +# Enable MPTCP +sysctl -w net.mptcp.enabled=1 + +# Set the path manager to ndiffports +sysctl -w net.mptcp.mptcp_path_manager=ndiffports + +# Specify the number of subflows +SUBFLOWS=16 +sysctl -w net.mptcp.mptcp_ndiffports=$SUBFLOWS + +# Make the changes persistent across reboots +echo "Making changes persistent..." +echo "net.core.default_qdisc=fq" >> /etc/sysctl.conf +echo "net.ipv4.tcp_congestion_control=bbr" >> /etc/sysctl.conf + +#Verify the current TCP congestion control algorithm +current_algo=$(sysctl net.ipv4.tcp_congestion_control | awk '{print $3}') +if [ "$current_algo" == "bbr" ]; then + echo "Successfully switched to BBR congestion control algorithm." +else + echo "Failed to switch to BBR. Current algorithm is $current_algo." + exit 1 +fi + +echo "Script completed successfully." + +# === Mount local NVMe instance store (if any) at fibre's data dir === +# +# c6id / i3en / i4i / i7i and similar instance families ship one or more +# unmounted ephemeral NVMe disks. They are 10–20× faster than the EBS +# root volume (gp3 baseline ~125 MB/s, instance NVMe is multi-GB/s). +# Without this block, fibre's `store.Put` writes to the EBS root and +# becomes the upload-path bottleneck — the disk saturates around 125 +# MB/s while the instance store sits idle. +# +# This is a no-op when: +# - the instance type has no second NVMe (DigitalOcean droplets, +# non-c6id EC2 types, Google Cloud) +# - the disk is already formatted+mounted from a previous run +# (idempotent on talis re-deploy) +mount_instance_nvme() { + local fibre_dir="/root/.celestia-fibre" + local label="celestia-fibre" + local dev="" + # Pick the largest unmounted whole-disk NVMe that has no partitions. + # Sort by size desc, take the first. 
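+  # Illustrative `lsblk -bdno NAME,SIZE,MOUNTPOINT` output after the awk/sort
+  # filter below (device names and sizes are examples only):
+  #   nvme1n1 1900000000000        <- no mountpoint, no partitions: selected
+  #   nvme0n1 107374182400         <- root disk, skipped (has nvme0n1p1)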
+ while read -r name size mp; do + [ "$mp" = "" ] || continue + [ -n "$(ls /sys/block/${name}/${name}p* 2>/dev/null)" ] && continue + dev="/dev/${name}" + break + done < <(lsblk -bdno NAME,SIZE,MOUNTPOINT 2>/dev/null \ + | awk '$1 ~ /^nvme/ {print}' | sort -k2 -nr) + if [ -z "$dev" ]; then + echo "no spare NVMe instance store found — fibre will run on the root volume" + return 0 + fi + if ! blkid "$dev" >/dev/null 2>&1; then + echo "Formatting $dev (ephemeral NVMe instance store) as ext4..." + mkfs.ext4 -F -E lazy_itable_init=1,lazy_journal_init=1 -L "$label" "$dev" + fi + mkdir -p "$fibre_dir" + if ! mountpoint -q "$fibre_dir"; then + mount -o noatime,nodiratime "$dev" "$fibre_dir" + fi + chown root:root "$fibre_dir" + echo "Mounted $dev at $fibre_dir ($(df -h "$fibre_dir" | tail -1))" +} +mount_instance_nvme + +tar -xzf /root/$ARCHIVE_NAME -C /root/ + +source ./vars.sh + +sudo snap install go --channel=1.26/stable --classic + +echo 'export GOPATH="$HOME/go"' >> ~/.profile +echo 'export GOBIN="$GOPATH/bin"' >> ~/.profile +echo 'export PATH="$GOBIN:$PATH"' >> ~/.profile +source ~/.profile + +cd $HOME + +# Get the hostname +hostname=$(hostname) + +# Parse the first part of the hostname +parsed_hostname=$(echo $hostname | awk -F'-' '{print $1 "-" $2}') + +cp payload/build/celestia-appd /bin/celestia-appd +cp payload/build/txsim /bin/txsim +cp payload/build/latency-monitor /bin/latency-monitor +cp payload/build/fibre /bin/fibre +cp payload/build/fibre-txsim /bin/fibre-txsim + +cd $HOME + +rm -rf .celestia-app/ + +celestia-appd config chain-id $CHAIN_ID + +celestia-appd init --chain-id=$CHAIN_ID --home $CELES_HOME $MONIKER + +mv payload/$parsed_hostname/node_key.json $HOME/$CELES_HOME/config/node_key.json + +mv payload/$parsed_hostname/priv_validator_key.json $HOME/$CELES_HOME/config/priv_validator_key.json + +mv payload/$parsed_hostname/priv_validator_state.json $HOME/$CELES_HOME/data/priv_validator_state.json + +cp payload/genesis.json $HOME/$CELES_HOME/config/genesis.json + +cp payload/addrbook.json $HOME/$CELES_HOME/config/addrbook.json + +mv payload/$parsed_hostname/app.toml $HOME/$CELES_HOME/config/app.toml + +mv payload/$parsed_hostname/config.toml $HOME/$CELES_HOME/config/config.toml + +cp -r payload/$parsed_hostname/keyring-test $HOME/$CELES_HOME + +# run txsim script which starts a sleep timer and txsim in a different tmux session +source payload/txsim.sh + +# Get the hostname of the machine +HOSTNAME=$(hostname) + +# Base command +COMMAND="celestia-appd start" + +# Define log file path +LOG_FILE="/root/logs" + +# Execute the command and redirect output to the log file +eval $COMMAND 2>&1 | tee -a "$LOG_FILE" diff --git a/tools/talis/scripts/vars.sh b/tools/talis/scripts/vars.sh new file mode 100644 index 0000000000..69b7857893 --- /dev/null +++ b/tools/talis/scripts/vars.sh @@ -0,0 +1,2 @@ +#!/bin/bash +# this file holds env vars for remote machines diff --git a/tools/talis/start_fibre.go b/tools/talis/start_fibre.go new file mode 100644 index 0000000000..4007573d61 --- /dev/null +++ b/tools/talis/start_fibre.go @@ -0,0 +1,91 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/spf13/cobra" +) + +const StartFibreSessionName = "fibre" + +func startFibreCmd() *cobra.Command { + var ( + rootDir string + SSHKeyPath string + instances int + metricsAddress string + pyroscopeEndpoint string + ) + + cmd := &cobra.Command{ + Use: "start-fibre", + Short: "Start fibre server on remote validators via SSH + tmux", + Long: "Starts fibre server tmux sessions on remote 
validators. The fibre binary must already be deployed via 'talis deploy'.", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + // Select first N validators (default all) + if instances <= 0 || instances > len(cfg.Validators) { + instances = len(cfg.Validators) + } + validators := cfg.Validators[:instances] + + // Build the remote command + // OTEL_METRICS_EXEMPLAR_FILTER=always_on attaches trace exemplars to all metric observations + remoteCmd := "OTEL_METRICS_EXEMPLAR_FILTER=always_on fibre start --home .celestia-fibre --app-grpc-address localhost:9091" + // Auto-enable metrics when observability nodes are configured + if metricsAddress == "" && len(cfg.Observability) > 0 { + metricsAddress = fmt.Sprintf("http://%s:4318", cfg.Observability[0].PublicIP) + } + if metricsAddress != "" { + remoteCmd += fmt.Sprintf(" --otel-endpoint %s", metricsAddress) + } + // Auto-wire Pyroscope endpoint when observability nodes are configured + if pyroscopeEndpoint == "" && len(cfg.Observability) > 0 { + pyroscopeEndpoint = fmt.Sprintf("http://%s:4040", cfg.Observability[0].PublicIP) + } + if pyroscopeEndpoint != "" { + remoteCmd += fmt.Sprintf(" --pyroscope-endpoint %s", pyroscopeEndpoint) + } + + fmt.Printf("Starting fibre sessions on %d validator(s)...\n", len(validators)) + + if err := runScriptInTMux(validators, resolvedSSHKeyPath, remoteCmd, StartFibreSessionName, 5*time.Minute); err != nil { + return fmt.Errorf("failed to start remote sessions: %w", err) + } + + // Print summary + fmt.Println() + fmt.Println("=== fibre sessions started ===") + fmt.Printf(" tmux session: %s\n", StartFibreSessionName) + fmt.Printf(" log file: /root/talis-%s.log\n", StartFibreSessionName) + fmt.Println(" validators:") + for _, val := range validators { + fmt.Printf(" - %s (%s)\n", val.Name, val.PublicIP) + } + fmt.Println() + fmt.Printf(" To kill all: talis kill-session -s %s\n", StartFibreSessionName) + fmt.Printf(" To view logs: ssh root@ 'cat /root/talis-%s.log'\n", StartFibreSessionName) + + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory (for config.json)") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to SSH private key (overrides env/default)") + cmd.Flags().IntVar(&instances, "instances", 0, "number of validators to start fibre on (default all)") + cmd.Flags().StringVar(&metricsAddress, "otel-endpoint", "", "OTLP HTTP endpoint for metrics/traces (e.g. http://host:4318; empty = disabled)") + cmd.Flags().StringVar(&pyroscopeEndpoint, "pyroscope-endpoint", "", "Pyroscope endpoint for continuous profiling (default: auto-detected from observability config, e.g. 
http://host:4040)") + + return cmd +} diff --git a/tools/talis/status.go b/tools/talis/status.go new file mode 100644 index 0000000000..47bedcf6bd --- /dev/null +++ b/tools/talis/status.go @@ -0,0 +1,74 @@ +package main + +import ( + "context" + "fmt" + "log" + "sync" + "time" + + "github.com/cometbft/cometbft/rpc/client/http" + "github.com/spf13/cobra" +) + +func statusCmd() *cobra.Command { + var rootDir string + + cmd := &cobra.Command{ + Use: "status", + Short: "Ping a set of CometBFT nodes and report their latest block height", + Long: "Loads a JSON config containing validator instances, then asynchronously queries each node’s /status endpoint (port 26657) and prints its latest block height.", + Aliases: []string{"s"}, + RunE: func(cmd *cobra.Command, args []string) error { // 1) Load configuration from disk + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config from %q: %w", rootDir, err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators (nodes) found in config") + } + + var wg sync.WaitGroup + for _, val := range cfg.Validators { + ip := val.PublicIP + if ip == "" { + fmt.Printf("Skipping %q: no public_ip defined\n", val.Name) + continue + } + + wg.Add(1) + go func(nodeName, nodeIP string) { + defer wg.Done() + + remote := fmt.Sprintf("http://%s:26657", nodeIP) + client, err := http.New(remote, "/websocket") + if err != nil { + log.Printf("Failed to create RPC client for %s (%s:26657): %v\n", nodeName, nodeIP, err) + return + } + + // 4) Call the typed Status endpoint + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + res, err := client.Status(ctx) + if err != nil { + log.Printf("Failed to get status from %s (%s:26657): %v\n", nodeName, nodeIP, err) + return + } + + height := res.SyncInfo.LatestBlockHeight + + log.Printf("%s (%s): height %d\n", nodeName, nodeIP, height) + }(val.Name, ip) + } + + wg.Wait() + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory containing your config") + return cmd +} diff --git a/tools/talis/sync_node_cmd.go b/tools/talis/sync_node_cmd.go new file mode 100644 index 0000000000..b4b57cb9f3 --- /dev/null +++ b/tools/talis/sync_node_cmd.go @@ -0,0 +1,677 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "math" + "math/rand" + "os" + "os/exec" + "os/signal" + "regexp" + "sort" + "strconv" + "strings" + "syscall" + "time" + + "github.com/spf13/cobra" +) + +type syncResult struct { + stateSyncDuration int + blockSyncDuration int + totalDuration int + finalHeight int +} + +var ( + syncResultRe = regexp.MustCompile(`(?m)^State sync duration:\s+(\d+)s\r?$`) + blockSyncRe = regexp.MustCompile(`(?m)^Block sync duration:\s+(\d+)s\r?$`) + totalDurationRe = regexp.MustCompile(`(?m)^Total sync duration:\s+(\d+)s\r?$`) + finalHeightRe = regexp.MustCompile(`(?m)^Final height:\s+(\d+)\r?$`) +) + +func parseSyncResult(output string) (syncResult, error) { + extract := func(re *regexp.Regexp) (int, error) { + m := re.FindStringSubmatch(output) + if m == nil { + return 0, fmt.Errorf("pattern %q not found in output", re.String()) + } + return strconv.Atoi(m[1]) + } + var r syncResult + var err error + if r.stateSyncDuration, err = extract(syncResultRe); err != nil { + return r, err + } + if r.blockSyncDuration, err = extract(blockSyncRe); err != nil { + return r, err + } + if r.totalDuration, err = extract(totalDurationRe); err != nil { + return r, err + } + if r.finalHeight, err = 
extract(finalHeightRe); err != nil { + return r, err + } + return r, nil +} + +func printSummary(results []syncResult) { + n := len(results) + if n == 0 { + return + } + + totals := make([]float64, n) + blocks := make([]float64, n) + states := make([]float64, n) + bps := make([]float64, n) + for i, r := range results { + totals[i] = float64(r.totalDuration) + blocks[i] = float64(r.blockSyncDuration) + states[i] = float64(r.stateSyncDuration) + if r.blockSyncDuration > 0 { + bps[i] = float64(r.finalHeight) / float64(r.blockSyncDuration) + } + } + + fmt.Printf("\n=========================================\n") + fmt.Printf("SUMMARY (%d iterations)\n", n) + fmt.Printf("=========================================\n") + fmt.Printf("%-26s %8s %8s %8s %8s\n", "", "avg", "min", "max", "p99") + fmt.Printf("%-26s %8.1fs %8.1fs %8.1fs %8.1fs\n", "State sync duration:", avg(states), minVal(states), maxVal(states), percentile(states, 99)) + fmt.Printf("%-26s %8.1fs %8.1fs %8.1fs %8.1fs\n", "Block sync duration:", avg(blocks), minVal(blocks), maxVal(blocks), percentile(blocks, 99)) + fmt.Printf("%-26s %8.1fs %8.1fs %8.1fs %8.1fs\n", "Total sync duration:", avg(totals), minVal(totals), maxVal(totals), percentile(totals, 99)) + fmt.Printf("%-26s %8.2f %8.2f %8.2f %8.2f\n", "Blocks/sec:", avg(bps), minVal(bps), maxVal(bps), percentile(bps, 99)) + fmt.Printf("=========================================\n") +} + +func minVal(vals []float64) float64 { + m := vals[0] + for _, v := range vals[1:] { + if v < m { + m = v + } + } + return m +} + +func maxVal(vals []float64) float64 { + m := vals[0] + for _, v := range vals[1:] { + if v > m { + m = v + } + } + return m +} + +func avg(vals []float64) float64 { + sum := 0.0 + for _, v := range vals { + sum += v + } + return sum / float64(len(vals)) +} + +func percentile(vals []float64, p float64) float64 { + sorted := make([]float64, len(vals)) + copy(sorted, vals) + sort.Float64s(sorted) + idx := (p / 100) * float64(len(sorted)-1) + lower := int(math.Floor(idx)) + upper := int(math.Ceil(idx)) + if lower == upper { + return sorted[lower] + } + frac := idx - float64(lower) + return sorted[lower]*(1-frac) + sorted[upper]*frac +} + +const ( + syncTestMachineType = "c3d-highcpu-16" + syncTestDiskSizeGB = 400 +) + +func syncNodeCmd() *cobra.Command { + var ( + rootDir string + sshPubKeyPath string + sshKeyPath string + gcProject string + gcKeyJSONPath string + region string + iterations int + cooldown int + keep bool + binaryPath string + blockSyncOnly bool + ) + + cmd := &cobra.Command{ + Use: "sync-node", + Short: "Measure sync-to-tip speed on a Talis network", + Long: `Spins up a fresh GCP instance, deploys celestia-appd, syncs to tip +using the existing Talis validators (state sync by default, or block sync +from genesis with --block-sync-only), measures sync time, and tears down the instance.`, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + cfg.SSHPubKeyPath = resolveValue(sshPubKeyPath, EnvVarSSHKeyPath, cfg.SSHPubKeyPath) + cfg.GoogleCloudProject = resolveValue(gcProject, EnvVarGoogleCloudProject, cfg.GoogleCloudProject) + cfg.GoogleCloudKeyJSONPath = resolveValue(gcKeyJSONPath, EnvVarGoogleCloudKeyJSONPath, cfg.GoogleCloudKeyJSONPath) + + if cfg.GoogleCloudProject == "" { + return fmt.Errorf("google cloud project is required (use --gc-project, env 
GOOGLE_CLOUD_PROJECT, or config)") + } + + resolvedSSHKeyPath := resolveValue(sshKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + if resolvedSSHKeyPath == "" { + return fmt.Errorf("SSH private key path is required (use --ssh-key-path or set ssh_pub_key_path in config)") + } + + sshPubKey, err := os.ReadFile(cfg.SSHPubKeyPath) + if err != nil { + return fmt.Errorf("failed to read SSH public key at %s: %w", cfg.SSHPubKeyPath, err) + } + + if binaryPath == "" { + binaryPath = "build/celestia-appd" + } + if _, err := os.Stat(binaryPath); err != nil { + return fmt.Errorf("binary not found at %s: %w", binaryPath, err) + } + + opts, err := gcClientOptions(cfg) + if err != nil { + return fmt.Errorf("failed to create GCP client options: %w", err) + } + + // Pick region + if region == "" || region == RandomRegion { + region = RandomGCRegion() + } + + // Pick a random validator as RPC source + validator := cfg.Validators[rand.Intn(len(cfg.Validators))] + if validator.PublicIP == "" || validator.PublicIP == "TBD" { + return fmt.Errorf("selected validator %s has no public IP", validator.Name) + } + rpcEndpoint := fmt.Sprintf("http://%s:26657", validator.PublicIP) + log.Printf("Using validator %s (%s) as RPC source", validator.Name, validator.PublicIP) + + // Build peer list from all validators + var peers []string + for _, v := range cfg.Validators { + if v.PublicIP == "" || v.PublicIP == "TBD" { + continue + } + peers = append(peers, fmt.Sprintf("%s:26656", v.PublicIP)) + } + + // Create the sync test instance + syncInst := Instance{ + NodeType: Validator, + Name: fmt.Sprintf("sync-test-%d", time.Now().Unix()), + Provider: GoogleCloud, + Slug: syncTestMachineType, + Region: region, + Tags: []string{"talis", "sync-test"}, + } + + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + log.Printf("Creating sync-test instance in region %s...", region) + created, err := CreateGCInstances(ctx, cfg.GoogleCloudProject, []Instance{syncInst}, string(sshPubKey), opts, 1) + if err != nil { + return fmt.Errorf("failed to create GCP instance: %w", err) + } + if len(created) == 0 { + return fmt.Errorf("no instance was created") + } + + inst := created[0] + log.Printf("Instance %s created with IP %s", inst.Name, inst.PublicIP) + + // Setup cleanup on interrupt + teardown := func() { + if keep { + log.Printf("--keep flag set, leaving instance %s (%s) running", inst.Name, inst.PublicIP) + return + } + log.Printf("Tearing down instance %s...", inst.Name) + teardownCtx, teardownCancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer teardownCancel() + destroyInst := inst + destroyInst.Region = region + if _, err := DestroyGCInstances(teardownCtx, cfg.GoogleCloudProject, []Instance{destroyInst}, opts, 1); err != nil { + log.Printf("Warning: failed to destroy instance %s: %v", inst.Name, err) + } + } + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + go func() { + sig := <-sigCh + log.Printf("Received signal %v, cleaning up...", sig) + cancel() // Stop all in-flight operations (SSH loop, etc.) 
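+				// teardown builds its own context from context.Background, so the
+				// instance is still destroyed even though ctx was just cancelled.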
+ teardown() + os.Exit(1) + }() + + defer func() { + signal.Stop(sigCh) + teardown() + }() + + // Wait for SSH to become available + log.Printf("Waiting for SSH to become available on %s...", inst.PublicIP) + log.Printf(" SSH private key: %s", resolvedSSHKeyPath) + log.Printf(" SSH public key: %s", cfg.SSHPubKeyPath) + if err := waitForSSH(ctx, inst.PublicIP, resolvedSSHKeyPath, 2*time.Minute); err != nil { + return fmt.Errorf("SSH not available: %w", err) + } + log.Printf("SSH is available") + + // SCP the binary to the instance + log.Printf("Uploading celestia-appd binary to %s...", inst.PublicIP) + if err := scpFile(ctx, binaryPath, inst.PublicIP, "/usr/local/bin/celestia-appd", resolvedSSHKeyPath); err != nil { + return fmt.Errorf("failed to upload binary: %w", err) + } + log.Printf("Binary uploaded successfully") + + // Make binary executable + if err := runSSHCommand(ctx, inst.PublicIP, resolvedSSHKeyPath, "chmod +x /usr/local/bin/celestia-appd"); err != nil { + return fmt.Errorf("failed to chmod binary: %w", err) + } + + var results []syncResult + + for i := 1; i <= iterations; i++ { + if iterations > 1 { + log.Printf("=== Starting iteration %d/%d ===", i, iterations) + } + + script := buildSyncScript(cfg.ChainID, rpcEndpoint, peers, i, iterations, blockSyncOnly) + log.Printf("Starting sync measurement on %s...", inst.PublicIP) + + output, err := runSSHStreaming(ctx, inst.PublicIP, resolvedSSHKeyPath, script) + if err != nil { + return fmt.Errorf("sync test failed on iteration %d: %w", i, err) + } + + result, err := parseSyncResult(output) + if err != nil { + log.Printf("Warning: could not parse results for iteration %d: %v", i, err) + } else { + results = append(results, result) + } + + if i < iterations { + log.Printf("Cooldown for %ds before next iteration...", cooldown) + select { + case <-time.After(time.Duration(cooldown) * time.Second): + case <-ctx.Done(): + return ctx.Err() + } + } + } + + if len(results) > 1 { + printSummary(results) + } + + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory with config.json") + cmd.Flags().StringVarP(&sshPubKeyPath, "ssh-pub-key-path", "s", "", "path to SSH public key") + cmd.Flags().StringVar(&sshKeyPath, "ssh-key-path", "", "path to SSH private key (default: derived from config's ssh_pub_key_path)") + cmd.Flags().StringVar(&gcProject, "gc-project", "", "Google Cloud project") + cmd.Flags().StringVar(&gcKeyJSONPath, "gc-key-json-path", "", "path to Google Cloud service account key JSON") + cmd.Flags().StringVarP(®ion, "region", "r", "random", "GCP region for the sync node") + cmd.Flags().IntVarP(&iterations, "iterations", "n", 1, "number of sync iterations") + cmd.Flags().IntVar(&cooldown, "cooldown", 30, "seconds between iterations") + cmd.Flags().BoolVar(&keep, "keep", false, "don't tear down the instance after (for debugging)") + cmd.Flags().BoolVar(&blockSyncOnly, "block-sync-only", false, "skip state sync and only block sync from genesis") + cmd.Flags().StringVar(&binaryPath, "binary-path", "", "path to celestia-appd binary to deploy (default: build/celestia-appd)") + + return cmd +} + +// waitForSSH polls until an SSH connection succeeds or the timeout is reached. 
+func waitForSSH(ctx context.Context, ip, sshKeyPath string, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + attempt := 0 + var lastErr error + var lastOut string + for time.Now().Before(deadline) { + attempt++ + sshCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + ssh := exec.CommandContext(sshCtx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-o", "ConnectTimeout=5", + fmt.Sprintf("root@%s", ip), + "echo ok", + ) + out, err := ssh.CombinedOutput() + cancel() + outStr := strings.TrimSpace(string(out)) + if err == nil && strings.Contains(outStr, "ok") { + return nil + } + lastErr = err + lastOut = outStr + fmt.Fprintf(os.Stderr, " SSH attempt %d: err=%v out=%q\n", attempt, err, truncateOutput(outStr, 200)) + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(5 * time.Second): + } + } + return fmt.Errorf("SSH not available on %s after %v (%d attempts), last error: %v, last output: %s", ip, timeout, attempt, lastErr, truncateOutput(lastOut, 500)) +} + +func truncateOutput(s string, maxLen int) string { + // Only keep the last maxLen characters for readability + s = strings.TrimSpace(s) + if len(s) > maxLen { + return "..." + s[len(s)-maxLen:] + } + return s +} + +// scpFile copies a local file to a remote path via SCP. +func scpFile(ctx context.Context, localPath, ip, remotePath, sshKeyPath string) error { + scp := exec.CommandContext(ctx, + "scp", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + localPath, + fmt.Sprintf("root@%s:%s", ip, remotePath), + ) + if out, err := scp.CombinedOutput(); err != nil { + return fmt.Errorf("scp error: %v\n%s", err, out) + } + return nil +} + +// runSSHCommand runs a command on a remote host via SSH and returns the error if any. +func runSSHCommand(ctx context.Context, ip, sshKeyPath, command string) error { + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", ip), + command, + ) + if out, err := ssh.CombinedOutput(); err != nil { + return fmt.Errorf("ssh error: %v\n%s", err, out) + } + return nil +} + +// runSSHStreaming runs a command on a remote host via SSH, streaming stdout/stderr +// directly to the user's terminal for real-time output. It also captures stdout +// and returns it for parsing. +func runSSHStreaming(ctx context.Context, ip, sshKeyPath, command string) (string, error) { + var buf bytes.Buffer + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-o", "ServerAliveInterval=30", + "-o", "ServerAliveCountMax=5", + fmt.Sprintf("root@%s", ip), + command, + ) + ssh.Stdout = io.MultiWriter(os.Stdout, &buf) + ssh.Stderr = os.Stderr + err := ssh.Run() + return buf.String(), err +} + +// buildSyncScript generates the shell script that runs on the remote instance to +// perform state sync configuration, start the node, and measure sync times. 
+func buildSyncScript(chainID, rpcEndpoint string, peerIPs []string, iteration, totalIterations int, blockSyncOnly bool) string { + peersStr := strings.Join(peerIPs, ",") + blockSyncOnlyStr := "false" + if blockSyncOnly { + blockSyncOnlyStr = "true" + } + + return fmt.Sprintf(`#!/bin/bash +set -euo pipefail + +CHAIN_ID="%s" +RPC="%s" +PEERS="%s" +ITERATION=%d +TOTAL_ITERATIONS=%d +BLOCK_SYNC_ONLY=%s +HOME_DIR="/root/.celestia-app-sync" +POLL_INTERVAL=5 +SYNC_TIMEOUT=7200 + +printf "\n=========================================\n" +printf "SYNC TEST - ITERATION %%d/%%d\n" "$ITERATION" "$TOTAL_ITERATIONS" +printf "=========================================\n" +printf "Chain ID: %%s\n" "$CHAIN_ID" +printf "RPC: %%s\n" "$RPC" +printf "=========================================\n\n" + +# Install jq if not present +if ! command -v jq &>/dev/null; then + echo "Installing jq..." + apt-get update -qq && apt-get install -y -qq jq >/dev/null 2>&1 +fi + +# Clean up any previous run +rm -rf "$HOME_DIR" + +echo "Initializing celestia-appd..." +celestia-appd init sync-node --chain-id "$CHAIN_ID" --home "$HOME_DIR" >/dev/null 2>&1 + +# Fetch genesis from validator RPC +echo "Fetching genesis from $RPC..." +for attempt in $(seq 1 5); do + if curl -sf "$RPC/genesis" | jq '.result.genesis' > "$HOME_DIR/config/genesis.json" 2>/dev/null; then + GENESIS_SIZE=$(wc -c < "$HOME_DIR/config/genesis.json") + echo "Genesis saved ($GENESIS_SIZE bytes)" + break + fi + echo "Attempt $attempt failed, retrying in 5s..." + sleep 5 +done + +if [ ! -s "$HOME_DIR/config/genesis.json" ]; then + echo "ERROR: Failed to fetch genesis" + exit 1 +fi + +# Fetch node IDs and build persistent_peers +echo "Building peer list..." +PERSISTENT_PEERS="" +for peer_addr in $(echo "$PEERS" | tr ',' ' '); do + PEER_IP=$(echo "$peer_addr" | cut -d: -f1) + PEER_PORT=$(echo "$peer_addr" | cut -d: -f2) + PEER_RPC="http://${PEER_IP}:26657" + + NODE_ID=$(curl -sf "$PEER_RPC/status" 2>/dev/null | jq -r '.result.node_info.id // empty' 2>/dev/null || true) + if [ -n "$NODE_ID" ]; then + if [ -n "$PERSISTENT_PEERS" ]; then + PERSISTENT_PEERS="${PERSISTENT_PEERS}," + fi + PERSISTENT_PEERS="${PERSISTENT_PEERS}${NODE_ID}@${PEER_IP}:${PEER_PORT}" + echo " Added peer: ${NODE_ID}@${PEER_IP}:${PEER_PORT}" + else + echo " Warning: could not get node ID for $PEER_IP" + fi +done + +if [ -z "$PERSISTENT_PEERS" ]; then + echo "ERROR: No peers found" + exit 1 +fi + +echo "Found $(echo "$PERSISTENT_PEERS" | tr ',' '\n' | wc -l | tr -d ' ') peers" + +# Configure persistent peers +sed -i "s|^persistent_peers *=.*|persistent_peers = \"$PERSISTENT_PEERS\"|" "$HOME_DIR/config/config.toml" + +# Disable block sync verification for faster sync +sed -i -E "s|^verify_data *=.*|verify_data = false|" "$HOME_DIR/config/config.toml" + +# Query network for latest height +echo "Querying network for latest height..." 
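+# LATEST_HEIGHT drives both modes below: with state sync enabled the trust
+# height is set 1000 blocks behind it, and the monitor loop later treats
+# reaching that trust height as the end of state sync / start of block sync.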
+LATEST_HEIGHT=$(curl -sf "$RPC/block" | jq -r '.result.block.header.height')
+echo "Latest height: $LATEST_HEIGHT"
+
+if [ "$BLOCK_SYNC_ONLY" = "true" ]; then
+  echo "Block sync only mode: skipping state sync, syncing from genesis"
+  BLOCK_HEIGHT=0
+else
+  BLOCK_HEIGHT=$((LATEST_HEIGHT - 1000))
+  # Clamp so a young chain (fewer than 1000 blocks) still yields a valid trust height
+  if [ "$BLOCK_HEIGHT" -lt 1 ]; then BLOCK_HEIGHT=1; fi
+  TRUST_HASH=$(curl -sf "$RPC/block?height=$BLOCK_HEIGHT" | jq -r '.result.block_id.hash')
+
+  echo "Trust height: $BLOCK_HEIGHT"
+  echo "Trust hash: $TRUST_HASH"
+
+  # Enable state sync
+  sed -i -E "s|^(enable[[:space:]]+=[[:space:]]+).*$|\1true|" "$HOME_DIR/config/config.toml"
+  sed -i -E "s|^(rpc_servers[[:space:]]+=[[:space:]]+).*$|\1\"$RPC,$RPC\"|" "$HOME_DIR/config/config.toml"
+  sed -i -E "s|^(trust_height[[:space:]]+=[[:space:]]+).*$|\1$BLOCK_HEIGHT|" "$HOME_DIR/config/config.toml"
+  sed -i -E "s|^(trust_hash[[:space:]]+=[[:space:]]+).*$|\1\"$TRUST_HASH\"|" "$HOME_DIR/config/config.toml"
+fi
+
+echo ""
+echo "Starting celestia-appd..."
+START_TIME=$(date +%%s)
+
+celestia-appd start --home "$HOME_DIR" --force-no-bbr >/root/sync-node.log 2>&1 &
+NODE_PID=$!
+
+cleanup() {
+  kill -TERM "$NODE_PID" 2>/dev/null || true
+}
+trap cleanup EXIT INT TERM
+
+# Wait for RPC to be available
+echo "Waiting for local RPC..."
+for i in $(seq 1 60); do
+  if curl -sf http://localhost:26657/status >/dev/null 2>&1; then
+    echo "Local RPC is available"
+    break
+  fi
+  sleep 2
+done
+
+if ! curl -sf http://localhost:26657/status >/dev/null 2>&1; then
+  echo "ERROR: Local RPC not available after 120s"
+  echo "Last 50 lines of log:"
+  tail -50 /root/sync-node.log || true
+  exit 1
+fi
+
+printf "\n=== Monitoring Sync Progress ===\n"
+STATE_SYNC_COMPLETE=false
+STATE_SYNC_END_TIME=""
+PREV_HEIGHT=0
+STALL_COUNT=0
+MAX_STALLS=24
+
+elapsed=0
+while [ $elapsed -lt $SYNC_TIMEOUT ]; do
+  # Check if process is still alive
+  if ! kill -0 $NODE_PID 2>/dev/null; then
+    echo "ERROR: celestia-appd process died"
+    echo "Last 50 lines of log:"
+    tail -50 /root/sync-node.log || true
+    exit 1
+  fi
+
+  STATUS=$(curl -sf http://localhost:26657/status 2>/dev/null || echo "{}")
+  CATCHING_UP=$(echo "$STATUS" | jq -r '.result.sync_info.catching_up // "true"')
+  CURRENT_HEIGHT=$(echo "$STATUS" | jq -r '.result.sync_info.latest_block_height // "0"')
+  NETWORK_TIP=$(curl -sf "$RPC/block" 2>/dev/null | jq -r '.result.block.header.height // "0"' 2>/dev/null || echo "0")
+  BLOCKS_BEHIND=$((NETWORK_TIP - CURRENT_HEIGHT))
+  [ $BLOCKS_BEHIND -lt 0 ] && BLOCKS_BEHIND=0
+
+  # Detect stalled sync
+  if [ "$CURRENT_HEIGHT" = "$PREV_HEIGHT" ] && [ "$CURRENT_HEIGHT" != "0" ] && [ "$BLOCKS_BEHIND" -gt "5" ]; then
+    STALL_COUNT=$((STALL_COUNT + 1))
+    if [ $STALL_COUNT -ge $MAX_STALLS ]; then
+      NUM_PEERS=$(curl -sf http://localhost:26657/net_info 2>/dev/null | jq -r '.result.n_peers // "0"' 2>/dev/null || echo "0")
+      echo "ERROR: Sync stalled for 2 minutes at height $CURRENT_HEIGHT"
+      echo "Peers connected: $NUM_PEERS"
+      echo "Last 50 lines of log:"
+      tail -50 /root/sync-node.log || true
+      exit 1
+    fi
+    echo "[$(date +%%T)] Height: $CURRENT_HEIGHT / $NETWORK_TIP (${BLOCKS_BEHIND} behind) | STALLED ($STALL_COUNT/${MAX_STALLS})"
+  else
+    STALL_COUNT=0
+    echo "[$(date +%%T)] Height: $CURRENT_HEIGHT / $NETWORK_TIP (${BLOCKS_BEHIND} behind) | Catching up: $CATCHING_UP"
+  fi
+  PREV_HEIGHT=$CURRENT_HEIGHT
+
+  # Check state sync completion
+  if [ "$STATE_SYNC_COMPLETE" = "false" ] && [ "$CURRENT_HEIGHT" -ge "$BLOCK_HEIGHT" ] 2>/dev/null; then
+    STATE_SYNC_END_TIME=$(date +%%s)
+    STATE_SYNC_DURATION=$((STATE_SYNC_END_TIME - START_TIME))
+    printf "\nState sync complete! Reached trust height %%s (%%ss)\n=== Monitoring Block Sync to Tip ===\n" "$BLOCK_HEIGHT" "$STATE_SYNC_DURATION"
+    STATE_SYNC_COMPLETE=true
+  fi
+
+  # Check if fully synced. Require a valid network tip and catching_up=false so a
+  # transient RPC failure (NETWORK_TIP=0) cannot report success prematurely.
+  if [ "$NETWORK_TIP" -gt "0" ] 2>/dev/null && [ "$CATCHING_UP" = "false" ] && [ "$BLOCKS_BEHIND" -le "0" ] 2>/dev/null; then
+    TOTAL_END_TIME=$(date +%%s)
+    TOTAL_DURATION=$((TOTAL_END_TIME - START_TIME))
+    BLOCK_SYNC_DURATION=$((TOTAL_END_TIME - ${STATE_SYNC_END_TIME:-$START_TIME}))
+
+    if [ -z "${STATE_SYNC_END_TIME:-}" ]; then
+      STATE_SYNC_DURATION=$TOTAL_DURATION
+    fi
+
+    printf "\n=========================================\n"
+    printf "ITERATION %%d/%%d COMPLETE\n" "$ITERATION" "$TOTAL_ITERATIONS"
+    printf "=========================================\n"
+    printf "State sync duration: %%ss\n" "${STATE_SYNC_DURATION:-$TOTAL_DURATION}"
+    printf "Block sync duration: %%ss\n" "$BLOCK_SYNC_DURATION"
+    printf "Total sync duration: %%ss\n" "$TOTAL_DURATION"
+    printf "Final height: %%s\n" "$CURRENT_HEIGHT"
+    printf "Network tip: %%s\n" "$NETWORK_TIP"
+    printf "=========================================\n"
+
+    kill -TERM "$NODE_PID" 2>/dev/null || true
+    trap - EXIT INT TERM
+    exit 0
+  fi
+
+  sleep $POLL_INTERVAL
+  elapsed=$((elapsed + POLL_INTERVAL))
+done
+
+echo "ERROR: Sync timeout (${SYNC_TIMEOUT}s)"
+exit 1
+`, chainID, rpcEndpoint, peersStr, iteration, totalIterations, blockSyncOnlyStr)
+}
diff --git a/tools/talis/txsim.go b/tools/talis/txsim.go
new file mode 100644
index 0000000000..390b2137b7
--- /dev/null
+++ b/tools/talis/txsim.go
@@ -0,0 +1,140 @@
+package main
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/spf13/cobra"
+)
+
+const (
+	TxSimSessionName = "txsim"
+)
+
+// startTxsimCmd creates a cobra command for starting txsim on remote instances.
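+//
+// Example invocation (illustrative values; the flag names and defaults are the
+// ones defined below, and the binary name is assumed to be `talis`):
+//
+//	talis txsim -d . -i 2 -s 4 -b 1 -m 100000 -x 500000
+//
+// This launches txsim inside a detached tmux session named "txsim" on the
+// first two validators listed in the loaded config.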
+func startTxsimCmd() *cobra.Command {
+	var (
+		instances          int
+		seqCount           int
+		blobsPerPFB        int
+		startSize          int
+		endSize            int
+		rootDir            string
+		cfgPath            string
+		SSHKeyPath         string
+		fireAndForget      bool
+		fireAndForgetDelay time.Duration
+	)
+
+	cmd := &cobra.Command{
+		Use:   "txsim",
+		Short: "Starts the txsim command on remote validators",
+		Long:  "Connects to remote validators and starts the txsim command in a detached tmux session.",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			cfg, err := LoadConfig(rootDir)
+			if err != nil {
+				return fmt.Errorf("failed to load config: %w", err)
+			}
+
+			if len(cfg.Validators) == 0 {
+				return fmt.Errorf("no validators found in config")
+			}
+
+			resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", ""))
+
+			txsimScript := fmt.Sprintf(
+				"txsim .celestia-app/ --blob %d --blob-amounts %d --blob-sizes %d-%d --grpc-endpoint localhost:9091 --feegrant",
+				seqCount,
+				blobsPerPFB,
+				startSize,
+				endSize,
+			)
+			if fireAndForget {
+				txsimScript += fmt.Sprintf(" --fire-and-forget --fire-and-forget-delay %s", fireAndForgetDelay.String())
+			}
+			txsimScript += " > txsim.log"
+
+			// only spin up txsim on the number of instances that were specified.
+			insts := []Instance{}
+			for i, val := range cfg.Validators {
+				if i >= instances {
+					break
+				}
+				insts = append(insts, val)
+			}
+
+			fmt.Printf("starting txsim on %d of %d validators with:\n%s\n", len(insts), len(cfg.Validators), txsimScript)
+
+			return runScriptInTMux(insts, resolvedSSHKeyPath, txsimScript, TxSimSessionName, time.Minute*5)
+		},
+	}
+
+	// Define flags for the command
+	cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize")
+	cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "name of the config") // Keep cfgPath flag for consistency with other commands, although not strictly used after LoadConfig.
+	cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to the user's SSH key (overrides environment variable and default)")
+	cmd.Flags().IntVarP(&seqCount, "sequences", "s", 1, "the number of sequences (concurrent PFB streams) run by each txsim instance")
+	cmd.Flags().IntVarP(&instances, "instances", "i", 1, "the number of instances of txsim, each run on its own validator")
+	cmd.Flags().IntVarP(&blobsPerPFB, "blobs-per-pfb", "b", 1, "the number of blobs in each PFB")
+	cmd.Flags().IntVarP(&startSize, "min-blob-size", "m", 1000000, "the minimum number of bytes in each blob")
+	cmd.Flags().IntVarP(&endSize, "max-blob-size", "x", 1900000, "the maximum number of bytes in each blob")
+	cmd.Flags().BoolVar(&fireAndForget, "fire-and-forget", false, "enable fire-and-forget mode (broadcast txs without waiting for inclusion)")
+	cmd.Flags().DurationVar(&fireAndForgetDelay, "fire-and-forget-delay", 500*time.Millisecond, "delay between submissions in fire-and-forget mode")
+	_ = cmd.MarkFlagRequired("sequences")
+	_ = cmd.MarkFlagRequired("instances")
+	return cmd
+}
+
+// killTmuxSessionCmd creates a cobra command for killing a tmux session on remote validators.
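+//
+// Example invocation (illustrative values; flags are defined below, and the
+// binary name is assumed to be `talis`):
+//
+//	talis kill-session -s txsim -t 1m
+//
+// This runs `tmux kill-session -t txsim` on every validator and encoder host
+// listed in the loaded config.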
+func killTmuxSessionCmd() *cobra.Command { + var ( + rootDir string + cfgPath string + SSHKeyPath string + session string + timeout time.Duration + ) + + cmd := &cobra.Command{ + Use: "kill-session", + Short: "Kills a detached tmux session on remote validators", + Long: "Connects to remote validator nodes and kills the named tmux session (errors suppressed).", + Aliases: []string{"k"}, + RunE: func(cmd *cobra.Command, args []string) error { + // Load config + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + // Resolve SSH key + resolvedKey := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + // Raw kill session (suppress errors so no output if session doesn't exist) + killScript := fmt.Sprintf( + "tmux kill-session -t %s 2>/dev/null", + session, + ) + + // Target all instance types: validators + encoders + targets := append([]Instance{}, cfg.Validators...) + targets = append(targets, cfg.Encoders...) + + // Run the kill script in its own tmux on each host + return runScriptInTMux(targets, resolvedKey, killScript, "kill", timeout) + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory to load config from") + cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "config file name") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to SSH private key (overrides env/default)") + cmd.Flags().StringVarP(&session, "session", "s", "txsim", "name of the tmux session to kill") + _ = cmd.MarkFlagRequired("session") + cmd.Flags().DurationVarP(&timeout, "timeout", "t", time.Minute*2, "how long to wait for SSH/tmux commands to complete") + + return cmd +} diff --git a/tools/talis/upload_data.go b/tools/talis/upload_data.go new file mode 100644 index 0000000000..a23a5b8cb9 --- /dev/null +++ b/tools/talis/upload_data.go @@ -0,0 +1,54 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/spf13/cobra" +) + +// uploadDataCmd creates a cobra command for kicking off trace collection +func uploadDataCmd() *cobra.Command { + var ( + rootDir string + SSHKeyPath string + ) + + cmd := &cobra.Command{ + Use: "upload-data", + Short: "Upload data from the talis network", + Long: "Connects to every node in the network and starts the upload_traces.sh script in a detached tmux session.", + Aliases: []string{"u"}, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators (nodes) found in config") + } + + resolvedKey := resolveValue( + SSHKeyPath, + EnvVarSSHKeyPath, + strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", ""), + ) + + const sessionName = "traces" + return runScriptInTMux( + cfg.Validators, + resolvedKey, + "source /root/payload/upload_traces.sh", + sessionName, + time.Minute*5, + ) + }, + } + + // define your flags + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory containing your config") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "override path to your SSH private key") + return cmd +} diff --git a/tools/talis/util_test.go b/tools/talis/util_test.go new file mode 100644 index 0000000000..109c1beb65 --- /dev/null +++ b/tools/talis/util_test.go @@ -0,0 +1,35 @@ +package main + +import ( + "testing" + + 
"github.com/stretchr/testify/require" +) + +func TestMatchPattern(t *testing.T) { + tests := []struct { + name string + pattern string + input string + expected bool + }{ + {"wildcard suffix match", "validator-*", "validator-0", true}, + {"wildcard matches anything", "*", "anything", true}, + {"wildcard matches exact", "node-*", "node-123", true}, + {"wildcard mismatch", "node-*", "validator-1", false}, + {"exact match", "node-1", "node-1", true}, + {"exact mismatch", "node-1", "node-2", false}, + {"prefix only", "*-0", "validator-0", true}, + {"suffix only", "validator-*", "node-0", false}, + {"empty pattern matches nothing", "", "anything", false}, + {"wildcard middle", "val*-1", "validator-1", true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + match, err := matchPattern(tt.pattern, tt.input) + require.NoError(t, err) + require.Equal(t, tt.expected, match) + }) + } +}