
Commit cd18407

add changes from pr
1 parent 398e911 commit cd18407

File tree: block/submitter.go, test/e2e/evm_da_restart_e2e_test.go

2 files changed: +37 −31 lines


block/submitter.go

Lines changed: 21 additions & 15 deletions
@@ -15,17 +15,18 @@ const (
 	noGasPrice = -1
 )
 
-
-type RetryStrategy struct {
+// retryStrategy manages retry logic with backoff and gas price adjustments for DA submissions
+type retryStrategy struct {
 	attempt         int
 	backoff         time.Duration
 	gasPrice        float64
 	initialGasPrice float64
 	maxAttempts     int
 }
 
-func NewRetryStrategy(initialGasPrice float64) *RetryStrategy {
-	return &RetryStrategy{
+// newRetryStrategy creates a new retryStrategy with the given initial gas price
+func newRetryStrategy(initialGasPrice float64) *retryStrategy {
+	return &retryStrategy{
 		attempt:  0,
 		backoff:  0,
 		gasPrice: initialGasPrice,
@@ -34,27 +35,32 @@ func NewRetryStrategy(initialGasPrice float64) *RetryStrategy {
 	}
 }
 
-func (r *RetryStrategy) ShouldContinue() bool {
+// ShouldContinue returns true if the retry strategy should continue attempting submissions
+func (r *retryStrategy) ShouldContinue() bool {
 	return r.attempt < r.maxAttempts
 }
 
-func (r *RetryStrategy) NextAttempt() {
+// NextAttempt increments the attempt counter
+func (r *retryStrategy) NextAttempt() {
 	r.attempt++
 }
 
-func (r *RetryStrategy) ResetOnSuccess(gasMultiplier float64) {
+// ResetOnSuccess resets the backoff and adjusts the gas price downward after a successful submission
+func (r *retryStrategy) ResetOnSuccess(gasMultiplier float64) {
 	r.backoff = 0
 	if gasMultiplier > 0 && r.gasPrice != noGasPrice {
 		r.gasPrice = r.gasPrice / gasMultiplier
 		r.gasPrice = max(r.gasPrice, r.initialGasPrice)
 	}
 }
 
-func (r *RetryStrategy) BackoffOnFailure(m *Manager) {
+// BackoffOnFailure applies exponential backoff after a submission failure
+func (r *retryStrategy) BackoffOnFailure(m *Manager) {
 	r.backoff = m.exponentialBackoff(r.backoff)
 }
 
-func (r *RetryStrategy) BackoffOnMempool(mempoolTTL int, blockTime time.Duration, gasMultiplier float64) {
+// BackoffOnMempool applies mempool-specific backoff and increases the gas price when a transaction is stuck in the mempool
+func (r *retryStrategy) BackoffOnMempool(mempoolTTL int, blockTime time.Duration, gasMultiplier float64) {
 	r.backoff = blockTime * time.Duration(mempoolTTL)
 	if gasMultiplier > 0 && r.gasPrice != noGasPrice {
 		r.gasPrice = r.gasPrice * gasMultiplier
@@ -75,7 +81,7 @@ func handleSuccessfulSubmission[T any](
 	marshaled [][]byte,
 	res *coreda.ResultSubmit,
 	postSubmit func([]T, *coreda.ResultSubmit, float64),
-	retryStrategy *RetryStrategy,
+	retryStrategy *retryStrategy,
 	itemType string,
 ) SubmissionOutcome[T] {
 	m.recordDAMetrics("submission", DAModeSuccess)
@@ -111,7 +117,7 @@ func handleSuccessfulSubmission[T any](
 func handleMempoolFailure(
 	m *Manager,
 	res *coreda.ResultSubmit,
-	retryStrategy *RetryStrategy,
+	retryStrategy *retryStrategy,
 	attempt int,
 ) {
 	m.logger.Error("DA layer submission failed",
@@ -131,7 +137,7 @@ func handleTooBigError[T any](
 	ctx context.Context,
 	remaining []T,
 	marshaled [][]byte,
-	retryStrategy *RetryStrategy,
+	retryStrategy *retryStrategy,
 	postSubmit func([]T, *coreda.ResultSubmit, float64),
 	itemType string,
 	attempt int,
@@ -178,7 +184,7 @@ func handleTooBigError[T any](
 func handleGenericFailure(
 	m *Manager,
 	res *coreda.ResultSubmit,
-	retryStrategy *RetryStrategy,
+	retryStrategy *retryStrategy,
 	attempt int,
 ) {
 	m.logger.Error("DA layer submission failed",
@@ -269,7 +275,7 @@ func submitToDA[T any](
 		return err
 	}
 
-	retryStrategy := NewRetryStrategy(m.gasPrice)
+	retryStrategy := newRetryStrategy(m.gasPrice)
 	remaining := items
 	numSubmitted := 0
 
@@ -341,7 +347,7 @@ func handleSubmissionResult[T any](
 	res coreda.ResultSubmit,
 	remaining []T,
 	marshaled [][]byte,
-	retryStrategy *RetryStrategy,
+	retryStrategy *retryStrategy,
 	postSubmit func([]T, *coreda.ResultSubmit, float64),
 	itemType string,
 ) SubmissionOutcome[T] {
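To make the new retryStrategy easier to follow outside the diff, here is a small, self-contained sketch of how its methods interact. The struct fields and the gas-price arithmetic mirror the hunks above; everything else is invented for illustration: the driver loop, the simulated mempool stalls, the 0.02 starting price, the 1.5 multiplier, and the maxAttempts of 5 are not from this commit, and BackoffOnFailure is omitted because exponentialBackoff lives on Manager.

package main

import (
	"fmt"
	"time"
)

const noGasPrice = -1

// retryStrategy mirrors the struct added in block/submitter.go above.
type retryStrategy struct {
	attempt         int
	backoff         time.Duration
	gasPrice        float64
	initialGasPrice float64
	maxAttempts     int
}

// ResetOnSuccess clears the backoff and walks the gas price back down,
// clamping at the initial price (same arithmetic as the diff).
func (r *retryStrategy) ResetOnSuccess(gasMultiplier float64) {
	r.backoff = 0
	if gasMultiplier > 0 && r.gasPrice != noGasPrice {
		r.gasPrice = r.gasPrice / gasMultiplier
		r.gasPrice = max(r.gasPrice, r.initialGasPrice)
	}
}

// BackoffOnMempool waits out the mempool TTL and bids a higher gas price.
func (r *retryStrategy) BackoffOnMempool(mempoolTTL int, blockTime time.Duration, gasMultiplier float64) {
	r.backoff = blockTime * time.Duration(mempoolTTL)
	if gasMultiplier > 0 && r.gasPrice != noGasPrice {
		r.gasPrice = r.gasPrice * gasMultiplier
	}
}

func main() {
	rs := &retryStrategy{gasPrice: 0.02, initialGasPrice: 0.02, maxAttempts: 5}
	gasMultiplier := 1.5 // hypothetical tuning value, not from this commit

	// Two simulated mempool stalls, then a success: the gas price climbs
	// 0.02 -> 0.03 -> 0.045, and ResetOnSuccess walks it back to 0.03.
	for _, stuck := range []bool{true, true, false} {
		if rs.attempt >= rs.maxAttempts { // ShouldContinue() inlined
			break
		}
		if stuck {
			rs.BackoffOnMempool(3, 2*time.Second, gasMultiplier)
		} else {
			rs.ResetOnSuccess(gasMultiplier)
		}
		rs.attempt++ // NextAttempt() inlined
		fmt.Printf("attempt=%d backoff=%v gasPrice=%.4f\n", rs.attempt, rs.backoff, rs.gasPrice)
	}
}

The clamp in ResetOnSuccess is what keeps repeated stall/success cycles from ratcheting the price below the configured floor, while the noGasPrice sentinel (-1) disables all price adjustment.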

test/e2e/evm_da_restart_e2e_test.go

Lines changed: 16 additions & 16 deletions
@@ -11,11 +11,11 @@
 //
 // Test Coverage:
 // TestEvmDARestartWithPendingBlocksE2E - Tests the scenario where:
-// 1. Blocks are created and published to DA
-// 2. DA layer is killed
-// 3. More blocks are created until pending blocks exceed max blob size
-// 4. DA layer is restarted
-// 5. Verification that pending blocks are submitted correctly without infinite loops
+// 1. Blocks are created and published to DA
+// 2. DA layer is killed
+// 3. More blocks are created until pending blocks exceed max blob size
+// 4. DA layer is restarted
+// 5. Verification that pending blocks are submitted correctly without infinite loops
 package e2e
 
 import (
@@ -135,7 +135,7 @@ func TestEvmDARestartWithPendingBlocksE2E(t *testing.T) {
 			currentHeader, err := client.HeaderByNumber(ctx, nil)
 			if err == nil {
 				currentHeight := currentHeader.Number.Uint64()
-				t.Logf("Progress: Block batch %d/%d completed. Current height: %d, Pending txs: %d",
+				t.Logf("Progress: Block batch %d/%d completed. Current height: %d, Pending txs: %d",
 					block+1, targetBlocks, currentHeight, len(pendingTxHashes))
 			}
 		}
@@ -166,7 +166,7 @@ func TestEvmDARestartWithPendingBlocksE2E(t *testing.T) {
 	// Use an extremely small max blob size (100 bytes) to guarantee StatusTooBig with our large batch
 	sut.ExecCmd(localDABinary, "-max-blob-size", "100")
 	t.Log("✅ DA layer restarted")
-
+
 	// Wait for DA to be ready
 	time.Sleep(2 * time.Second)
 
@@ -176,8 +176,8 @@ func TestEvmDARestartWithPendingBlocksE2E(t *testing.T) {
 	// Monitor the recovery process for a reasonable amount of time
 	recoveryStart := time.Now()
 	recoveryTimeout := 30 * time.Second // Reduced to 30 seconds to quickly detect infinite loop
-	checkInterval := 2 * time.Second    // Check more frequently
-
+	checkInterval := 2 * time.Second // Check more frequently
+
 	var recoverySuccess bool
 	var finalTxCount int
 
@@ -194,7 +194,7 @@ func TestEvmDARestartWithPendingBlocksE2E(t *testing.T) {
 		finalTxCount = includedCount
 		progressPct := float64(includedCount) / float64(len(pendingTxHashes)) * 100
 
-		t.Logf("Recovery progress: %d/%d transactions included (%.1f%%) - Elapsed: %v",
+		t.Logf("Recovery progress: %d/%d transactions included (%.1f%%) - Elapsed: %v",
 			includedCount, len(pendingTxHashes), progressPct, elapsed)
 
 		// Check if all transactions are included
@@ -212,15 +212,15 @@ func TestEvmDARestartWithPendingBlocksE2E(t *testing.T) {
 
 	// Verify that the recovery process completed successfully
 	if !recoverySuccess {
-		t.Logf("⚠️ Recovery incomplete after %v: %d/%d transactions included",
+		t.Logf("⚠️ Recovery incomplete after %v: %d/%d transactions included",
 			recoveryTimeout, finalTxCount, len(pendingTxHashes))
-
+
 		// Even if not all transactions are included, let's verify there's no infinite loop
 		// by checking that some progress was made
-		require.Greater(t, finalTxCount, 0,
+		require.Greater(t, finalTxCount, 0,
 			"At least some pending transactions should be included after DA restart")
-		require.Greater(t, float64(finalTxCount)/float64(len(pendingTxHashes)), 0.5,
-			"At least 50%% of pending transactions should be included to prove recovery is working")
+		require.Greater(t, float64(finalTxCount)/float64(len(pendingTxHashes)), 0.5,
+			"At least 50% of pending transactions should be included to prove recovery is working")
 	}
 
 	// Verify final block height increased appropriately
@@ -257,4 +257,4 @@ func TestEvmDARestartWithPendingBlocksE2E(t *testing.T) {
 	t.Logf("   ✅ No infinite loops detected during recovery")
 	t.Logf("   ✅ System stability confirmed with final transaction")
 	t.Logf("   🚀 Total test duration: %v", time.Since(blockCreationStart))
-}
+}
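Nearly every hunk in this file is whitespace-only cleanup (trailing spaces, comment realignment, and a newline at end of file). The one substantive fix is the final require.Greater message, where 50%% became 50%: testify only runs a message through fmt.Sprintf when format arguments follow it, so a bare message string is printed verbatim and the doubled percent sign would have appeared literally in the failure output. A minimal sketch of the distinction (the test name is hypothetical; the msgAndArgs behavior is testify's):

package e2e

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestAssertionMessageEscaping(t *testing.T) {
	// With no trailing args, testify uses the message verbatim: write 50%, not 50%%.
	require.Greater(t, 0.6, 0.5, "At least 50% of pending transactions should be included")

	// Only when format arguments are supplied is the message a format string,
	// so %% is needed there to produce a single percent sign.
	require.Greaterf(t, 0.6, 0.5, "At least %d%% of pending transactions should be included", 50)
}

Since these messages only surface on failure, the bad escaping never broke the test; it just made the eventual failure output harder to read.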
