
Commit ef9e37e

update lumen to ev-reth and update log statements
1 parent 4da2ecb commit ef9e37e

23 files changed: +68 −76 lines

apps/evm/single/docker-compose.yml

Lines changed: 14 additions & 15 deletions

@@ -1,10 +1,9 @@
-version: "3.8"
 
 services:
-  rollkit-reth:
+  ev-reth:
-    container_name: rollkit-reth
+    container_name: ev-reth
     restart: unless-stopped
-    image: ghcr.io/evstack/lumen:latest
+    image: ghcr.io/evstack/ev-reth:latest
     ports:
       - "9001:9001" # metrics
       - "30303:30303" # eth/66 peering
@@ -18,7 +17,7 @@ services:
     entrypoint: /bin/sh -c
     command:
       - |
-        lumen node \
+        ev-reth node \
         --chain /root/chain/genesis.json \
         --datadir /home/reth/eth-home \
         --metrics 0.0.0.0:9001 \
@@ -39,22 +38,22 @@ services:
         --txpool.max-account-slots 2048 \
         --txpool.max-new-txns 2048 \
         --txpool.additional-validation-tasks 16 \
-        --rollkit.enable
+        --ev-reth.enable
     networks:
-      - rollkit-network
+      - evolve-network
 
   local-da:
-    image: ghcr.io/rollkit/local-da:v0.1.0
+    image: ghcr.io/evstack/local-da:v0.1.0
     ports:
       - "7980:7980"
     command: ["-listen-all"]
     networks:
-      - rollkit-network
+      - evolve-network
 
-  rollkit-evm-single:
+  ev-node-evm-single:
     image: ghcr.io/evstack/ev-node-evm-single:main
     depends_on:
-      rollkit-reth:
+      ev-reth:
        condition: service_started
       local-da:
        condition: service_started
@@ -64,8 +63,8 @@ services:
     entrypoint: /usr/bin/entrypoint.sh
     command: start
     environment:
-      - EVM_ENGINE_URL=http://rollkit-reth:8551
-      - EVM_ETH_URL=http://rollkit-reth:8545
+      - EVM_ENGINE_URL=http://ev-reth:8551
+      - EVM_ETH_URL=http://ev-reth:8545
       - EVM_JWT_SECRET=f747494bb0fb338a0d71f5f9fe5b5034c17cc988c229b59fd71e005ee692e9bf
       - EVM_GENESIS_HASH=0x2b8bbb1ea1e04f9c9809b4b278a8687806edc061a356c7dbc491930d8e922503
       - EVM_BLOCK_TIME=1s
@@ -74,14 +73,14 @@ services:
       # - DA_AUTH_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJwdWJsaWMiLCJyZWFkIiwid3JpdGUiXSwiTm9uY2UiOiJQcEswTmhyWi9IY05NWkVtUG9sSXNpRTRDcUpMdE9mbWtBMW0zMWFUaEswPSIsIkV4cGlyZXNBdCI6IjAwMDEtMDEtMDFUMDA6MDA6MDBaIn0.gaWh6tS6Rel1XFYclDkapNnZlaZVjrikCRNBxSDkCGk
       # - DA_NAMESPACE=00000000000000000000000000000000000000000008e5f679bf7116c1
     networks:
-      - rollkit-network
+      - evolve-network
 
 volumes:
   evm-single-data:
   reth:
 
 networks:
-  rollkit-network:
+  evolve-network:
     driver: bridge
     ipam:
       config:
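
As a quick sanity check of the renamed service, a snippet like the one below could poll the ev-reth JSON-RPC endpoint once the stack is up. This is an illustrative sketch, not part of the commit: the URL mirrors EVM_ETH_URL above and resolves only from inside the evolve-network bridge network (or from the host only if port 8545 is published, which the hunks shown here do not confirm).

package main

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
)

func main() {
    // Assumed endpoint: matches EVM_ETH_URL in the compose file; reachable only
    // from inside the evolve-network network unless 8545 is published to the host.
    url := "http://ev-reth:8545"
    // Standard Ethereum JSON-RPC request for the latest block number.
    req := []byte(`{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}`)

    resp, err := http.Post(url, "application/json", bytes.NewReader(req))
    if err != nil {
        fmt.Println("ev-reth not reachable:", err)
        return
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    fmt.Println(string(body)) // e.g. {"jsonrpc":"2.0","id":1,"result":"0x0"}
}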

block/aggregation.go

Lines changed: 1 addition & 1 deletion

@@ -24,7 +24,7 @@ func (m *Manager) AggregationLoop(ctx context.Context, errCh chan<- error) {
     }
 
     if delay > 0 {
-        m.logger.Info("waiting to produce block", "delay", delay)
+        m.logger.Info("waiting to produce block, delay:", delay)
         time.Sleep(delay)
     }
 
block/aggregation_test.go

Lines changed: 2 additions & 2 deletions

@@ -86,7 +86,7 @@ func TestAggregationLoop_Normal_BasicInterval(t *testing.T) {
         m.logger.Info("AggregationLoop exited")
     }()
 
-    m.logger.Info("Waiting for blocks...", "duration", waitTime)
+    m.logger.Info("Waiting for blocks..., duration:", waitTime)
     time.Sleep(waitTime)
 
     m.logger.Info("Cancelling context")
@@ -98,7 +98,7 @@ func TestAggregationLoop_Normal_BasicInterval(t *testing.T) {
     publishLock.Lock()
     defer publishLock.Unlock()
 
-    m.logger.Info("Recorded publish times", "count", len(publishTimes), "times", publishTimes)
+    m.logger.Info("Recorded publish times, count:", len(publishTimes), "times:", publishTimes)
 
     expectedCallsLow := int(waitTime/blockTime) - 1
     expectedCallsHigh := int(waitTime/blockTime) + 1

block/manager.go

Lines changed: 5 additions & 5 deletions

@@ -322,22 +322,22 @@ func NewManager(
     }
 
     if config.DA.BlockTime.Duration == 0 {
-        logger.Info("using default DA block time", "DABlockTime", defaultDABlockTime)
+        logger.Info("using default DA block time, DABlockTime:", defaultDABlockTime)
         config.DA.BlockTime.Duration = defaultDABlockTime
     }
 
     if config.Node.BlockTime.Duration == 0 {
-        logger.Info("using default block time", "BlockTime", defaultBlockTime)
+        logger.Info("using default block time, BlockTime:", defaultBlockTime)
         config.Node.BlockTime.Duration = defaultBlockTime
     }
 
     if config.Node.LazyBlockInterval.Duration == 0 {
-        logger.Info("using default lazy block time", "LazyBlockTime", defaultLazyBlockTime)
+        logger.Info("using default lazy block time, LazyBlockTime:", defaultLazyBlockTime)
         config.Node.LazyBlockInterval.Duration = defaultLazyBlockTime
     }
 
     if config.DA.MempoolTTL == 0 {
-        logger.Info("using default mempool ttl", "MempoolTTL", defaultMempoolTTL)
+        logger.Info("using default mempool ttl, MempoolTTL:", defaultMempoolTTL)
         config.DA.MempoolTTL = defaultMempoolTTL
     }
 
@@ -645,7 +645,7 @@ func (m *Manager) publishBlockInternal(ctx context.Context) error {
     // If there is use that instead of creating a new block
     pendingHeader, pendingData, err := m.store.GetBlockData(ctx, newHeight)
     if err == nil {
-        m.logger.Info("using pending block", "height", newHeight)
+        m.logger.Info("using pending block, height:", newHeight)
         header = pendingHeader
         data = pendingData
     } else {

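For context on the log changes in the block/*.go files above: the structured key/value pairs (message, "key", value) are folded into the message string, with the values passed as trailing arguments. Below is a minimal sketch of that call shape using a hypothetical stand-in logger; the node's real logger interface is not shown in this diff, so only the shape of the calls is illustrated.

package main

import (
    "fmt"
    "time"
)

// stubLogger is a hypothetical stand-in used only to show the call shape;
// it prints the message followed by any trailing values.
type stubLogger struct{}

func (stubLogger) Info(msg string, args ...any) {
    fmt.Println(append([]any{msg}, args...)...)
}

func main() {
    var logger stubLogger
    delay := 500 * time.Millisecond

    // Before this commit (structured key/value pairs):
    logger.Info("waiting to produce block", "delay", delay)
    // After this commit (key folded into the message, value as a trailing argument):
    logger.Info("waiting to produce block, delay:", delay)
}
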
block/reaper.go

Lines changed: 1 addition & 1 deletion

@@ -55,7 +55,7 @@ func (r *Reaper) Start(ctx context.Context) {
     ticker := time.NewTicker(r.interval)
     defer ticker.Stop()
 
-    r.logger.Info("Reaper started", "interval", r.interval)
+    r.logger.Info("Reaper started, interval:", r.interval)
 
     for {
         select {

block/retriever.go

Lines changed: 2 additions & 2 deletions

@@ -143,7 +143,7 @@ func (m *Manager) handlePotentialHeader(ctx context.Context, bz []byte, daHeight
     headerHash := header.Hash().String()
     m.headerCache.SetDAIncluded(headerHash, daHeight)
     m.sendNonBlockingSignalToDAIncluderCh()
-    m.logger.Info("header marked as DA included, headerHeight: ", header.Height(), "headerHash: ", headerHash)
+    m.logger.Info(fmt.Sprintf("header marked as DA included, headerHeight: %d, headerHash: %s", header.Height(), headerHash))
     if !m.headerCache.IsSeen(headerHash) {
         select {
         case <-ctx.Done():
@@ -178,7 +178,7 @@ func (m *Manager) handlePotentialData(ctx context.Context, bz []byte, daHeight u
     dataHashStr := signedData.Data.DACommitment().String()
     m.dataCache.SetDAIncluded(dataHashStr, daHeight)
     m.sendNonBlockingSignalToDAIncluderCh()
-    m.logger.Info("signed data marked as DA included, dataHash: ", dataHashStr, "daHeight: ", daHeight, "height: ", signedData.Height())
+    m.logger.Info(fmt.Sprintf("signed data marked as DA included, dataHash: %s, daHeight: %d, height: %d", dataHashStr, daHeight, signedData.Height()))
     if !m.dataCache.IsSeen(dataHashStr) {
         select {
         case <-ctx.Done():

block/submitter.go

Lines changed: 3 additions & 3 deletions

@@ -150,14 +150,14 @@ func submitToDA[T any](
            if m.gasMultiplier > 0 && gasPrice != -1 {
                gasPrice = gasPrice * m.gasMultiplier
            }
-            m.logger.Info("retrying DA layer submission with", "backoff", backoff, "gasPrice", gasPrice)
+            m.logger.Info(fmt.Sprintf("retrying DA layer submission with, backoff: %s, gasPrice: %v", backoff, gasPrice))
        case coreda.StatusContextCanceled:
-            m.logger.Info("DA layer submission canceled due to context cancellation", "attempt", attempt)
+            m.logger.Info(fmt.Sprintf("DA layer submission canceled due to context cancellation, attempt: %d", attempt))
            return nil
        case coreda.StatusTooBig:
            fallthrough
        default:
-            m.logger.Error("DA layer submission failed", "error", res.Message, "attempt", attempt)
+            m.logger.Error(fmt.Sprintf("DA layer submission failed, error: %s, attempt: %d", res.Message, attempt))
            // Record failed DA submission (will retry)
            m.recordDAMetrics("submission", DAModeFail)
            backoff = m.exponentialBackoff(backoff)

da/cmd/local-da/local.go

Lines changed: 4 additions & 4 deletions

@@ -161,7 +161,7 @@ func (d *LocalDA) Commit(ctx context.Context, blobs []coreda.Blob, _ []byte) ([]
 
 // SubmitWithOptions stores blobs in DA layer (options are ignored).
 func (d *LocalDA) SubmitWithOptions(ctx context.Context, blobs []coreda.Blob, gasPrice float64, _ []byte, _ []byte) ([]coreda.ID, error) {
-    d.logger.Info("SubmitWithOptions called, numBlobs: ", len(blobs), "gasPrice: ", gasPrice)
+    d.logger.Info(fmt.Sprintf("SubmitWithOptions called, numBlobs: %d, gasPrice: %f", len(blobs), gasPrice))
     d.mu.Lock()
     defer d.mu.Unlock()
     ids := make([]coreda.ID, len(blobs))
@@ -172,13 +172,13 @@ func (d *LocalDA) SubmitWithOptions(ctx context.Context, blobs []coreda.Blob, ga
 
         d.data[d.height] = append(d.data[d.height], kvp{ids[i], blob})
     }
-    d.logger.Info("SubmitWithOptions successful, newHeight: ", d.height, "count: ", len(ids))
+    d.logger.Info(fmt.Sprintf("SubmitWithOptions successful, newHeight: %d, count: %d", d.height, len(ids)))
     return ids, nil
 }
 
 // Submit stores blobs in DA layer (options are ignored).
 func (d *LocalDA) Submit(ctx context.Context, blobs []coreda.Blob, gasPrice float64, _ []byte) ([]coreda.ID, error) {
-    d.logger.Info("Submit called, numBlobs: ", len(blobs), "gasPrice: ", gasPrice)
+    d.logger.Info(fmt.Sprintf("Submit called, numBlobs: %d, gasPrice: %f", len(blobs), gasPrice))
     d.mu.Lock()
     defer d.mu.Unlock()
     ids := make([]coreda.ID, len(blobs))
@@ -189,7 +189,7 @@ func (d *LocalDA) Submit(ctx context.Context, blobs []coreda.Blob, gasPrice floa
 
         d.data[d.height] = append(d.data[d.height], kvp{ids[i], blob})
     }
-    d.logger.Info("Submit successful, newHeight: ", d.height, "count: ", len(ids))
+    d.logger.Info(fmt.Sprintf("Submit successful, newHeight: %d, count: %d", d.height, len(ids)))
     return ids, nil
 }

da/cmd/local-da/main.go

Lines changed: 2 additions & 2 deletions

@@ -47,9 +47,9 @@ func main() {
     da := NewLocalDA(logger, opts...)
 
     srv := proxy.NewServer(logger, host, port, da)
-    logger.Info("Listening on", "host", host, "port", port, "maxBlobSize", maxBlobSize)
+    logger.Info(fmt.Sprintf("Listening on, host: %s, port: %s, maxBlobSize: %d", host, port, maxBlobSize))
     if err := srv.Start(context.Background()); err != nil {
-        logger.Error("error while serving", "error", err)
+        logger.Error(fmt.Sprintf("error while serving, error: %v", err))
     }
 
     interrupt := make(chan os.Signal, 1)

da/jsonrpc/client.go

Lines changed: 2 additions & 2 deletions

@@ -158,7 +158,7 @@ func (api *API) SubmitWithOptions(ctx context.Context, inputBlobs []da.Blob, gas
            continue
        }
        if currentSize+blobLen > maxBlobSize {
-            api.Logger.Info("Blob size limit reached for batch", "maxBlobSize", maxBlobSize, "index", i, "currentSize", currentSize, "nextBlobSize", blobLen)
+            api.Logger.Info(fmt.Sprintf("Blob size limit reached for batch, maxBlobSize: %d, index: %d, currentSize: %d, nextBlobSize: %d", maxBlobSize, i, currentSize, blobLen))
            break
        }
        currentSize += blobLen
@@ -260,7 +260,7 @@ func newClient(ctx context.Context, logger logging.EventLogger, addr string, aut
        return nil, fmt.Errorf("failed to decode namespace: %w", err)
    }
    client.DA.Namespace = namespaceBytes
-    logger.Info("creating new client", "namespace", namespace)
+    logger.Info("creating new client, namepsace: ", namespace)
    errs := getKnownErrorsMapping()
    for name, module := range moduleMap(&client) {
        closer, err := jsonrpc.NewMergeClient(ctx, addr, name, []interface{}{module}, authHeader, jsonrpc.WithErrors(errs))
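
The remaining files (block/retriever.go, block/submitter.go, the local-da server, and the jsonrpc client above) instead pre-format the whole line with fmt.Sprintf and hand the logger a single string. A minimal sketch of that call shape follows, using the standard library log/slog only as a stand-in; the project's actual logger type is not shown in this diff, and the values below are hypothetical.

package main

import (
    "fmt"
    "log/slog"
)

func main() {
    // slog is only a stand-in logger here; the point is the single pre-formatted message.
    logger := slog.Default()

    // Hypothetical values for illustration.
    dataHash := "0xabc123"
    daHeight, height := uint64(7), uint64(42)

    // Call shape used after this commit: the whole message is built with fmt.Sprintf.
    logger.Info(fmt.Sprintf("signed data marked as DA included, dataHash: %s, daHeight: %d, height: %d",
        dataHash, daHeight, height))
}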
