diff --git a/.vitepress/config.ts b/.vitepress/config.ts index 1abf29877..276fba0fa 100644 --- a/.vitepress/config.ts +++ b/.vitepress/config.ts @@ -206,10 +206,17 @@ function sidebarHome() { link: "/learn/execution" }, { - text: "Resources", + text: "Technical Specifications", collapsed: true, items: [ - { text: "Technical specifications", link: "/learn/specifications" }, + { text: "overview", link: "/learn/specs/overview" }, + { text: "block-manager", link: "/learn/specs/block-manager" }, + { text: "block-validity", link: "/learn/specs/block-validity" }, + { text: "da", link: "/learn/specs/da" }, + { text: "full_node", link: "/learn/specs/full_node" }, + { text: "header-sync", link: "/learn/specs/header-sync" }, + { text: "p2p", link: "/learn/specs/p2p" }, + { text: "store", link: "/learn/specs/store" }, ], }, ], diff --git a/learn/specifications.md b/learn/specifications.md deleted file mode 100644 index 2bde150e6..000000000 --- a/learn/specifications.md +++ /dev/null @@ -1,5 +0,0 @@ -# Technical specifications - -[Rollkit specifications](https://rollkit.github.io/rollkit/index.html) - is comprehensive documentation on the inner components of Rollkit, including data storage, transaction processing, and more. It’s an essential resource for developers looking to understand, contribute to and leverage the full capabilities of Rollkit. - -Additional Rollkit documentation can be found in the [Rollkit godocs](https://pkg.go.dev/github.com/rollkit/rollkit). diff --git a/learn/specs/block-manager.md b/learn/specs/block-manager.md new file mode 100644 index 000000000..d2a643aca --- /dev/null +++ b/learn/specs/block-manager.md @@ -0,0 +1,599 @@ +# Block Manager + +## Abstract + +The block manager is a key component of full nodes and is responsible for block production or block syncing depending on the node type: sequencer or non-sequencer. 
Block syncing in this context includes retrieving the published blocks from the network (P2P network or DA network), validating them to raise fraud proofs upon validation failure, updating the state, and storing the validated blocks. A full node invokes multiple block manager functionalities in parallel, such as: + +* Block Production (only for sequencer full nodes) +* Block Publication to DA network +* Block Retrieval from DA network +* Block Sync Service +* Block Publication to P2P network +* Block Retrieval from P2P network +* State Update after Block Retrieval + +```mermaid +sequenceDiagram + title Overview of Block Manager + + participant User + participant Sequencer + participant Full Node 1 + participant Full Node 2 + participant DA Layer + + User->>Sequencer: Send Tx + Sequencer->>Sequencer: Generate Block + Sequencer->>DA Layer: Publish Block + + Sequencer->>Full Node 1: Gossip Block + Sequencer->>Full Node 2: Gossip Block + Full Node 1->>Full Node 1: Verify Block + Full Node 1->>Full Node 2: Gossip Block + Full Node 1->>Full Node 1: Mark Block Soft Confirmed + + Full Node 2->>Full Node 2: Verify Block + Full Node 2->>Full Node 2: Mark Block Soft Confirmed + + DA Layer->>Full Node 1: Retrieve Block + Full Node 1->>Full Node 1: Mark Block DA Included + + DA Layer->>Full Node 2: Retrieve Block + Full Node 2->>Full Node 2: Mark Block DA Included +``` + +### Component Architecture Overview + +```mermaid +flowchart TB + subgraph Block Manager Components + BM[Block Manager] + AGG[Aggregation] + REP[Reaper] + SUB[Submitter] + RET[Retriever] + SYNC[Sync Loop] + DAI[DA Includer] + end + + subgraph External Components + EX[Executor] + SEQ[Sequencer] + DA[DA Layer] + HS[Header Store/P2P] + DS[Data Store/P2P] + ST[Local Store] + end + + REP -->|GetTxs| EX + REP -->|SubmitBatch| SEQ + REP -->|Notify| AGG + + AGG -->|CreateBlock| BM + BM -->|ApplyBlock| EX + BM -->|Save| ST + + BM -->|Headers| SUB + BM -->|Data| SUB + SUB -->|Submit| DA + + RET -->|Retrieve| DA + RET 
-->|Headers/Data| SYNC + + HS -->|Headers| SYNC + DS -->|Data| SYNC + + SYNC -->|Complete Blocks| BM + SYNC -->|DA Included| DAI + DAI -->|SetFinal| EX +``` + +## Protocol/Component Description + +The block manager is initialized using several parameters as defined below: + +**Name**|**Type**|**Description** +|-----|-----|-----| +signing key|crypto.PrivKey|used for signing blocks and data after creation +config|config.BlockManagerConfig|block manager configurations (see config options below) +genesis|*cmtypes.GenesisDoc|initialize the block manager with genesis state (genesis configuration defined in `config/genesis.json` file under the app directory) +store|store.Store|local datastore for storing rollup blocks and states (default local store path is `$db_dir/rollkit` and `db_dir` specified in the `config.yaml` file under the app directory) +mempool, proxyapp, eventbus|mempool.Mempool, proxy.AppConnConsensus, *cmtypes.EventBus|for initializing the executor (state transition function). mempool is also used in the manager to check for availability of transactions for lazy block production +dalc|da.DAClient|the data availability light client used to submit and retrieve blocks to DA network +headerStore|*goheaderstore.Store[*types.SignedHeader]|to store and retrieve block headers gossiped over the P2P network +dataStore|*goheaderstore.Store[*types.SignedData]|to store and retrieve block data gossiped over the P2P network +signaturePayloadProvider|types.SignaturePayloadProvider|optional custom provider for header signature payloads +sequencer|core.Sequencer|used to retrieve batches of transactions from the sequencing layer +reaper|*Reaper|component that periodically retrieves transactions from the executor and submits them to the sequencer + +Block manager configuration options: + +|Name|Type|Description| +|-----|-----|-----| +|BlockTime|time.Duration|time interval used for block production and block retrieval from block store ([`defaultBlockTime`][defaultBlockTime])| 
+|DABlockTime|time.Duration|time interval used for both block publication to DA network and block retrieval from DA network ([`defaultDABlockTime`][defaultDABlockTime])| +|DAStartHeight|uint64|block retrieval from DA network starts from this height| +|LazyBlockInterval|time.Duration|time interval used for block production in lazy aggregator mode even when there are no transactions ([`defaultLazyBlockTime`][defaultLazyBlockTime])| +|LazyMode|bool|when set to true, enables lazy aggregation mode which produces blocks only when transactions are available or at LazyBlockInterval intervals| +|MaxPendingHeadersAndData|uint64|maximum number of pending headers and data blocks before pausing block production (default: 100)| +|GasPrice|float64|gas price for DA submissions (-1 for automatic/default)| +|GasMultiplier|float64|multiplier for gas price on DA submission retries (default: 1.3)| +|Namespace|da.Namespace|DA namespace ID for block submissions| + +### Block Production + +When the full node is operating as a sequencer (aka aggregator), the block manager runs the block production logic. There are two modes of block production, which can be specified in the block manager configurations: `normal` and `lazy`. + +In `normal` mode, the block manager runs a timer, which is set to the `BlockTime` configuration parameter, and continuously produces blocks at `BlockTime` intervals. + +In `lazy` mode, the block manager implements a dual timer mechanism: + +```mermaid +flowchart LR + subgraph Lazy Aggregation Mode + R[Reaper] -->|GetTxs| E[Executor] + E -->|Txs Available| R + R -->|Submit to Sequencer| S[Sequencer] + R -->|NotifyNewTransactions| N[txNotifyCh] + + N --> A{Aggregation Logic} + BT[blockTimer] --> A + LT[lazyTimer] --> A + + A -->|Txs Available| P1[Produce Block with Txs] + A -->|No Txs & LazyTimer| P2[Produce Empty Block] + + P1 --> B[Block Creation] + P2 --> B + end +``` + +1. 
A `blockTimer` that triggers block production at regular intervals when transactions are available +2. A `lazyTimer` that ensures blocks are produced at `LazyBlockInterval` intervals even during periods of inactivity + +The block manager starts building a block when any transaction becomes available in the mempool via a notification channel (`txNotifyCh`). When the `Reaper` detects new transactions, it calls `Manager.NotifyNewTransactions()`, which performs a non-blocking signal on this channel. The block manager also produces empty blocks at regular intervals to maintain consistency with the DA layer, ensuring a 1:1 mapping between DA layer blocks and execution layer blocks. + +The Reaper component periodically retrieves transactions from the executor and submits them to the sequencer. It runs independently and notifies the block manager when new transactions are available, enabling responsive block production in lazy mode. + +#### Building the Block + +The block manager of the sequencer nodes performs the following steps to produce a block: + +```mermaid +flowchart TD + A[Timer Trigger / Transaction Notification] --> B[Retrieve Batch] + B --> C{Transactions Available?} + C -->|Yes| D[Create Block with Txs] + C -->|No| E[Create Empty Block] + D --> F[Generate Header & Data] + E --> F + F --> G[Sign Header → SignedHeader] + F --> H[Sign Data → SignedData] + G --> I[Apply Block] + H --> I + I --> J[Update State] + J --> K[Save to Store] + K --> L[Add to pendingHeaders] + K --> M[Add to pendingData] + L --> N[Broadcast Header to P2P] + M --> O[Broadcast Data to P2P] +``` + +* Retrieve a batch of transactions using `retrieveBatch()` which interfaces with the sequencer +* Call `CreateBlock` using executor with the retrieved transactions +* Create separate header and data structures from the block +* Sign the header using `signing key` to generate `SignedHeader` +* Sign the data using `signing key` to generate `SignedData` (if transactions exist) +* Call `ApplyBlock` 
using executor to generate an updated state +* Save the block, validators, and updated state to local store +* Add the newly generated header to `pendingHeaders` queue +* Add the newly generated data to `pendingData` queue (if not empty) +* Publish the newly generated header and data to channels to notify other components of the sequencer node (such as block and header gossip) + +Note: When no transactions are available, the block manager creates blocks with empty data using a special `dataHashForEmptyTxs` marker. The header and data separation architecture allows headers and data to be submitted and retrieved independently from the DA layer. + +### Block Publication to DA Network + +The block manager of the sequencer full nodes implements separate submission loops for headers and data, both operating at `DABlockTime` intervals: + +```mermaid +flowchart LR + subgraph Header Submission + H1[pendingHeaders Queue] --> H2[Header Submission Loop] + H2 --> H3[Marshal to Protobuf] + H3 --> H4[Submit to DA] + H4 -->|Success| H5[Remove from Queue] + H4 -->|Failure| H6[Keep in Queue & Retry] + end + + subgraph Data Submission + D1[pendingData Queue] --> D2[Data Submission Loop] + D2 --> D3[Marshal to Protobuf] + D3 --> D4[Submit to DA] + D4 -->|Success| D5[Remove from Queue] + D4 -->|Failure| D6[Keep in Queue & Retry] + end + + H2 -.->|DABlockTime| H2 + D2 -.->|DABlockTime| D2 +``` + +#### Header Submission Loop + +The `HeaderSubmissionLoop` manages the submission of signed headers to the DA network: + +* Retrieves pending headers from the `pendingHeaders` queue +* Marshals headers to protobuf format +* Submits to DA using the generic `submitToDA` helper +* On success, removes submitted headers from the pending queue +* On failure, headers remain in the queue for retry + +#### Data Submission Loop + +The `DataSubmissionLoop` manages the submission of signed data to the DA network: + +* Retrieves pending data from the `pendingData` queue +* Marshals data to protobuf format +* 
Submits to DA using the generic `submitToDA` helper +* On success, removes submitted data from the pending queue +* On failure, data remains in the queue for retry + +#### Generic Submission Logic + +Both loops use a shared `submitToDA` function that provides: + +* Retry logic with [`maxSubmitAttempts`][maxSubmitAttempts] attempts +* Exponential backoff starting at [`initialBackoff`][initialBackoff], doubling each attempt, capped at `DABlockTime` +* Gas price management with `GasMultiplier` applied on retries +* Comprehensive metrics tracking for attempts, successes, and failures +* Context-aware cancellation support + +The manager enforces a limit on pending headers and data through `MaxPendingHeadersAndData` configuration. When this limit is reached, block production pauses to prevent unbounded growth of the pending queues. + +### Block Retrieval from DA Network + +The block manager implements a `RetrieveLoop` that regularly pulls headers and data from the DA network: + +```mermaid +flowchart TD + A[Start RetrieveLoop] --> B[Get DA Height] + B --> C{DABlockTime Timer} + C --> D[GetHeightPair from DA] + D --> E{Result?} + E -->|Success| F[Validate Signatures] + E -->|NotFound| G[Increment Height] + E -->|Error| H[Retry Logic] + + F --> I[Check Sequencer Info] + I --> J[Mark DA Included] + J --> K[Send to Sync] + K --> L[Increment Height] + L --> M[Immediate Next Retrieval] + + G --> C + H --> N{Retries < 10?} + N -->|Yes| O[Wait 100ms] + N -->|No| P[Log Error & Stall] + O --> D + M --> D +``` + +#### Retrieval Process + +1. **Height Management**: Starts from the latest of: + * DA height from the last state in local store + * `DAStartHeight` configuration parameter + * Maintains and increments `daHeight` counter after successful retrievals + +2. 
**Retrieval Mechanism**: + * Executes at `DABlockTime` intervals + * Makes `GetHeightPair(daHeight)` request to get both header and data + * Handles three possible outcomes: + * `Success`: Process retrieved header and data + * `NotFound`: No rollup block at this DA height (normal case) + * `Error`: Retry with backoff + +3. **Error Handling**: + * Implements retry logic with 100ms delay between attempts + * After 10 retries, logs error and stalls retrieval + * Does not increment `daHeight` on persistent errors + +4. **Processing Retrieved Blocks**: + * Validates header and data signatures + * Checks sequencer information + * Marks blocks as DA included in caches + * Sends to sync goroutine for state update + * Successful processing triggers immediate next retrieval without waiting for timer + +#### Header and Data Caching + +The retrieval system uses persistent caches for both headers and data: + +* Prevents duplicate processing +* Tracks DA inclusion status +* Supports out-of-order block arrival +* Enables efficient sync from P2P and DA sources + +For more details on DA integration, see the [Data Availability specification](./da.md). + +#### Out-of-Order Rollup Blocks on DA + +Rollkit should support blocks arriving out-of-order on DA, like so: +![out-of-order blocks](./out-of-order-blocks.png) + +#### Termination Condition + +If the sequencer double-signs two blocks at the same height, evidence of the fault should be posted to DA. Rollkit full nodes should process the longest valid chain up to the height of the fault evidence, and terminate. 
See diagram: +![termination condition](./termination.png) + +### Block Sync Service + +The block sync service manages the synchronization of headers and data through separate stores and channels: + +#### Architecture + +* **Header Store**: Uses `goheader.Store[*types.SignedHeader]` for header management +* **Data Store**: Uses `goheader.Store[*types.SignedData]` for data management +* **Separation of Concerns**: Headers and data are handled independently, supporting the header/data separation architecture + +#### Synchronization Flow + +1. **Header Sync**: Headers created by the sequencer are sent to the header store for P2P gossip +2. **Data Sync**: Data blocks are sent to the data store for P2P gossip +3. **Cache Integration**: Both header and data caches track seen items to prevent duplicates +4. **DA Inclusion Tracking**: Separate tracking for header and data DA inclusion status + +### Block Publication to P2P network + +The sequencer publishes headers and data separately to the P2P network: + +#### Header Publication + +* Headers are sent through the header broadcast channel +* Written to the header store for P2P gossip +* Broadcast to network peers via header sync service + +#### Data Publication + +* Data blocks are sent through the data broadcast channel +* Written to the data store for P2P gossip +* Broadcast to network peers via data sync service + +Non-sequencer full nodes receive headers and data through the P2P sync service and do not publish blocks themselves. 
+ +### Block Retrieval from P2P network + +Non-sequencer full nodes retrieve headers and data separately from P2P stores: + +#### Header Store Retrieval Loop + +The `HeaderStoreRetrieveLoop`: + +* Operates at `BlockTime` intervals via `headerStoreCh` signals +* Tracks `headerStoreHeight` for the last retrieved header +* Retrieves all headers between last height and current store height +* Validates sequencer information using `isUsingExpectedSingleSequencer` +* Marks headers as "seen" in the header cache +* Sends headers to sync goroutine via `headerInCh` + +#### Data Store Retrieval Loop + +The `DataStoreRetrieveLoop`: + +* Operates at `BlockTime` intervals via `dataStoreCh` signals +* Tracks `dataStoreHeight` for the last retrieved data +* Retrieves all data blocks between last height and current store height +* Validates data signatures using `isValidSignedData` +* Marks data as "seen" in the data cache +* Sends data to sync goroutine via `dataInCh` + +#### Soft Confirmations + +Headers and data retrieved from P2P are marked as soft confirmed until both: + +1. The corresponding header is seen on the DA layer +2. The corresponding data is seen on the DA layer + +Once both conditions are met, the block is marked as DA-included. + +#### About Soft Confirmations and DA Inclusions + +The block manager retrieves blocks from both the P2P network and the underlying DA network because the blocks are available in the P2P network faster and DA retrieval is slower (e.g., 1 second vs 6 seconds). +The blocks retrieved from the P2P network are only marked as soft confirmed until the DA retrieval succeeds on those blocks and they are marked DA-included. +DA-included blocks are considered to have a higher level of finality. 
+ +**DAIncluderLoop**: +The `DAIncluderLoop` is responsible for advancing the `DAIncludedHeight` by: + +* Checking if blocks after the current height have both header and data marked as DA-included in caches +* Stopping advancement if either header or data is missing for a height +* Calling `SetFinal` on the executor when a block becomes DA-included +* Storing the Rollkit height to DA height mapping for tracking +* Ensuring only blocks with both header and data present are considered DA-included + +### State Update after Block Retrieval + +The block manager uses a `SyncLoop` to coordinate state updates from blocks retrieved via P2P or DA networks: + +```mermaid +flowchart TD + subgraph Sources + P1[P2P Header Store] --> H[headerInCh] + P2[P2P Data Store] --> D[dataInCh] + DA1[DA Header Retrieval] --> H + DA2[DA Data Retrieval] --> D + end + + subgraph SyncLoop + H --> S[Sync Goroutine] + D --> S + S --> C{Header & Data for Same Height?} + C -->|Yes| R[Reconstruct Block] + C -->|No| W[Wait for Matching Pair] + R --> V[Validate Signatures] + V --> A[ApplyBlock] + A --> CM[Commit] + CM --> ST[Store Block & State] + ST --> F{DA Included?} + F -->|Yes| FN[SetFinal] + F -->|No| E[End] + FN --> U[Update DA Height] + end +``` + +#### Sync Loop Architecture + +The `SyncLoop` processes headers and data from multiple sources: + +* Headers from `headerInCh` (P2P and DA sources) +* Data from `dataInCh` (P2P and DA sources) +* Maintains caches to track processed items +* Ensures ordered processing by height + +#### State Update Process + +When both header and data are available for a height: + +1. **Block Reconstruction**: Combines header and data into a complete block +2. **Validation**: Verifies header and data signatures match expectations +3. **ApplyBlock**: + * Validates the block against current state + * Executes transactions + * Captures validator updates + * Returns updated state +4. 
**Commit**: + * Persists execution results + * Updates mempool by removing included transactions + * Publishes block events +5. **Storage**: + * Stores the block, validators, and updated state + * Updates last state in manager +6. **Finalization**: + * When block is DA-included, calls `SetFinal` on executor + * Updates DA included height + +## Message Structure/Communication Format + +The communication between the block manager and executor: + +* `InitChain`: initializes the chain state with the given genesis time, initial height, and chain ID using `InitChainSync` on the executor to obtain initial `appHash` and initialize the state. +* `CreateBlock`: prepares a block with transactions from the provided batch data. +* `ApplyBlock`: validates the block, executes the block (apply transactions), captures validator updates, and returns updated state. +* `SetFinal`: marks the block as final when both its header and data are confirmed on the DA layer. +* `GetTxs`: retrieves transactions from the application (used by Reaper component). + +The communication with the sequencer: + +* `GetNextBatch`: retrieves the next batch of transactions to include in a block. +* `VerifyBatch`: validates that a batch came from the expected sequencer. + +The communication with DA layer: + +* `Submit`: submits headers or data blobs to the DA network. +* `Get`: retrieves headers or data blobs from the DA network. +* `GetHeightPair`: retrieves both header and data at a specific DA height. + +## Assumptions and Considerations + +* The block manager loads the initial state from the local store and uses genesis if not found in the local store, when the node (re)starts. +* The default mode for sequencer nodes is normal (not lazy). +* The sequencer can produce empty blocks. +* In lazy aggregation mode, the block manager maintains consistency with the DA layer by producing empty blocks at regular intervals, ensuring a 1:1 mapping between DA layer blocks and execution layer blocks. 
+* The lazy aggregation mechanism uses a dual timer approach: + * A `blockTimer` that triggers block production when transactions are available + * A `lazyTimer` that ensures blocks are produced even during periods of inactivity +* Empty batches are handled differently in lazy mode - instead of discarding them, they are returned with the `ErrNoBatch` error, allowing the caller to create empty blocks with proper timestamps. +* Transaction notifications from the `Reaper` to the `Manager` are handled via a non-blocking notification channel (`txNotifyCh`) to prevent backpressure. +* The block manager enforces `MaxPendingHeadersAndData` limit to prevent unbounded growth of pending queues during DA submission issues. +* Headers and data are submitted separately to the DA layer, supporting the header/data separation architecture. +* The block manager uses persistent caches for headers and data to track seen items and DA inclusion status. +* Gas price management includes automatic adjustment with `GasMultiplier` on DA submission retries. +* The block manager uses persistent storage (disk) when the `root_dir` and `db_path` configuration parameters are specified in `config.yaml` file under the app directory. If these configuration parameters are not specified, the in-memory storage is used, which will not be persistent if the node stops. +* The block manager does not re-apply blocks when they transition from soft confirmed to DA included status. The block is only marked DA included in the caches. +* Header and data stores use separate prefixes for isolation in the underlying database. +* The genesis `ChainID` is used to create separate `PubSubTopID`s for headers and data in go-header. +* Block sync over the P2P network works only when a full node is connected to the P2P network by specifying the initial seeds to connect to via `P2PConfig.Seeds` configuration parameter when starting the full node. 
+* Node's context is passed down to all components to support graceful shutdown and cancellation. +* The block manager supports custom signature payload providers for headers, enabling flexible signing schemes. +* The block manager supports the separation of header and data structures in Rollkit. This allows for expanding the sequencing scheme beyond single sequencing and enables the use of a decentralized sequencer mode. For detailed information on this architecture, see the [Header and Data Separation ADR](../../lazy-adr/adr-014-header-and-data-separation.md). +* The block manager processes blocks with a minimal header format, which is designed to eliminate dependency on CometBFT's header format and can be used to produce an execution layer tailored header if needed. For details on this header structure, see the [Rollkit Minimal Header](../../lazy-adr/adr-015-rollkit-minimal-header.md) specification. + +## Metrics + +The block manager exposes comprehensive metrics for monitoring: + +### Block Production Metrics + +* `last_block_produced_height`: Height of the last produced block +* `last_block_produced_time`: Timestamp of the last produced block +* `aggregation_type`: Current aggregation mode (normal/lazy) +* `block_size_bytes`: Size distribution of produced blocks +* `produced_empty_blocks_total`: Count of empty blocks produced + +### DA Metrics + +* `da_submission_attempts_total`: Total DA submission attempts +* `da_submission_success_total`: Successful DA submissions +* `da_submission_failure_total`: Failed DA submissions +* `da_retrieval_attempts_total`: Total DA retrieval attempts +* `da_retrieval_success_total`: Successful DA retrievals +* `da_retrieval_failure_total`: Failed DA retrievals +* `da_height`: Current DA retrieval height +* `pending_headers_count`: Number of headers pending DA submission +* `pending_data_count`: Number of data blocks pending DA submission + +### Sync Metrics + +* `sync_height`: Current sync height +* `da_included_height`: Height 
of last DA-included block +* `soft_confirmed_height`: Height of last soft confirmed block +* `header_store_height`: Current header store height +* `data_store_height`: Current data store height + +### Performance Metrics + +* `block_production_time`: Time to produce a block +* `da_submission_time`: Time to submit to DA +* `state_update_time`: Time to apply block and update state +* `channel_buffer_usage`: Usage of internal channels + +### Error Metrics + +* `errors_total`: Total errors by type and operation + +## Implementation + +See [block-manager] + +See [tutorial] for running a multi-node network with both sequencer and non-sequencer full nodes. + +## References + +[1] [Go Header][go-header] + +[2] [Block Sync][block-sync] + +[3] [Full Node][full-node] + +[4] [Block Manager][block-manager] + +[5] [Tutorial][tutorial] + +[6] [Header and Data Separation ADR](../../lazy-adr/adr-014-header-and-data-separation.md) + +[7] [Rollkit Minimal Header](../../lazy-adr/adr-015-rollkit-minimal-header.md) + +[8] [Data Availability](./da.md) + +[9] [Lazy Aggregation with DA Layer Consistency ADR](../../lazy-adr/adr-021-lazy-aggregation.md) + +[maxSubmitAttempts]: https://github.com/rollkit/rollkit/blob/main/block/manager.go#L50 +[defaultBlockTime]: https://github.com/rollkit/rollkit/blob/main/block/manager.go#L36 +[defaultDABlockTime]: https://github.com/rollkit/rollkit/blob/main/block/manager.go#L33 +[defaultLazyBlockTime]: https://github.com/rollkit/rollkit/blob/main/block/manager.go#L39 +[initialBackoff]: https://github.com/rollkit/rollkit/blob/main/block/manager.go#L59 +[go-header]: https://github.com/celestiaorg/go-header +[block-sync]: https://github.com/rollkit/rollkit/blob/main/pkg/sync/sync_service.go +[full-node]: https://github.com/rollkit/rollkit/blob/main/node/full.go +[block-manager]: https://github.com/rollkit/rollkit/blob/main/block/manager.go +[tutorial]: https://rollkit.dev/guides/full-node diff --git a/learn/specs/block-validity.md 
b/learn/specs/block-validity.md new file mode 100644 index 000000000..f06e569e3 --- /dev/null +++ b/learn/specs/block-validity.md @@ -0,0 +1,130 @@ +# Block and Header Validity + +## Abstract + +Like all blockchains, rollups are defined as the chain of **valid** blocks from the genesis to the head. Thus, the block and header validity rules define the chain. + +Verifying a block/header is done in 3 parts: + +1. Verify correct serialization according to the protobuf spec + +2. Perform basic validation of the types + +3. Perform verification of the new block against the previously accepted block + +Rollkit uses a header/data separation architecture where headers and data can be validated independently. The system has moved from a multi-validator model to a single signer model for simplified sequencer management. + +## Basic Validation + +Each type contains a `.ValidateBasic()` method, which verifies that certain basic invariants hold. The `ValidateBasic()` calls are nested for each structure. 
+ +### SignedHeader Validation + +```go +SignedHeader.ValidateBasic() + // Make sure the SignedHeader's Header passes basic validation + Header.ValidateBasic() + verify ProposerAddress not nil + // Make sure the SignedHeader's signature passes basic validation + Signature.ValidateBasic() + // Ensure that someone signed the header + verify len(c.Signatures) not 0 + // For based rollups (sh.Signer.IsEmpty()), pass validation + if !sh.Signer.IsEmpty(): + // Verify the signer matches the proposer address + verify sh.Signer.Address == sh.ProposerAddress + // Verify signature using custom verifier if set, otherwise use default + if sh.verifier != nil: + verify sh.verifier(sh) == nil + else: + verify sh.Signature.Verify(sh.Signer.PubKey, sh.Header.MarshalBinary()) +``` + +### SignedData Validation + +```go +SignedData.ValidateBasic() + // Always passes basic validation for the Data itself + Data.ValidateBasic() // always passes + // Make sure the signature is valid + Signature.ValidateBasic() + verify len(c.Signatures) not 0 + // Verify the signer + If !sd.Signer.IsEmpty(): + verify sd.Signature.Verify(sd.Signer.PubKey, sd.Data.MarshalBinary()) +``` + +### Block Validation + +Blocks are composed of SignedHeader and Data: + +```go +// Block validation happens by validating header and data separately +// then ensuring data hash matches +verify SignedHeader.ValidateBasic() == nil +verify Data.Hash() == SignedHeader.DataHash +``` + +## Verification Against Previous Block + +```go +SignedHeader.Verify(untrustedHeader *SignedHeader) + // Basic validation is handled by go-header before this + Header.Verify(untrustedHeader) + // Verify height sequence + if untrustedHeader.Height != h.Height + 1: + if untrustedHeader.Height > h.Height + 1: + return soft verification failure + return error "headers are not adjacent" + // Verify the link to previous header + verify untrustedHeader.LastHeaderHash == h.Header.Hash() + // Verify LastCommit hash matches previous signature + verify 
untrustedHeader.LastCommitHash == sh.Signature.GetCommitHash(...) + // Note: ValidatorHash field exists for compatibility but is not validated +``` + +## [Data](https://github.com/rollkit/rollkit/blob/main/types/data.go) + +| **Field Name** | **Valid State** | **Validation** | +|----------------|-----------------------------------------|------------------------------------| +| Txs | Transaction data of the block | Data.Hash() == SignedHeader.DataHash | +| Metadata | Optional p2p gossiping metadata | Not validated | + +## [SignedHeader](https://github.com/rollkit/rollkit/blob/main/types/signed_header.go) + +| **Field Name** | **Valid State** | **Validation** | +|----------------|--------------------------------------------------------------------------|---------------------------------------------------------------------------------------------| +| Header | Valid header for the block | `Header` passes `ValidateBasic()` and `Verify()` | +| Signature | Valid signature from the single sequencer | `Signature` passes `ValidateBasic()`, verified against signer | +| Signer | Information about who signed the header | Must match ProposerAddress if not empty (based rollup case) | +| verifier | Optional custom signature verification function | Used instead of default verification if set | + +## [Header](https://github.com/rollkit/rollkit/blob/main/types/header.go) + +***Note***: Rollkit has moved to a single signer model. The multi-validator architecture has been replaced with a simpler single sequencer approach. + +| **Field Name** | **Valid State** | **Validation** | +|---------------------|--------------------------------------------------------------------------------------------|---------------------------------------| +| **BaseHeader** | | | +| Height | Height of the previous accepted header, plus 1. 
| checked in the `Verify()` step | +| Time | Timestamp of the block | Not validated in Rollkit | +| ChainID | The hard-coded ChainID of the chain | Should be checked as soon as the header is received | +| **Header** | | | +| Version | unused | | +| LastHeaderHash | The hash of the previous accepted block | checked in the `Verify()` step | +| LastCommitHash | The hash of the previous accepted block's commit | checked in the `Verify()` step | +| DataHash | Correct hash of the block's Data field | checked in the `ValidateBasic()` step | +| ConsensusHash | unused | | +| AppHash | The correct state root after executing the block's transactions against the accepted state | checked during block execution | +| LastResultsHash | Correct results from executing transactions | checked during block execution | +| ProposerAddress | Address of the expected proposer | Must match Signer.Address in SignedHeader | +| ValidatorHash | Compatibility field for Tendermint light client | Not validated | + +## [Signer](https://github.com/rollkit/rollkit/blob/main/types/signed_header.go) + +The Signer type replaces the previous ValidatorSet for single sequencer operation: + +| **Field Name** | **Valid State** | **Validation** | +|----------------|-----------------------------------------------------------------|-----------------------------| +| PubKey | Public key of the signer | Must not be nil if Signer is not empty | +| Address | Address derived from the public key | Must match ProposerAddress | diff --git a/learn/specs/da.md b/learn/specs/da.md new file mode 100644 index 000000000..19f28d340 --- /dev/null +++ b/learn/specs/da.md @@ -0,0 +1,33 @@ +# DA + +Rollkit provides a generic [data availability interface][da-interface] for modular blockchains. Any DA that implements this interface can be used with Rollkit. + +## Details + +`Client` can connect via JSON-RPC transports using Rollkit's [jsonrpc][jsonrpc] implementations. 
The connection can be configured using the following cli flags: + +* `--rollkit.da.address`: url address of the DA service (default: "grpc://localhost:26650") +* `--rollkit.da.auth_token`: authentication token of the DA service +* `--rollkit.da.namespace`: namespace to use when submitting blobs to the DA service + +Given a set of blocks to be submitted to DA by the block manager, the `SubmitBlocks` first encodes the blocks using protobuf (the encoded data are called blobs) and invokes the `Submit` method on the underlying DA implementation. On successful submission (`StatusSuccess`), the DA block height in which the blocks were included is returned. + +To make sure that the serialised blocks don't exceed the underlying DA's blob limits, it fetches the blob size limit by calling `Config` which returns the limit as `uint64` bytes, then includes serialised blocks until the limit is reached. If the limit is reached, it submits the partial set and returns the count of successfully submitted blocks as `SubmittedCount`. The caller should retry with the remaining blocks until all the blocks are submitted. If the first block itself is over the limit, it throws an error. + +The `Submit` call may result in an error (`StatusError`) based on the underlying DA implementation in the following scenarios: + +* the total blobs size exceeds the underlying DA's limits (includes empty blobs) +* implementation-specific failures, e.g., for [celestia-da-json-rpc][jsonrpc], an invalid namespace, being unable to create the commitment or proof, or setting too low a gas price could return an error. + +The `RetrieveBlocks` retrieves the blocks for a given DA height using `GetIDs` and `Get` methods. If there are no blocks available for a given DA height, `StatusNotFound` is returned (which is not an error case). The retrieved blobs are converted back to blocks and returned on successful retrieval. 
+ +Both `SubmitBlocks` and `RetrieveBlocks` may be unsuccessful if the DA node and the DA blockchain that the DA implementation is using have failures. For example, failures such as, DA mempool is full, DA submit transaction is nonce clashing with other transaction from the DA submitter account, DA node is not synced, etc. + +## References + +[1] [da-interface][da-interface] + +[2] [jsonrpc][jsonrpc] + +[da-interface]: https://github.com/rollkit/rollkit/blob/main/core/da/da.go#L11 +[jsonrpc]: https://github.com/rollkit/rollkit/tree/main/da/jsonrpc diff --git a/learn/specs/full_node.md b/learn/specs/full_node.md new file mode 100644 index 000000000..b7e5d510b --- /dev/null +++ b/learn/specs/full_node.md @@ -0,0 +1,99 @@ +# Full Node + +## Abstract + +A Full Node is a top-level service that encapsulates different components of Rollkit and initializes/manages them. + +## Details + +### Full Node Details + +A Full Node is initialized inside the Cosmos SDK start script along with the node configuration, a private key to use in the P2P client, a private key for signing blocks as a block proposer, a client creator, a genesis document, and a logger. It uses them to initialize the components described above. The components TxIndexer, BlockIndexer, and IndexerService exist to ensure cometBFT compatibility since they are needed for most of the RPC calls from the `SignClient` interface from cometBFT. + +Note that unlike a light node which only syncs and stores block headers seen on the P2P layer, the full node also syncs and stores full blocks seen on both the P2P network and the DA layer. Full blocks contain all the transactions published as part of the block. + +The Full Node mainly encapsulates and initializes/manages the following components: + +### genesisDoc + +The [genesis] document contains information about the initial state of the chain, in particular its validator set. 
+ +### conf + +The [node configuration] contains all the necessary settings for the node to be initialized and function properly. + +### P2P + +The [peer-to-peer client] is used to gossip transactions between full nodes in the network. + +### Store + +The [Store] is initialized with `DefaultStore`, an implementation of the [store interface] which is used for storing and retrieving blocks, commits, and state. | + +### blockManager + +The [Block Manager] is responsible for managing block-related operations including: + +- Block production (normal and lazy modes) +- Header and data submission to DA layer +- Block retrieval and synchronization +- State updates and finalization + +It implements a header/data separation architecture where headers and transaction data are handled independently. + +### dalc + +The [Data Availability Layer Client][dalc] is used to interact with the data availability layer. It is initialized with the DA Layer and DA Config specified in the node configuration. + +### hSyncService + +The [Header Sync Service] is used for syncing signed headers between nodes over P2P. It operates independently from data sync to support light clients. + +### dSyncService + +The [Data Sync Service] is used for syncing transaction data between nodes over P2P. This service is only used by full nodes, not light nodes. + +## Message Structure/Communication Format + +The Full Node communicates with other nodes in the network using the P2P client. It also communicates with the application using the ABCI proxy connections. The communication format is based on the P2P and ABCI protocols. + +## Assumptions and Considerations + +The Full Node assumes that the configuration, private keys, client creator, genesis document, and logger are correctly passed in by the Cosmos SDK. It also assumes that the P2P client, data availability layer client, block manager, and other services can be started and stopped without errors. 
+ +## Implementation + +See [full node] + +## References + +[1] [Full Node][full node] + +[2] [Genesis Document][genesis] + +[3] [Node Configuration][node configuration] + +[4] [Peer to Peer Client][peer-to-peer client] + +[5] [Store][Store] + +[6] [Store Interface][store interface] + +[7] [Block Manager][block manager] + +[8] [Data Availability Layer Client][dalc] + +[9] [Header Sync Service][Header Sync Service] + +[10] [Data Sync Service][Data Sync Service] + +[full node]: https://github.com/rollkit/rollkit/blob/main/node/full.go +[genesis]: https://github.com/cometbft/cometbft/blob/main/spec/core/genesis.md +[node configuration]: https://github.com/rollkit/rollkit/blob/main/pkg/config/config.go +[peer-to-peer client]: https://github.com/rollkit/rollkit/blob/main/pkg/p2p/client.go +[Store]: https://github.com/rollkit/rollkit/blob/main/pkg/store/store.go +[store interface]: https://github.com/rollkit/rollkit/blob/main/pkg/store/types.go +[Block Manager]: https://github.com/rollkit/rollkit/blob/main/block/manager.go +[dalc]: https://github.com/rollkit/rollkit/blob/main/core/da/da.go +[Header Sync Service]: https://github.com/rollkit/rollkit/blob/main/pkg/sync/sync_service.go +[Data Sync Service]: https://github.com/rollkit/rollkit/blob/main/pkg/sync/sync_service.go diff --git a/learn/specs/header-sync.md b/learn/specs/header-sync.md new file mode 100644 index 000000000..f497ef65b --- /dev/null +++ b/learn/specs/header-sync.md @@ -0,0 +1,109 @@ +# Header and Data Sync + +## Abstract + +The nodes in the P2P network sync headers and data using separate sync services that implement the [go-header][go-header] interface. Rollkit uses a header/data separation architecture where headers and transaction data are synchronized independently through parallel services. Each sync service consists of several components as listed below. 
+ +|Component|Description| +|---|---| +|store| a prefixed [datastore][datastore] where synced items are stored (`headerSync` prefix for headers, `dataSync` prefix for data)| +|subscriber| a [libp2p][libp2p] node pubsub subscriber for the specific data type| +|P2P server| a server for handling requests between peers in the P2P network| +|exchange| a client that enables sending in/out-bound requests from/to the P2P network| +|syncer| a service for efficient synchronization. When a P2P node falls behind and wants to catch up to the latest network head via P2P network, it can use the syncer.| + +## Details + +Rollkit implements two separate sync services: + +### Header Sync Service + +- Synchronizes `SignedHeader` structures containing block headers with signatures +- Used by all node types (sequencer, full, and light) +- Essential for maintaining the canonical view of the chain + +### Data Sync Service + +- Synchronizes `Data` structures containing transaction data +- Used only by full nodes and sequencers +- Light nodes do not run this service as they only need headers + +Both services: + +- Utilize the generic `SyncService[H header.Header[H]]` implementation +- Inherit the `ConnectionGater` from the node's P2P client for peer management +- Use `NodeConfig.BlockTime` to determine outdated items during sync +- Operate independently on separate P2P topics and datastores + +### Consumption of Sync Services + +#### Header Sync + +- Sequencer nodes publish signed headers to the P2P network after block creation +- Full and light nodes receive and store headers for chain validation +- Headers contain commitments (DataHash) that link to the corresponding data + +#### Data Sync + +- Sequencer nodes publish transaction data separately from headers +- Only full nodes receive and store data (light nodes skip this) +- Data is linked to headers through the DataHash commitment + +#### Parallel Broadcasting + +The block manager broadcasts headers and data in parallel when publishing 
blocks: + +- Headers are sent through `headerBroadcaster` +- Data is sent through `dataBroadcaster` +- This enables efficient network propagation of both components + +## Assumptions + +- Separate datastores are created with different prefixes: + - Headers: `headerSync` prefix on the main datastore + - Data: `dataSync` prefix on the main datastore +- Network IDs are suffixed to distinguish services: + - Header sync: `{network}-headerSync` + - Data sync: `{network}-dataSync` +- Chain IDs for pubsub topics are also separated: + - Headers: `{chainID}-headerSync` creates topic like `/gm-headerSync/header-sub/v0.0.1` + - Data: `{chainID}-dataSync` creates topic like `/gm-dataSync/header-sub/v0.0.1` +- Both stores must be initialized with genesis items before starting: + - Header store needs genesis header + - Data store needs genesis data (if applicable) +- Genesis items can be loaded via `NodeConfig.TrustedHash` or P2P network query +- Sync services work only when connected to P2P network via `P2PConfig.Seeds` +- Node context is passed to all components for graceful shutdown +- Headers and data are linked through DataHash but synced independently + +## Implementation + +The sync service implementation can be found in [pkg/sync/sync_service.go][sync-service]. The generic `SyncService[H header.Header[H]]` is instantiated as: + +- `HeaderSyncService` for syncing `*types.SignedHeader` +- `DataSyncService` for syncing `*types.Data` + +Full nodes create and start both services, while light nodes only start the header sync service. The services are created in [full][fullnode] and [light][lightnode] node implementations. 
+ +The block manager integrates with both services through: + +- `HeaderStoreRetrieveLoop()` for retrieving headers from P2P +- `DataStoreRetrieveLoop()` for retrieving data from P2P +- Separate broadcast channels for publishing headers and data + +## References + +[1] [Header Sync][sync-service] + +[2] [Full Node][fullnode] + +[3] [Light Node][lightnode] + +[4] [go-header][go-header] + +[sync-service]: https://github.com/rollkit/rollkit/blob/main/pkg/sync/sync_service.go +[fullnode]: https://github.com/rollkit/rollkit/blob/main/node/full.go +[lightnode]: https://github.com/rollkit/rollkit/blob/main/node/light.go +[go-header]: https://github.com/celestiaorg/go-header +[libp2p]: https://github.com/libp2p/go-libp2p +[datastore]: https://github.com/ipfs/go-datastore diff --git a/learn/specs/out-of-order-blocks.png b/learn/specs/out-of-order-blocks.png new file mode 100644 index 000000000..fa7a955cb Binary files /dev/null and b/learn/specs/out-of-order-blocks.png differ diff --git a/learn/specs/p2p.md b/learn/specs/p2p.md new file mode 100644 index 000000000..0e96f065c --- /dev/null +++ b/learn/specs/p2p.md @@ -0,0 +1,60 @@ +# P2P + +Every node (both full and light) runs a P2P client using [go-libp2p][go-libp2p] P2P networking stack for gossiping transactions in the chain's P2P network. The same P2P client is also used by the header and block sync services for gossiping headers and blocks. + +Following parameters are required for creating a new instance of a P2P client: + +* P2PConfig (described below) +* [go-libp2p][go-libp2p] private key used to create a libp2p connection and join the p2p network. +* chainID: identifier used as namespace within the p2p network for peer discovery. The namespace acts as a sub network in the p2p network, where peer connections are limited to the same namespace. +* datastore: an instance of [go-datastore][go-datastore] used for creating a connection gator and stores blocked and allowed peers. 
* logger + +```go +// P2PConfig stores configuration related to peer-to-peer networking. +type P2PConfig struct { + ListenAddress string // Address to listen for incoming connections + Seeds string // Comma separated list of seed nodes to connect to + BlockedPeers string // Comma separated list of nodes to ignore + AllowedPeers string // Comma separated list of nodes to whitelist +} +``` + +A P2P client also instantiates a [connection gater][conngater] to block and allow peers specified in the `P2PConfig`. + +It also sets up a gossiper using the gossip topic `chainID+txTopicSuffix` (`txTopicSuffix` is defined in [p2p/client.go][client.go]), a Distributed Hash Table (DHT) using the `Seeds` defined in the `P2PConfig` and peer discovery using go-libp2p's `discovery.RoutingDiscovery`. + +A P2P client provides an interface `SetTxValidator(p2p.GossipValidator)` for specifying a gossip validator which can define how to handle the incoming `GossipMessage` in the P2P network. The `GossipMessage` represents a message gossiped via the P2P network (e.g. transaction, Block etc). + +```go +// GossipValidator is a callback function type. +type GossipValidator func(*GossipMessage) bool +``` + +The full nodes define a transaction validator (shown below) as gossip validator for processing the gossiped transactions to add to the mempool, whereas light nodes simply pass a dummy validator as light nodes do not process gossiped transactions. + +```go +// newTxValidator creates a pubsub validator that uses the node's mempool to check the +// transaction. 
If the transaction is valid, then it is added to the mempool +func (n *FullNode) newTxValidator() p2p.GossipValidator { +``` + +```go +// Dummy validator that always returns a callback function with boolean `false` +func (ln *LightNode) falseValidator() p2p.GossipValidator { +``` + +## References + +[1] [client.go][client.go] + +[2] [go-datastore][go-datastore] + +[3] [go-libp2p][go-libp2p] + +[4] [conngater][conngater] + +[client.go]: https://github.com/rollkit/rollkit/blob/main/pkg/p2p/client.go +[go-datastore]: https://github.com/ipfs/go-datastore +[go-libp2p]: https://github.com/libp2p/go-libp2p +[conngater]: https://github.com/libp2p/go-libp2p/tree/master/p2p/net/conngater diff --git a/learn/specs/store.md b/learn/specs/store.md new file mode 100644 index 000000000..b92c8c271 --- /dev/null +++ b/learn/specs/store.md @@ -0,0 +1,92 @@ +# Store + +## Abstract + +The Store interface defines methods for storing and retrieving blocks, commits, and the state of the blockchain. + +## Protocol/Component Description + +The Store interface defines the following methods: + +- `Height`: Returns the height of the highest block in the store. +- `SetHeight`: Sets given height in the store if it's higher than the existing height in the store. +- `SaveBlock`: Saves a block (containing both header and data) along with its seen signature. +- `GetBlock`: Returns a block at a given height. +- `GetBlockByHash`: Returns a block with a given block header hash. + +Note: While blocks are stored as complete units in the store, the block manager handles headers and data separately during synchronization and DA layer interaction. + +- `SaveBlockResponses`: Saves block responses in the Store. +- `GetBlockResponses`: Returns block results at a given height. +- `GetSignature`: Returns a signature for a block at a given height. +- `GetSignatureByHash`: Returns a signature for a block with a given block header hash. +- `UpdateState`: Updates the state saved in the Store. 
Only one State is stored. +- `GetState`: Returns the last state saved with UpdateState. +- `SaveValidators`: Saves the validator set at a given height. +- `GetValidators`: Returns the validator set at a given height. + +The `TxnDatastore` interface inside [go-datastore] is used for constructing different key-value stores for the underlying storage of a full node. There are two different implementations of `TxnDatastore` in [kv.go]: + +- `NewDefaultInMemoryKVStore`: Builds a key-value store that uses the [BadgerDB] library and operates in-memory, without accessing the disk. Used only across unit tests and integration tests. + +- `NewDefaultKVStore`: Builds a key-value store that uses the [BadgerDB] library and stores the data on disk at the specified path. + +A Rollkit full node is [initialized][full_node_store_initialization] using `NewDefaultKVStore` as the base key-value store for underlying storage. To store various types of data in this base key-value store, different prefixes are used: `mainPrefix`, `dalcPrefix`, and `indexerPrefix`. The `mainPrefix` equal to `0` is used for the main node data, `dalcPrefix` equal to `1` is used for Data Availability Layer Client (DALC) data, and `indexerPrefix` equal to `2` is used for indexing related data. + +For the main node data, `DefaultStore` struct, an implementation of the Store interface, is used with the following prefixes for various types of data within it: + +- `blockPrefix` with value "b": Used to store complete blocks in the key-value store. +- `indexPrefix` with value "i": Used to index the blocks stored in the key-value store. +- `commitPrefix` with value "c": Used to store commits related to the blocks. +- `statePrefix` with value "s": Used to store the state of the blockchain. +- `responsesPrefix` with value "r": Used to store responses related to the blocks. +- `validatorsPrefix` with value "v": Used to store validator sets at a given height. 
+ +Additional prefixes used by sync services: + +- `headerSyncPrefix` with value "hs": Used by the header sync service for P2P synced headers. +- `dataSyncPrefix` with value "ds": Used by the data sync service for P2P synced transaction data. + +For example, in a call to `GetBlockByHash` for some block hash `<hash>`, the key used in the full node's base key-value store will be `/0/b/<hash>` where `0` is the main store prefix and `b` is the block prefix. Similarly, in a call to `GetValidators` for some height `<height>`, the key used in the full node's base key-value store will be `/0/v/<height>` where `0` is the main store prefix and `v` is the validator set prefix. + +Inside the key-value store, the value of these various types of data like `Block` is stored as a byte array which is encoded and decoded using the corresponding Protobuf [marshal and unmarshal methods][serialization]. + +The store is most widely used inside the [block manager] to perform its functions correctly. Within the block manager, since multiple go-routines run concurrently, access to the store is protected by a mutex lock, `lastStateMtx`, to synchronize read/write access and prevent race conditions. + +## Message Structure/Communication Format + +The Store does not communicate over the network, so there is no message structure or communication format. + +## Assumptions and Considerations + +The Store assumes that the underlying datastore is reliable and provides atomicity for transactions. It also assumes that the data passed to it for storage is valid and correctly formatted. + +## Implementation + +See [Store Interface][store_interface] and [Default Store][default_store] for its implementation. 
+ +## References + +[1] [Store Interface][store_interface] + +[2] [Default Store][default_store] + +[3] [Full Node Store Initialization][full_node_store_initialization] + +[4] [Block Manager][block manager] + +[5] [Badger DB][BadgerDB] + +[6] [Go Datastore][go-datastore] + +[7] [Key Value Store][kv.go] + +[8] [Serialization][serialization] + +[store_interface]: https://github.com/rollkit/rollkit/blob/main/pkg/store/types.go#L11 +[default_store]: https://github.com/rollkit/rollkit/blob/main/pkg/store/store.go +[full_node_store_initialization]: https://github.com/rollkit/rollkit/blob/main/node/full.go#L96 +[block manager]: https://github.com/rollkit/rollkit/blob/main/block/manager.go +[BadgerDB]: https://github.com/dgraph-io/badger +[go-datastore]: https://github.com/ipfs/go-datastore +[kv.go]: https://github.com/rollkit/rollkit/blob/main/pkg/store/kv.go +[serialization]: https://github.com/rollkit/rollkit/blob/main/types/serialization.go diff --git a/learn/specs/template.md b/learn/specs/template.md new file mode 100644 index 000000000..2effeb880 --- /dev/null +++ b/learn/specs/template.md @@ -0,0 +1,103 @@ +# Protocol/Component Name + +## Abstract + +Provide a concise description of the purpose of the component for which the +specification is written, along with its contribution to the rollkit or +other relevant parts of the system. Make sure to include proper references to +the relevant sections. + +## Protocol/Component Description + +Offer a comprehensive explanation of the protocol, covering aspects such as data +flow, communication mechanisms, and any other details necessary for +understanding the inner workings of this component. + +## Message Structure/Communication Format + +If this particular component is expected to communicate over the network, +outline the structure of the message protocol, including details such as field +interpretation, message format, and any other relevant information. 
+ +## Assumptions and Considerations + +If there are any assumptions required for the component's correct operation, +performance, security, or other expected features, outline them here. +Additionally, provide any relevant considerations related to security or other +concerns. + +## Implementation + +Include a link to the location where the implementation of this protocol can be +found. Note that specific implementation details should be documented in the +rollkit repository rather than in the specification document. + +## References + +List any references used or cited in the document. + +## General Tips + +### How to use a mermaid diagram that you can display in a markdown + +```mermaid + +sequenceDiagram + title Example + participant A + participant B + A->>B: Example + B->>A: Example + + ``` + + ```mermaid + +graph LR + A[Example] --> B[Example] + B --> C[Example] + C --> A + + ``` + + ```mermaid + +gantt + title Example + dateFormat YYYY-MM-DD + section Example + A :done, des1, 2014-01-06,2014-01-08 + B :done, des2, 2014-01-06,2014-01-08 + C :done, des3, 2014-01-06,2014-01-08 + + ``` + +### Grammar and spelling check + +The recommendation is to use your favorite spellchecker extension in your IDE like [grammarly], to make sure that the document is free of spelling and grammar errors. + +### Use of links + +If you want to use links use proper syntax. 
This goes for both internal and external links like [documentation] or [external links] + +At the bottom of the document in the [References](#references) section, you can add the following footnotes that will be visible in the markdown document: + +[1] [Grammarly][grammarly] + +[2] [Documentation][documentation] + +[3] [external links][external links] + +Then at the bottom add the actual links that will not be visible in the markdown document: + +[grammarly]: https://www.grammarly.com/ +[documentation]: ../README.md +[external links]: https://github.com/celestiaorg/go-header + +### Use of tables + +If you are describing variables, components or other things in a structured list that can be described in a table use the following syntax: + +| Name | Type | Description | +| ---- | ---- | ----------- | +| `name` | `type` | Description | diff --git a/learn/specs/termination.png b/learn/specs/termination.png new file mode 100644 index 000000000..0b61c8f23 Binary files /dev/null and b/learn/specs/termination.png differ diff --git a/specs/overview.md b/specs/overview.md new file mode 100644 index 000000000..6de489024 --- /dev/null +++ b/specs/overview.md @@ -0,0 +1,17 @@ +# Specs Overview + +Welcome to the Rollkit Technical Specifications. + +This is comprehensive documentation on the inner components of Rollkit, including data storage, transaction processing, and more. It’s an essential resource for developers looking to understand, contribute to, and leverage the full capabilities of Rollkit. + +Each file in this folder covers a specific aspect of the system, from block management to data availability and networking. Use this page as a starting point to explore the technical details and architecture of Rollkit. + +## Table of Contents + +- [Block Manager](/learn/specs/block-manager.md): Explains the responsibilities and logic of the block manager in Rollkit. 
+- [Block Validity](/learn/specs/block-validity.md): Details the rules and checks for block validity within the protocol. +- [Data Availability (DA)](/learn/specs/da.md): Describes how Rollkit ensures data availability and integrates with DA layers. +- [Full Node](/learn/specs/full_node.md): Outlines the architecture and operation of a full node in Rollkit. +- [Header Sync](/learn/specs/header-sync.md): Covers the process and protocol for synchronizing block headers. +- [P2P](/learn/specs/p2p.md): Documents the peer-to-peer networking layer and its protocols. +- [Store](/learn/specs/store.md): Provides information about the storage subsystem and data management.