diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 000000000..47b505ec3 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,101 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Build, Test, and Development Commands + +### Build Commands +- `npm run build:dev` - Development build with optimizations disabled +- `npm run build:release` - Production build with bytecode compilation +- `npm run build` - Alias for build:dev +- `./build` - Build script in parent directory (NOT build.sh) + +### Testing +- `npm test` - Run all tests +- `npm run test-watch` - Run tests in watch mode +- `npm run test:unit` - Run unit tests only +- `npm run test:integration` - Run integration tests + +### Code Quality +- `npm run lint` - Run ESLint checks +- `npm run fix` - Auto-fix ESLint issues +- `npm run format-check` - Check code formatting with Prettier +- `npm run format-fix` - Auto-fix formatting issues + +### Development +- Node.js version: 18.19.1 (use nvm) +- `npm run compile` - Compile TypeScript without webpack +- `npm start` - Start the application + +## High-Level Architecture + +Shardus Core is a distributed systems framework for building sharded applications. The architecture follows a layered, modular design: + +### Core Components + +1. **Main Entry (`src/shardus/index.ts`)** + - Central `Shardus` class extending EventEmitter + - Orchestrates all subsystems and provides the main API + - Large files in this module are split with methods bound via `Object.assign()` to the prototype + +2. **State Management (`src/state-manager/`)** + - `StateManager` - Central coordinator for state operations + - `TransactionQueue` - Manages transaction lifecycle (split into .core, .entry, .fact, .handlers modules) + - `AccountCache` - High-performance account state caching + - `AccountPatcher` - Repairs inconsistent states (split into .finder, .handlers modules) + - `TransactionConsensus` - Distributed consensus mechanism + - Files exceeding 25k tokens are split into sub-modules + +3. **P2P Networking (`src/p2p/`)** + - `Context` - Dependency injection container + - `Wrapper` - Main P2P interface + - `CycleCreator/Chain` - Network cycle synchronization + - `Join/v2` - Node joining protocol + - `NodeList` - Active node tracking + - `Apoptosis` - Graceful shutdown + +4. **Key Architectural Patterns** + - **Event-Driven**: EventEmitter for loose coupling + - **Sharding**: Dynamic partition assignment + - **Gossip Protocol**: Efficient message propagation + - **Consensus**: Multi-phase with receipt-based voting + - **Fault Tolerance**: Automatic node failure detection and state repair + +### Data Flow +1. Transactions → `put()` → `TransactionQueue` +2. Queue validates → assigns consensus groups +3. Consensus voting → state application via app callbacks +4. 
Results propagated → network and archivers + +### Application Interface +Applications must implement: +- `validate(tx, appData)` - Transaction validation +- `crack(tx, appData)` - Extract transaction metadata +- `apply(tx, wrappedStates, appData)` - Apply state changes +- `updateAccountFull/Partial()` - Account state updates +- `calculateAccountHash()` - State verification + +### Module Dependencies +- Most modules depend on `Context` for shared services +- State Manager depends on P2P for network communication +- P2P modules use Network layer for transport +- All modules use Logger and Utils + +### Important Configurations +- Network modes: `forming`, `processing`, `safety`, `recovery`, `restart`, `restore`, `shutdown` +- Cycle phases: Q1-Q4 with specific timing for different operations +- State syncing happens during node join and continuous operation + +## Token Limits and File Splitting + +Due to LLM token limits (25k), large files are split: +- Methods are extracted to separate files and bound back using `Object.assign(Class.prototype, methods)` +- Look for imports like `methodsFromFile` or files ending in `.handlers.ts`, `.finder.ts`, etc. +- When modifying split files, ensure method signatures match the original class interface + +## Network Communication + +- Internal routes use binary serialization for efficiency +- Most endpoints have both JSON and binary versions +- Binary handlers are in `src/types/` with serialize/deserialize functions +- Route enums in `src/types/enum/InternalRouteEnum.ts` \ No newline at end of file diff --git a/docs/api-endpoints.md b/docs/api-endpoints.md new file mode 100644 index 000000000..9005d2943 --- /dev/null +++ b/docs/api-endpoints.md @@ -0,0 +1,390 @@ +# API Endpoints Documentation + +This document provides a comprehensive list of API endpoints available in the project. + +## Table of Contents + +- [Authentication](#authentication) +- [External API Endpoints](#external-api-endpoints) + - [GET Endpoints](#get-endpoints) + - [POST Endpoints](#post-endpoints) +- [Internal API Endpoints](#internal-api-endpoints) + - [Internal Binary Routes](#internal-binary-routes) + - [Gossip Routes](#gossip-routes) + +## Authentication + +Many API endpoints require authentication through debug mode middleware. The project uses different levels of debug mode middleware for authentication: + +- `isDebugModeMiddleware` - Basic debug mode check +- `isDebugModeMiddlewareLow` - Low-level debug access +- `isDebugModeMiddlewareMedium` - Medium-level debug access +- `isDebugModeMiddlewareHigh` - High-level debug access +- `isDebugModeMiddlewareMultiSig` - Multi-signature debug access + +## External API Endpoints + +These are the HTTP endpoints exposed to clients that can be accessed via standard HTTP requests. + +### GET Endpoints + +#### Core Node Information + +1. `/nodeInfo` + - **Description**: Returns information about the current node + - **Parameters**: + - `reportIntermediateStatus` (optional, boolean): Include intermediate status + - `debug` (optional, boolean): Include debug information + - **Response**: Node information including ID, status, and app data + - **Authentication**: None + +2. `/config` + - **Description**: Returns the server configuration + - **Response**: JSON object containing the server configuration + - **Authentication**: Requires debug mode middleware (low level) + +3. `/netconfig` + - **Description**: Returns the network configuration + - **Response**: JSON object containing network configuration + - **Authentication**: None + +4. 
`/network-stats` + - **Description**: Returns network statistics + - **Response**: JSON object with network statistics + - **Authentication**: None + +#### Node Status and Joining + +5. `/joinInfo` + - **Description**: Returns detailed information about the node's join status + - **Response**: JSON object with join-related information including standby list + - **Authentication**: Requires debug mode middleware (medium level) + +6. `/standby-list-debug` + - **Description**: Returns the list of nodes in standby + - **Response**: Array of nodes in the standby list + - **Authentication**: Requires debug mode middleware (low level) + +7. `/status-history` + - **Description**: Returns the node's status history + - **Response**: JSON object with status history + - **Authentication**: Requires debug mode middleware (low level) + +8. `/debug-neverGoActive` + - **Description**: Toggles the neverGoActive flag + - **Response**: Current state of neverGoActive flag + - **Authentication**: Requires debug mode middleware + +#### Archivers and Data + +9. `/archivers` + - **Description**: Returns list of archivers + - **Response**: JSON object with archiver information + - **Authentication**: None + +10. `/joinedArchiver/:publicKey` + - **Description**: Checks if an archiver is joined + - **Parameters**: `publicKey` (path parameter) + - **Response**: Boolean indicating if the archiver is joined + - **Authentication**: None + +11. `/datarecipients` + - **Description**: Returns data recipients + - **Response**: JSON object with data recipients + - **Authentication**: None + +12. `/download-snapshot-data` + - **Description**: Downloads snapshot data + - **Response**: Snapshot data stream + - **Authentication**: None + +#### System Control + +13. `/forceCycleSync` + - **Description**: Forces cycle synchronization + - **Parameters**: `enable` (boolean): Enable/disable forced cycle sync + - **Response**: Socket report + - **Authentication**: Requires debug mode middleware + +#### Time Management + +14. `/calculate-fake-time-offset` + - **Description**: Calculates a fake time offset + - **Parameters**: + - `shift` (integer): Time shift + - `spread` (integer): Time spread + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware (high level) + +15. `/clear-fake-time-offset` + - **Description**: Clears the fake time offset + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware (high level) + +16. `/time-report` + - **Description**: Returns time-related information + - **Response**: JSON object with time information + - **Authentication**: Requires debug mode middleware + +#### Logging and Debugging + +17. `/log-fatal` + - **Description**: Sets log level to fatal + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware (medium level) + +18. `/log-disable` + - **Description**: Disables all logs + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware (medium level) + +19. `/log-error` + - **Description**: Sets log level to error + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware (medium level) + +20. `/log-default` + - **Description**: Sets log level to default + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware (medium level) + +21. 
`/log-flag` + - **Description**: Sets a specific log flag + - **Parameters**: + - `name` (string): Flag name + - `value` (boolean): Flag value + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware (medium level) + +22. `/log-getflags` + - **Description**: Gets all log flags + - **Response**: JSON object with all log flags + - **Authentication**: Requires debug mode middleware (low level) + +23. `/debug-clearlog` + - **Description**: Clears logs + - **Parameters**: `file` (string): File to clear + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware (medium level) + +24. `/debug` + - **Description**: Gets debug archive + - **Response**: Debug archive stream + - **Authentication**: Requires debug mode middleware (medium level) + +25. `/debug-logfile` + - **Description**: Gets a specific log file + - **Parameters**: `file` (string): File to get + - **Response**: Log file content + - **Authentication**: Requires debug mode middleware (medium level) + +#### Performance Monitoring + +26. `/socketReport` + - **Description**: Returns a report of socket connections + - **Response**: JSON object with socket connection details + - **Authentication**: Requires debug mode middleware (low level) + +27. `/memory` + - **Description**: Gets memory usage information + - **Response**: Memory usage details + - **Authentication**: Requires debug mode middleware (low level) + +28. `/memory-short` + - **Description**: Gets short memory usage information + - **Response**: Brief memory usage details + - **Authentication**: Requires debug mode middleware + +29. `/memory-gc` + - **Description**: Triggers garbage collection + - **Response**: Memory usage after garbage collection + - **Authentication**: Requires debug mode middleware + +30. `/perf` + - **Description**: Gets performance report + - **Response**: Performance metrics + - **Authentication**: Requires debug mode middleware + +31. `/perf-scoped` + - **Description**: Gets scoped performance report + - **Response**: Scoped performance metrics + - **Authentication**: Requires debug mode middleware + +32. `/combined-debug` + - **Description**: Gets combined debug information + - **Parameters**: `wait` (integer): Wait time in seconds + - **Response**: Combined debug information + - **Authentication**: Requires debug mode middleware (low level) + +#### Load Management + +33. `/loadset` + - **Description**: Sets load parameters + - **Parameters**: `load` (number): Load value + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware + +34. `/loadreset` + - **Description**: Resets load parameters + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware + +35. `/load` + - **Description**: Gets load information + - **Response**: Current load information + - **Authentication**: None + +#### Transaction Management + +36. `/debug-network-txlist` + - **Description**: Gets network transaction list + - **Response**: List of network transactions + - **Authentication**: Requires debug mode middleware + +37. `/debug-network-txlisthash` + - **Description**: Gets network transaction list hash + - **Response**: Hash of the network transaction list + - **Authentication**: Requires debug mode middleware + +38. `/debug-clear-network-txlist` + - **Description**: Clears network transaction list + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware + +39. 
`/debug-network-txcount` + - **Description**: Gets network transaction count + - **Response**: Count of network transactions + - **Authentication**: Requires debug mode middleware + +40. `/tx-stats` + - **Description**: Gets transaction statistics + - **Response**: Transaction statistics + - **Authentication**: None + +#### Cycle Recording + +41. `/debug-cycle-recording-enable` + - **Description**: Enables cycle recording + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware (medium level) + +42. `/debug-cycle-recording-clear` + - **Description**: Clears cycle recording + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware (medium level) + +43. `/debug-cycle-recording-download` + - **Description**: Downloads cycle recording + - **Response**: Cycle recording data + - **Authentication**: Requires debug mode middleware (medium level) + +### POST Endpoints + +1. `/exit` + - **Description**: Shuts down the node + - **Response**: Success confirmation before shutdown + - **Authentication**: Requires debug mode middleware (high level) + +2. `/exit-apop` + - **Description**: Initiates apoptosis (self-removal) of the node + - **Response**: Success confirmation before apoptosis + - **Authentication**: Requires debug mode middleware (high level) + +3. `/testGlobalAccountTX` + - **Description**: Test endpoint for global account transactions + - **Request Body**: Transaction object + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware + +4. `/testGlobalAccountTXSet` + - **Description**: Test endpoint for global account transaction sets + - **Request Body**: Transaction object + - **Response**: Success confirmation + - **Authentication**: Requires debug mode middleware + +5. `/get-tx-receipt` + - **Description**: Gets transaction receipt + - **Request Body**: Transaction ID + - **Response**: Transaction receipt + - **Authentication**: None + +6. `/joinarchiver` + - **Description**: Joins an archiver + - **Request Body**: Join archiver information + - **Response**: Success confirmation + - **Authentication**: None + +7. `/leavingarchivers` + - **Description**: Leaves archivers + - **Request Body**: Leaving archivers information + - **Response**: Success confirmation + - **Authentication**: None + +8. `/requestdata` + - **Description**: Requests data + - **Request Body**: Data request information + - **Response**: Requested data + - **Authentication**: None + +9. `/querydata` + - **Description**: Queries data + - **Request Body**: Query information + - **Response**: Query results + - **Authentication**: None + +## Internal API Endpoints + +These endpoints are used for internal communication between nodes in the network and are not directly accessible via HTTP. + +### Internal Binary Routes + +1. `binary_gossip` + - **Description**: Handles binary gossip messages between nodes + - **Usage**: Used for efficient propagation of information across the network + - **Handler**: `gossipInternalBinaryRoute` + +2. `binary_compare_cert` + - **Description**: Compares certificates between nodes + - **Usage**: Used during cycle creation and consensus + - **Handler**: `compareCertBinaryHandler` + +3. `binary_sign_app_data` + - **Description**: Signs application data + - **Usage**: Used for cryptographic verification of application data + - **Handler**: Defined in `src/shardus/index.ts` + +### Gossip Routes + +1. 
`gossip-cert` + - **Description**: Handles certificate gossip + - **Usage**: Used to propagate cycle certificates across the network + - **Handler**: `gossipCertRoute` + +2. `gossip-active` + - **Description**: Handles active status gossip + - **Usage**: Used to propagate node status changes to active + - **Handler**: `gossipActiveRoute` + +## Error Handling + +Most API endpoints return error responses in the following format: + +```json +{ + "error": "Error message", + "message": "Detailed error message" +} +``` + +Common HTTP status codes: +- 200: Success +- 400: Bad Request +- 401: Unauthorized +- 404: Not Found +- 500: Internal Server Error + +## Notes + +- Many endpoints are for debugging purposes and should not be used in production. +- Authentication is required for most debug endpoints. +- Internal endpoints are not directly accessible via HTTP and are used for node-to-node communication. \ No newline at end of file diff --git a/src/network/debugMiddleware.ts b/src/network/debugMiddleware.ts index 813c8e63f..888d76235 100644 --- a/src/network/debugMiddleware.ts +++ b/src/network/debugMiddleware.ts @@ -1,4 +1,7 @@ import { isDebugMode, getDevPublicKeys, ensureKeySecurity, getMultisigPublicKeys, unsafeUnlock } from '../debug' + +// Re-export isDebugMode +export { isDebugMode } import * as Context from '../p2p/Context' import * as crypto from '@shardeum-foundation/lib-crypto-utils' import { DevSecurityLevel } from '../shardus/shardus-types' diff --git a/src/shardus/launch.ts b/src/shardus/launch.ts new file mode 100644 index 000000000..c76bab056 --- /dev/null +++ b/src/shardus/launch.ts @@ -0,0 +1,624 @@ +import * as Network from '../network' +import { isServiceMode } from '../debug' +import * as Context from '../p2p/Context' +import * as Self from '../p2p/Self' +import * as Archivers from '../p2p/Archivers' +import Crypto from '../crypto' +import * as Log4js from 'log4js' +import * as SocketIO from 'socket.io' +import * as NodeList from '../p2p/NodeList' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { logFlags } from '../logger' +import * as CycleCreator from '../p2p/CycleCreator' +import * as CycleChain from '../p2p/CycleChain' +import * as Join from '../p2p/Join' +import * as Active from '../p2p/Active' +import * as Lost from '../p2p/Lost' +import * as Rotation from '../p2p/Rotation' +import * as SyncModule from '../p2p/Sync' +import * as GlobalAccounts from '../p2p/GlobalAccounts' +import * as Comms from '../p2p/Comms' +import * as CycleAutoScale from '../p2p/CycleAutoScale' +import { ipInfo } from '../network' +import { P2P as P2PNamespace } from '@shardeum-foundation/lib-types' +import { nodeListFromStates } from '../p2p/Join' +import { queueFinishedSyncingRequest } from '../p2p/Join' +import { isFirst, waitForQ1SendRequests } from '../p2p/Self' +import { P2P } from '@shardeum-foundation/lib-types' +import { shardusGetTime } from '../network' +import * as ShardusTypes from '../shardus/shardus-types' +import { config } from '../p2p/Context' +import { profilerInstance } from '../utils/profiler' +import * as utils from '../utils' +import { isApopMarkedNode, apoptosizeSelf } from '../p2p/Apoptosis' +import { scheduleLostReport } from '../p2p/Lost' +import Debug from '../debug' +import * as path from 'path' +import Statistics from '../statistics' +import LoadDetection from '../load-detection' +import * as AutoScaling from '../p2p/CycleAutoScale' +import RateLimiting from '../rate-limiting' +import * as Snapshot from '../snapshot' +import Reporter from 
'../reporter' +import { networkMode } from '../p2p/Modes' + +export const startMethods = { + /** + * Calling this function will start the network + * @param {*} exitProcOnFail Exit the process if an error occurs + */ + async start() { + // Check network up & time synced + await Network.init() + + const isInTimeLimit = await Network.checkAndUpdateTimeSyncedOffset(this.config.p2p.timeServers) + + if (isInTimeLimit === false) { + this.mainLogger.error(`Time is not in sync with the network from checkAndUpdateTimeSyncedOffset process`) + throw new Error(`Time is not in sync with the network during ntpOffsetMs generation`) + } + + if (!isServiceMode()) { + // Setup storage + await this.storage.init() + } + + // Setup crypto + await this.crypto.init() + + try { + const sk: string = this.crypto.keypair.secretKey + this.io = (await this.network.setup(Network.ipInfo, sk)) as SocketIO.Server + Context.setIOContext(this.io) + + /* + * The old middleware is deleted and repurpose into a function + * It was causing problem because as nature of middlewares are expected to run in each events. + * But the authentication payload is only needed to be checked and only supplied once at the socket.io handshake + * This caused the archiver to be able to connect on the first handshake stuck. + * redundant crypto.verify calls were made. + */ + function validateSocketHandshake(socket: SocketIO.Socket, crypto: Crypto, mainLogger: Log4js.Logger): boolean { + // `this` is not binded + // calling `this` in the function will not be the same `this` as the outer LOC were referencing to + try { + if (!Self || !Self.isActive) { + if (!Self.allowConnectionToFirstNode) { + mainLogger.error(`❌ This node is not active yet and kill the socket connection!`) + return false + } + } + // Check if the archiver module is initialized; this is unlikely to happen because of the above Self.isActive check + if (!Archivers.recipients || !Archivers.connectedSockets) { + mainLogger.error(`❌ Seems Archiver module isn't initialized yet, dropping the Socket connection!`) + return false + } + + nestedCountersInstance.countEvent('debug-archiverConnections', `ourIP: ${Self.ip}`) + nestedCountersInstance.countEvent( + 'debug-archiverConnections', + `socket.handshake.address: ${socket.handshake.address.split('::ffff:').pop()}` + ) + nestedCountersInstance.countEvent( + 'debug-archiverConnections', + `socket.handshake.headers.host: ${socket.handshake.headers.host.split(':')[0]}` + ) + + // And we've encountered issues with it in the earthnet + // Plus the archiver is already authenticated by the signature. + // Singature valid and ip are not in sync? then we got a bigger problem to worry about. 
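+        // The disabled IP check below is kept for reference; identity rests on the signed
+        // handshake parsed further down. Hedged sketch of what an archiver is assumed to send
+        // (field names taken from the parsing below; the client-side helper names are assumptions):
+        //   const payload = { publicKey: archiverPubKey, timestamp: shardusGetTime(), intendedConsensor: consensorPubKey }
+        //   const data = JSON.stringify(crypto.sign(payload))   // sign() attaches the `sign` field
+        //   ioClient(`${consensorIp}:${consensorExternalPort}`, { query: { data } })
+        // The checks below enforce a +/-5s timestamp window, the intended consensor, and the signature.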
+ // const archiverIP = socket.handshake.address.split('::ffff:').pop(); + // if (!utils.isValidIPv4(archiverIP)) { + // mainLogger.error(`❌ Invalid IP-Address of Archiver: ${archiverIP}`) + // return false + // } + + const archiverCreds = JSON.parse(socket.handshake.query.data) as { + publicKey: string + timestamp: number + intendedConsensor: string + sign: ShardusTypes.Sign + } + // +/- 5sec tolerance + if (Math.abs(archiverCreds.timestamp - shardusGetTime()) > 5000) { + mainLogger.error(`❌ Old signature from Archiver @ ${archiverCreds.publicKey}`) + return false + } + + if (archiverCreds.intendedConsensor !== Self.getThisNodeInfo().publicKey) { + mainLogger.error( + `❌ The signature is targeted for consensor @ ${archiverCreds.intendedConsensor} but this node is ${ + Self.getThisNodeInfo().publicKey + }` + ) + return false + } + + const isValidSig = crypto.verify(archiverCreds, archiverCreds.publicKey) + + if (!isValidSig) { + mainLogger.error(`❌ Invalid Signature from Archiver @ ${archiverCreds.publicKey}`) + return false + } + + if (Object.keys(Archivers.connectedSockets).length >= config.p2p.maxArchiversSubscriptionPerNode) { + /* prettier-ignore */ console.log( `There are already ${config.p2p.maxArchiversSubscriptionPerNode} archivers connected for data transfer!` ) + return false + } + + // we're the genesis node, this mean cycle is empty, archiver list is empty + // nothing to check against. + // In practice genesis node is accompanied with a genesis archiver by the same party at launch + // so this is ok. + if (Self && Self.isFirst) return true + + // specifically this map here to get archiver list is chose because the map is populated by cycle parsing. + // The one like `recipients` map is weaker because they're populated at joinReq + const archiver = Archivers.archivers.get(archiverCreds.publicKey) + + // bypass this check when this is genesis node + if (!archiver) { + mainLogger.error(`❌ Remote Archiver @ ${archiver.publicKey} is NOT recognized!`) + return false + } + + // The ip check is known to have issues with NATs and other network setups. + // And we've encountered issues with it in the earthnet + // Plus the archiver is already authenticated by the signature. + // Singature valid and ip are not in sync? then we got a bigger problem to worry about. + // if (archiverIP !== archiver.ip) { + // mainLogger.error(`❌ PubKey & IP mismatch for Archiver @ ${archiverIP} !`) + // mainLogger.error('Recipient: ', archiver.ip) + // mainLogger.error('Remote Archiver: ', socket.handshake.address) + // return false + // } + + return true + } catch (error) { + mainLogger.error('❌ Error in Archiver Socket-Connection Auth!') + mainLogger.error(error) + return false + } + } + + this.io.on('connection', (socket: any) => { + if (!validateSocketHandshake(socket, this.crypto, this.mainLogger)) { + socket.disconnect() + return + } + + const { publicKey: archiverPublicKey } = JSON.parse(socket.handshake.query.data) + + console.log(`✅ Archive server has subscribed to this node with Socket-ID ${socket.id}!`) + console.log('Archiver has registered its public key', archiverPublicKey) + + // prototype pollution mitigation + // best case is to use the Map<> + // going with local fix atm. Deep copy, freeze. read-only. + let freezedList = Object.freeze(JSON.parse(JSON.stringify(Archivers.connectedSockets))) + + // The same archiver is connected with different stream, let's disconnect old and accept the current one. 
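+        // Sketch of the Map-based approach mentioned above (an assumption of how it could look,
+        // not what this handler currently does):
+        //   const connectedArchivers = new Map<string, string>()   // publicKey -> socket.id
+        //   if (connectedArchivers.has(archiverPublicKey)) Archivers.removeArchiverConnection(archiverPublicKey)
+        //   connectedArchivers.set(archiverPublicKey, socket.id)
+        // A Map keyed by public key avoids the prototype-pollution risk that plain objects carry.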
+ if (freezedList[archiverPublicKey]) { + Archivers.removeArchiverConnection(archiverPublicKey) + } + + Archivers.addArchiverConnection(archiverPublicKey, socket.id) + socket.on('UNSUBSCRIBE', function (ARCHIVER_PUBLIC_KEY) { + if (freezedList[ARCHIVER_PUBLIC_KEY] === socket.id) { + console.log(`Archive server with public key ${ARCHIVER_PUBLIC_KEY} has requested to Un-subscribe`) + Archivers.removeArchiverConnection(ARCHIVER_PUBLIC_KEY) + } + }) + }) + } catch (e) { + this.mainLogger.error('Socket connection break', e) + } + this.network.on('timeout', (node, requestId: string, context: string, route: string) => { + const ipPort = `${node.internalIp}:${node.internalPort}` + //this console log is probably redundant but are disabled most of the time anyhow. + //They may help slighly in the case of adding some context to the out.log file when full debugging is on. + /* prettier-ignore */ if (logFlags.p2pNonFatal) console.log(`In Shardus got network timeout-${context}-${route} for request ID - ${requestId} from node: ${utils.logNode(node)} ${ipPort}` ) + const result = isApopMarkedNode(node.id) + if (result) { + /* prettier-ignore */ nestedCountersInstance.countEvent('lostNodes', `timeout-apop-${context}-${route}`) + return + } + if (!config.debug.disableLostNodeReports) scheduleLostReport(node, 'timeout', requestId) + /** [TODO] Report lost */ + /* prettier-ignore */ if (logFlags.p2pNonFatal) nestedCountersInstance.countEvent('lostNodes', `timeout-${context}`) + // context has been added to provide info on the type of timeout and where it happened + /* prettier-ignore */ if (logFlags.p2pNonFatal) nestedCountersInstance.countRareEvent( 'lostNodes', `timeout-${context} ${ipPort}` ) + if (this.network.statisticsInstance) this.network.statisticsInstance.incrementCounter('lostNodeTimeout') + }) + this.network.on( + 'error', + (node, requestId: string, context: string, errorGroup: string, route: string, subRoute = '') => { + const ipPort = `${node.internalIp}:${node.internalPort}` + //this console log is probably redundant but are disabled most of the time anyhow. + //They may help slighly in the case of adding some context to the out.log file when full debugging is on. 
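+        // Flow of this error handler: optional verbose logging (p2pNonFatal), then schedule a
+        // lost-node report (unless config.debug.disableLostNodeReports is set) and count the
+        // error under the 'lostNodes' counters.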
+ /* prettier-ignore */ if (logFlags.p2pNonFatal) console.log(`In Shardus got network error-${context} ${route}-${subRoute} for request ID ${requestId} from node: ${utils.logNode(node)} ${ipPort} error:${errorGroup}` ) + /* prettier-ignore */ if (logFlags.p2pNonFatal) console.log(`node:`, node) + /* prettier-ignore */ if (logFlags.p2pNonFatal) console.log(`requestId: ${requestId}, context: ${context}, route: ${route}, subRoute: ${subRoute}, errorGroup: ${errorGroup}`) + if (!config.debug.disableLostNodeReports) scheduleLostReport(node, 'error', requestId) + /** [TODO] Report lost */ + /* prettier-ignore */ nestedCountersInstance.countEvent('lostNodes', `error-${context}-${route}-${subRoute}`) + /* prettier-ignore */ nestedCountersInstance.countRareEvent( 'lostNodes', `error-${context} ${ipPort}` ) + } + ) + + // Setup other modules + this.debug = new Debug(this.config.baseDir, this.network) + this.debug.addToArchive(this.logger.logDir, './logs') + this.debug.addToArchive(path.parse(this.storage.storage.storageConfig.options.storage).dir, './db') + + if (!isServiceMode()) { + this.statistics = new Statistics( + this.config.baseDir, + this.config.statistics, + { + counters: [ + 'txInjected', + 'txApplied', + 'txRejected', + 'txExpired', + 'txProcessed', + 'networkTimeout', + 'lostNodeTimeout', + ], + watchers: { + queueLength: () => (this.stateManager ? this.stateManager.transactionQueue._transactionQueue.length : 0), + executeQueueLength: () => + this.stateManager ? this.stateManager.transactionQueue.getExecuteQueueLength() : 0, + serverLoad: () => (this.loadDetection ? this.loadDetection.getCurrentLoad() : 0), + }, + timers: ['txTimeInQueue'], + manualStats: ['netInternalDuty', 'netExternalDuty'], + fifoStats: ['cpuPercent'], + ringOverrides: {}, + fifoOverrides: { cpuPercent: 240 }, + }, + this + ) + } + this.debug.addToArchive('./statistics.tsv', './statistics.tsv') + + this.profiler.setStatisticsInstance(this.statistics) + this.network.setStatisticsInstance(this.statistics) + + this.statistics + + this.loadDetection = new LoadDetection(this.config.loadDetection, this.statistics) + this.loadDetection.on('highLoad', () => { + // console.log(`High load detected Cycle ${currentCycle}, Quarter: ${currentQuarter}`) + nestedCountersInstance.countEvent('loadRelated', 'highLoad') + AutoScaling.requestNetworkUpsize() + }) + this.loadDetection.on('lowLoad', () => { + // console.log(`Low load detected Cycle ${currentCycle}, Quarter: ${currentQuarter}`) + nestedCountersInstance.countEvent('loadRelated', 'lowLoad') + AutoScaling.requestNetworkDownsize() + }) + + if (!isServiceMode()) this.statistics.on('snapshot', () => this.loadDetection.updateLoad()) + + this.rateLimiting = new RateLimiting(this.config.rateLimiting, this.seqLogger) + + Context.setShardusContext(this) + + // Init new P2P + Self.init() + + if (this.app) { + this._createAndLinkStateManager() + this._attemptCreateAppliedListener() + + let disableSnapshots = !!(this.config && this.config.debug && this.config.debug.disableSnapshots === true) + if (disableSnapshots != true) { + // Start state snapshotting once you go active with an app + this.once('active', Snapshot.startSnapshotting) + } + } + + this.reporter = + this.config.reporting.report && !isServiceMode() + ? 
new Reporter( + this.config.reporting, + this.logger, + this.statistics, + this.stateManager, + this.profiler, + this.loadDetection + ) + : null + Context.setReporterContext(this.reporter) + + this._registerRoutes() + + // this.io.on('disconnect') + + // Register listeners for P2P events + Self.emitter.on('witnessing', async (publicKey) => { + this.logger.playbackLogState('witnessing', '', publicKey) + await Snapshot.startWitnessMode() + }) + Self.emitter.on('joining', (publicKey) => { + // this.io.emit('DATA', `NODE JOINING ${publicKey}`) + this.logger.playbackLogState('joining', '', publicKey) + if (this.reporter) this.reporter.reportJoining(publicKey) + }) + Self.emitter.on('joined', (nodeId, publicKey) => { + // this.io.emit('DATA', `NODE JOINED ${nodeId}`) + this.logger.playbackLogState('joined', nodeId, publicKey) + this.logger.setPlaybackID(nodeId) + if (this.reporter) this.reporter.reportJoined(nodeId, publicKey) + }) + Self.emitter.on('initialized', async () => { + // If network is in safety mode + const newest = CycleChain.getNewest() + // changed from using safetyMode to mode + if (newest && (newest.mode === 'restart' || newest.mode === 'recovery')) { + // Stay in syncing mode and let other nodes join + Self.setp2pIgnoreJoinRequests(false) + console.log('p2pIgnoreJoinRequests = false') + nestedCountersInstance.countEvent('restore', `intialized: ${newest.mode}. ${shardusGetTime()}`) + } else { + // not doing a safety sync + // todo hook this up later cant deal with it now. + // await this.storage.deleteOldDBPath() + + /* // LOCAL_OOS_TEST_SUPPORT not for production + this.mainLogger.info('sync-p2p synced waiting 4 min') + await utils.sleep(240000) //do not release this helps us have a chance + //to query /config before the node syncs data + */ + this.mainLogger.info('sync-syncAppData') + await this.syncAppData() + } + }) + Self.emitter.on('restore', async (cycleNumber: number) => { + console.log('restore mode triggered on cycle', cycleNumber) + this.logger.playbackLogState('restore', '', `Restore mode triggered on cycle ${cycleNumber}`) + + nestedCountersInstance.countEvent( + 'restore', + `restore event: entered. seen on cycle:${cycleNumber} ${shardusGetTime()}` + ) + await this.stateManager.waitForShardCalcs() + nestedCountersInstance.countEvent('restore', `restore event: got shard calcs. ${shardusGetTime()}`) + // Start restoring state data + try { + this.stateManager.renewState() + await this.stateManager.accountSync.initialSyncMain(3) + console.log('restore - initialSyncMain finished') + nestedCountersInstance.countEvent('restore', `restore event: syncAppData finished. ${shardusGetTime()}`) + } catch (err) { + console.log() + this.fatalLogger.fatal('restore-failed with Error: ' + utils.formatErrorMessage(err)) + nestedCountersInstance.countEvent('restore', `restore event: fail and apop self. ${shardusGetTime()}`) + apoptosizeSelf(`restore-failed: ${err?.message}`, 'Node stopped due to network restore failure.') + return + } + + // After restoring state data, set syncing flags to true and go active + await this.stateManager.startCatchUpQueue() + console.log('restore - startCatchUpQueue') + nestedCountersInstance.countEvent('restore', `restore event: finished startCatchUpQueue. 
${shardusGetTime()}`) + await this.app.sync() + console.log('syncAppData - sync') + + await queueFinishedSyncingRequest() + console.log('syncAppData - queueFinishedSyncingRequest') + nestedCountersInstance.countEvent('restore', `restore event: queue finished-syncing-request ${shardusGetTime()}`) + + this.stateManager.appFinishedSyncing = true + this.stateManager.startProcessingCycleSummaries() + }) + Self.emitter.on('active', (nodeId) => { + // this.io.emit('DATA', `NODE ACTIVE ${nodeId}`) + this.logger.playbackLogState('active', nodeId, '') + if (this.reporter) { + this.reporter.reportActive(nodeId) + this.reporter.startReporting() + } + if (this.statistics) this.statistics.startSnapshots() + this.emit('active', nodeId) + }) + Self.emitter.on('failed', () => { + this.mainLogger.info('shutdown: on failed event') + this.shutdown(true) + }) + Self.emitter.on('error', (e) => { + console.log(e.message + ' at ' + e.stack) + if (logFlags.debug) this.mainLogger.debug('shardus.start() ' + e.message + ' at ' + e.stack) + // normally fatal error keys should not be variable ut this seems like an ok exception for now + this.shardus_fatal(`onError_ex` + e.message + ' at ' + e.stack, 'shardus.start() ' + e.message + ' at ' + e.stack) + throw new Error(e) + }) + Self.emitter.on('removed', async () => { + // Omar - Why are we trying to call the functions in modules directly before exiting. + // The modules have already registered shutdown functions with the exitHandler. + // We should let exitHandler handle the shutdown process. + /* + if (this.statistics) { + this.statistics.stopSnapshots() + this.statistics.initialize() + } + if (this.reporter) { + this.reporter.stopReporting() + await this.reporter.reportRemoved(Self.id) + } + if (this.app) { + this.app.deleteLocalAccountData() + this._attemptRemoveAppliedListener() + this._unlinkStateManager() + await this.stateManager.cleanup() + } + + // Shutdown cleanly + process.exit() +*/ + this.mainLogger.info(`exitCleanly: removed`) + if (this.reporter) { + this.reporter.stopReporting() + await this.reporter.reportRemoved(Self.id) + } + this.exitHandler.exitCleanly(`removed`, `removed from network in normal conditions`) // exits with status 0 so that PM2 can restart the process + }) + Self.emitter.on('app-removed', async () => { + this.mainLogger.info(`exitCleanly: app removed`) + if (this.reporter) { + this.reporter.stopReporting() + await this.reporter.reportRemoved(Self.id) + } + this.exitHandler.exitCleanly(`removed`, `removed from network requested by app`) // exits with status 0 so that + }) + Self.emitter.on('invoke-exit', async (tag: string, callstack: string, message: string, restart: boolean) => { + // Omar - Why are we trying to call the functions in modules directly before exiting. + // The modules have already registered shutdown functions with the exitHandler. + // We should let exitHandler handle the shutdown process. + /* + this.fatalLogger.fatal('Shardus: caught apoptosized event; cleaning up') + if (this.statistics) { + this.statistics.stopSnapshots() + this.statistics.initialize() + } + if (this.reporter) { + this.reporter.stopReporting() + await this.reporter.reportRemoved(Self.id) + } + if (this.app) { + this.app.deleteLocalAccountData() + this._attemptRemoveAppliedListener() + this._unlinkStateManager() + await this.stateManager.cleanup() + } + this.fatalLogger.fatal( + 'Shardus: caught apoptosized event; finished clean up' + ) +*/ + const exitType = restart ? 
'exitCleanly' : 'exitUncleanly' + nestedCountersInstance.countRareEvent('fatal', `invoke-exit: ${tag} ${exitType}`) + this.mainLogger.error(`invoke-exit: ${tag} ${exitType}`) + this.mainLogger.error(message) + this.mainLogger.error(callstack) + if (this.reporter) { + this.reporter.stopReporting() + await this.reporter.reportRemoved(Self.id) + } + if (restart) + this.exitHandler.exitCleanly(`invoke-exit: ${tag}`, `invoke-exit: ${tag}. but exiting cleanly for a restart`) + // exits with status 0 so that PM2 can restart the process + else this.exitHandler.exitUncleanly(`invoke-exit: ${tag}`, `invoke-exit: ${tag} ${exitType}`) // exits with status 1 so that PM2 CANNOT restart the process + }) + Self.emitter.on('node-activated', async ({ ...params }) => { + if (networkMode === 'shutdown') return + try { + const result: any = this.app.eventNotify?.({ type: 'node-activated', ...params }) + if (result instanceof Promise) { + await result + } + } catch (e) { + this.mainLogger.error(`Error: while processing node-activated event stack: ${utils.formatErrorMessage(e)}`) + } + }) + Self.emitter.on('node-deactivated', async ({ ...params }) => { + if (networkMode === 'shutdown') return + try { + const result: any = this.app.eventNotify?.({ type: 'node-deactivated', ...params }) + if (result instanceof Promise) { + await result + } + } catch (e) { + this.mainLogger.error(`Error: while processing node-deactivated event stack: ${utils.formatErrorMessage(e)}`) + } + }) + Self.emitter.on('node-refuted', async ({ ...params }) => { + try { + if (!this.stateManager.currentCycleShardData) throw new Error('No current cycle data') + if (params.publicKey == null) throw new Error('No node publicKey provided for node-refuted event') + const consensusNodes = this.getConsenusGroupForAccount(params.publicKey) + for (let node of consensusNodes) { + if (node.id === Self.id) { + const result: any = this.app.eventNotify?.({ type: 'node-refuted', ...params }) + if (result instanceof Promise) { + await result + } + } + } + } catch (e) { + this.mainLogger.error(`Error: while processing node-refuted event stack: ${utils.formatErrorMessage(e)}`) + } + }) + Self.emitter.on('node-left-early', async ({ ...params }) => { + try { + if (!this.stateManager.currentCycleShardData) throw new Error('No current cycle data') + if (params.publicKey == null) throw new Error('No node publicKey provided for node-left-early event') + const consensusNodes = this.getConsenusGroupForAccount(params.publicKey) + for (let node of consensusNodes) { + if (node.id === Self.id) { + const result: any = this.app.eventNotify?.({ type: 'node-left-early', ...params }) + if (result instanceof Promise) { + await result + } + } + } + } catch (e) { + this.mainLogger.error(`Error: while processing node-left-early event stack: ${utils.formatErrorMessage(e)}`) + } + }) + Self.emitter.on('node-sync-timeout', async ({ ...params }) => { + try { + if (!this.stateManager.currentCycleShardData) throw new Error('No current cycle data') + if (params.publicKey == null) throw new Error('No node publicKey provided for node-sync-timeout event') + const consensusNodes = this.getConsenusGroupForAccount(params.publicKey) + for (let node of consensusNodes) { + if (node.id === Self.id) { + const result: any = this.app.eventNotify?.({ type: 'node-sync-timeout', ...params }) + if (result instanceof Promise) { + await result + } + break + } + } + } catch (e) { + this.mainLogger.error(`Error: while processing node-sync-timeout event stack: ${utils.formatErrorMessage(e)}`) + } + }) + 
Self.emitter.on('try-network-transaction', async ({ ...params }) => { + try { + const result: any = this.app.eventNotify?.({ type: 'try-network-transaction', ...params }) + if (result instanceof Promise) { + await result + } + } catch (e) { + this.mainLogger.error( + `Error: while processing try-network-transaction event stack: ${utils.formatErrorMessage(e)}` + ) + } + }) + + // Start P2P + await Self.startupV2(this) + + // handle config queue changes and debug logic updates + this._registerListener(this.p2p.state, 'cycle_q1_start', async () => { + let lastCycle = CycleChain.getNewest() + + if (lastCycle === null) { + return + } + + // need to make sure sync is finish or we may not have the global account + // even worse, the dapp may not have initialized storage yet + if (this.stateManager.appFinishedSyncing === true) { + //query network account from the app for changes + const account = await this.app.getNetworkAccount() + + this.updateConfigChangeQueue(account, lastCycle.counter, true) + } + + this.updateDebug(lastCycle) + }) + + } +} diff --git a/src/shardus/routes.ts b/src/shardus/routes.ts new file mode 100644 index 000000000..37fbfe2dc --- /dev/null +++ b/src/shardus/routes.ts @@ -0,0 +1,256 @@ +import { isDebugModeMiddlewareHigh, isDebugModeMiddlewareLow, isDebugModeMiddlewareMedium, isDebugMode } from '../network/debugMiddleware' +import { apoptosizeSelf } from '../p2p/Apoptosis' +import * as Self from '../p2p/Self' +import * as CycleCreator from '../p2p/CycleCreator' +import * as CycleChain from '../p2p/CycleChain' +// netConfig import removed - will use config.server instead +import * as Lost from '../p2p/Lost' +import * as NodeList from '../p2p/NodeList' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { logFlags } from '../logger' +import { Utils } from '@shardeum-foundation/lib-types' +import * as Comms from '../p2p/Comms' +import { Node } from '@shardeum-foundation/lib-types/build/src/p2p/NodeListTypes' +import * as ServiceQueue from '../p2p/ServiceQueue' +import { shardusGetTime, calculateFakeTimeOffset, clearFakeTimeOffset } from '../network' +import { profilerInstance } from '../utils/profiler' +import * as utils from '../utils' +import * as JoinV2 from '../p2p/Join/v2' +import { config } from '../p2p/Context' +import { getSocketReport } from '../utils/debugUtils' +import { lostArchiversMap } from '../p2p/LostArchivers/state' +import { InternalBinaryHandler } from '../types/Handler' +import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' +import { RequestErrorEnum } from '../types/enum/RequestErrorEnum' +import { getStreamWithTypeCheck, requestErrorHandler } from '../types/Helpers' +import { TypeIdentifierEnum } from '../types/enum/TypeIdentifierEnum' +import { SignAppDataReq, deserializeSignAppDataReq, serializeSignAppDataReq } from '../types/SignAppDataReq' +import { SignAppDataResp, deserializeSignAppDataResp, serializeSignAppDataResp } from '../types/SignAppDataResp' +import { Route } from '@shardeum-foundation/lib-types/build/src/p2p/P2PTypes' + +// Local deepReplace function +function deepReplace(obj: object | ArrayLike, find: any, replace: any): any { + if (Array.isArray(obj)) { + for (let i = 0; i < obj.length; i++) { + if (obj[i] === find) { + obj[i] = replace + } else if (typeof obj[i] === 'object' && obj[i] !== null) { + deepReplace(obj[i], find, replace) + } + } + } else if (typeof obj === 'object' && obj !== null) { + for (const key in obj) { + if (obj[key] === find) { + obj[key] = replace + } else if (typeof obj[key] === 
'object' && obj[key] !== null) { + deepReplace(obj[key], find, replace) + } + } + } + return obj +} + +export const routesMethods = { + _registerRoutes() { + // DEBUG routes + this.network.registerExternalPost('exit', isDebugModeMiddlewareHigh, async (_req, res) => { + res.json({ success: true }) + await this.shutdown() + }) + // TODO elevate security beyond high when we get multi sig. or is that too slow when needed? + this.network.registerExternalPost('exit-apop', isDebugModeMiddlewareHigh, async (_req, res) => { + apoptosizeSelf('Apoptosis called at exit-apop route', 'Node stopped by call to exit-apop route.') + res.json({ success: true }) + }) + + this.network.registerExternalGet('config', isDebugModeMiddlewareLow, async (_req, res) => { + res.json({ config: this.config }) + }) + this.network.registerExternalGet('netconfig', async (_req, res) => { + res.json({ config: this.config.server }) + }) + + this.network.registerExternalGet('nodeInfo', async (req, res) => { + let reportIntermediateStatus = req.query.reportIntermediateStatus === 'true' + const nodeInfo = Self.getPublicNodeInfo(reportIntermediateStatus) + const appData = this.app.getNodeInfoAppData() + let result = { nodeInfo: { ...nodeInfo, appData } } as any + if (isDebugMode() && req.query.debug === 'true') { + result.debug = { + queriedWhen: new Date().toISOString(), + //Note we can't convert to shardusGetTime because process.uptime() uses Date.now() internally + startedWhen: new Date(Date.now() - process.uptime() * 1000).toISOString(), + uptimeMins: Math.round((100 * process.uptime()) / 60) / 100, + pid: process.pid, + currentQuarter: CycleCreator.currentQuarter, + currentCycleMarker: CycleChain.getCurrentCycleMarker() ?? null, + newestCycle: CycleChain.getNewest() ?? null, + lostArchiversMap: lostArchiversMap, + } + } + res.json(result) + }) + + this.network.registerExternalGet('joinInfo', isDebugModeMiddlewareMedium, async (_req, res) => { + const nodeInfo = Self.getPublicNodeInfo(true) + let result = { + respondedWhen: new Date().toISOString(), + //Note we can't convert to shardusGetTime because process.uptime() uses Date.now() internally + startedWhen: new Date(Date.now() - process.uptime() * 1000).toISOString(), + uptimeMins: Math.round((100 * process.uptime()) / 60) / 100, + pid: process.pid, + publicKey: nodeInfo.publicKey, + id: nodeInfo.id, + status: nodeInfo.status, + currentQuarter: CycleCreator.currentQuarter, + currentCycleMarker: CycleChain.getCurrentCycleMarker() ?? 
null, + previousCycleMarker: CycleChain.getNewest()?.previous, + getStandbyListHash: JoinV2.getStandbyListHash(), + getLastHashedStandbyList: JoinV2.getLastHashedStandbyList(), + getSortedStandbyNodeList: JoinV2.getSortedStandbyJoinRequests(), + } + res.json(deepReplace(result, undefined, '__undefined__')) + }) + + this.network.registerExternalGet('standby-list-debug', isDebugModeMiddlewareLow, async (_req, res) => { + let getSortedStandbyNodeList = JoinV2.getSortedStandbyJoinRequests() + let result = getSortedStandbyNodeList.map((node) => ({ + pubKey: node.nodeInfo.publicKey, + ip: node.nodeInfo.externalIp, + port: node.nodeInfo.externalPort, + })) + res.json(result) + }) + + this.network.registerExternalGet('status-history', isDebugModeMiddlewareLow, async (_req, res) => { + let result = Self.getStatusHistoryCopy() + res.json(deepReplace(result, undefined, '__undefined__')) + }) + + this.network.registerExternalGet('socketReport', isDebugModeMiddlewareLow, async (_req, res) => { + const report = getSocketReport() + res.json(report) + }) + this.network.registerExternalGet('forceCycleSync', isDebugModeMiddlewareHigh, async (req, res) => { + let enable = req.query.enable === 'true' || false + this.config.p2p.hackForceCycleSyncComplete = enable + res.json(await getSocketReport()) + }) + + this.network.registerExternalGet('calculate-fake-time-offset', isDebugModeMiddlewareHigh, async (req, res) => { + const shift = req.query.shift ? parseInt(req.query.shift as string) : 0 + const spread = req.query.spread ? parseInt(req.query.spread as string) : 0 + const offset = calculateFakeTimeOffset(shift, spread) + /* prettier-ignore */ this.mainLogger.debug({ message: "Calculated fakeTimeOffset", data: { shift, spread, offset } }); + res.json({ success: true }) + }) + + this.network.registerExternalGet('clear-fake-time-offset', isDebugModeMiddlewareHigh, async (_req, res) => { + const offset = clearFakeTimeOffset() + /* prettier-ignore */ this.mainLogger.debug({ message: "Cleared fakeTimeOffset", data: { offset } }); + res.json({ success: true }) + }) + + this.p2p.registerInternal( + 'sign-app-data', + async ( + payload: { + type: string + nodesToSign: string + hash: string + appData: any + }, + respond: (arg0: any) => any + ) => { + const { type, nodesToSign, hash, appData } = payload + const { success, signature } = await this.app.signAppData?.(type, hash, Number(nodesToSign), appData) + + await respond({ success, signature }) + } + ) + + // Binary handler for sign-app-data + const signAppDataBinaryHandler: Route> = { + name: InternalRouteEnum.binary_sign_app_data, + handler: async (payload, respond) => { + const route = InternalRouteEnum.binary_sign_app_data + nestedCountersInstance.countEvent('internal', route) + this.profiler.scopedProfileSectionStart(route) + + const errorHandler = (errorType: RequestErrorEnum, opts?: any): boolean => { + nestedCountersInstance.countEvent('internal', `${route}-${errorType}`) + if (logFlags.error) { + const { nodeId } = opts || {} + const errorMsg = `Error in ${route}: ${errorType} from node ${nodeId ?? 
'unknown'}` + this.mainLogger.error(errorMsg) + } + return false + } + + try { + const stream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cSignAppDataReq) + if (!stream) { + respond(errorHandler(RequestErrorEnum.InvalidRequest), null) + return + } + + const req: SignAppDataReq = deserializeSignAppDataReq(stream) + const { type, hash, nodesToSign, appData } = req + const { success, signature } = await this.app.signAppData?.(type, hash, nodesToSign, appData) + + const response: SignAppDataResp = { success, signature } + respond(response, serializeSignAppDataResp) + } catch (ex) { + if (logFlags.error) { + this.mainLogger.error(`${route}: ${ex.message}`) + } + nestedCountersInstance.countEvent('internal', `${route}-exception`) + respond(errorHandler(RequestErrorEnum.InvalidRequest), null) + } finally { + this.profiler.scopedProfileSectionEnd(route) + } + } + } + this.p2p.registerInternalBinary(signAppDataBinaryHandler.name, signAppDataBinaryHandler.handler) + + this.network.registerExternalGet('debug-toggle-foreverloop', isDebugModeMiddlewareHigh, (req, res) => { + this.debugForeverLoopsEnabled = !this.debugForeverLoopsEnabled + //optionally check the query param set and use that instead + if (req.query.set) { + this.debugForeverLoopsEnabled = req.query.set === 'true' + } + res.json({ debugForeverLoopsEnabled: this.debugForeverLoopsEnabled }) + }) + + // FOR internal testing. NEEDS to be removed for security purposes + this.network.registerExternalPost('testGlobalAccountTX', isDebugModeMiddlewareHigh, async (req, res) => { + try { + this.mainLogger.debug(`testGlobalAccountTX: req:${utils.stringifyReduce(req.body)}`) + const tx = req.body.tx + this.put(tx, false, true) + res.json({ success: true }) + } catch (ex) { + this.mainLogger.debug('testGlobalAccountTX:' + ex.name + ': ' + ex.message + ' at ' + ex.stack) + this.shardus_fatal( + `registerExternalPost_ex`, + 'testGlobalAccountTX:' + ex.name + ': ' + ex.message + ' at ' + ex.stack + ) + } + }) + + this.network.registerExternalPost('testGlobalAccountTXSet', isDebugModeMiddlewareHigh, async (req, res) => { + try { + this.mainLogger.debug(`testGlobalAccountTXSet: req:${utils.stringifyReduce(req.body)}`) + const tx = req.body.tx + this.put(tx, true, true) + res.json({ success: true }) + } catch (ex) { + this.mainLogger.debug('testGlobalAccountTXSet:' + ex.name + ': ' + ex.message + ' at ' + ex.stack) + this.shardus_fatal( + `registerExternalPost2_ex`, + 'testGlobalAccountTXSet:' + ex.name + ': ' + ex.message + ' at ' + ex.stack + ) + } + }) + } +} diff --git a/src/shardus/saveConsoleOutput.ts b/src/shardus/saveConsoleOutput.ts index f19503619..1b063b160 100644 --- a/src/shardus/saveConsoleOutput.ts +++ b/src/shardus/saveConsoleOutput.ts @@ -20,3 +20,4 @@ export function startSaving(baseDir: string): void { // Monkey patch the global console with a new one that uses our passthroughs console = new Console({ stdout: outPass, stderr: errPass }) // eslint-disable-line no-global-assign } + diff --git a/src/shardus/shardus-types.ts b/src/shardus/shardus-types.ts index 0eaa33daa..82749eeef 100644 --- a/src/shardus/shardus-types.ts +++ b/src/shardus/shardus-types.ts @@ -1708,3 +1708,4 @@ export interface ValidatorNodeDetails { port: number publicKey: string } + diff --git a/src/shardus/sync.ts b/src/shardus/sync.ts new file mode 100644 index 000000000..cdd13680a --- /dev/null +++ b/src/shardus/sync.ts @@ -0,0 +1,103 @@ +import * as Self from '../p2p/Self' +import * as CycleChain from '../p2p/CycleChain' +import * as Context from 
'../p2p/Context' +import * as Comms from '../p2p/Comms' +import * as NodeList from '../p2p/NodeList' +import { nodeListFromStates, queueFinishedSyncingRequest } from '../p2p/Join' +import { P2P } from '@shardeum-foundation/lib-types' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { shardusGetTime } from '../network' +import { apoptosizeSelf } from '../p2p/Apoptosis' +import * as utils from '../utils' +import { logFlags } from '../logger' + +export const syncMethods = { + /** + * Function used to allow shardus to sync data specific to an app if it should be required + */ + async syncAppData() { + if (!this.app) { + //await this.p2p. + let readyPayload = { + nodeId: Self.id, + cycleNumber: CycleChain.getNewest()?.counter, + } + readyPayload = Context.crypto.sign(readyPayload) + Comms.sendGossip( + 'gossip-sync-finished', + readyPayload, + undefined, + undefined, + nodeListFromStates([ + P2P.P2PTypes.NodeStatus.ACTIVE, + P2P.P2PTypes.NodeStatus.READY, + P2P.P2PTypes.NodeStatus.SYNCING, + ]) + ) + if (this.stateManager) { + this.stateManager.appFinishedSyncing = true + } + return + } + console.log('syncAppData') + if (this.stateManager) { + try { + await this.stateManager.accountSync.initialSyncMain(3) + console.log('syncAppData - initialSyncMain finished') + } catch (err) { + this.fatalLogger.fatal('initialSyncMain-failed with Error: ' + utils.formatErrorMessage(err)) + nestedCountersInstance.countEvent( + 'syncAppData', + `initialSyncMain event: fail and apop self. ${shardusGetTime()}` + ) + apoptosizeSelf( + `initialSyncMain-failed: ${err?.message}`, + 'Node stopped due to node performance or network issues during initial app data sync.' + ) + return + } + } + // if (this.stateManager) await this.stateManager.accountSync.syncStateDataFast(3) // fast mode + if (this.p2p.isFirstSeed) { + console.log('syncAppData - isFirstSeed') + // the following comment of delay is probably not relavent now as we are using cycle txs + // we don't have a delay here as there's practically no time between sync-started and sync-finished for the first node + // since we already wait fro sync-finished, its very unlikely we'll be in the wrong quarter + await queueFinishedSyncingRequest() + console.log('syncAppData - queueFinishedSyncingRequest') + nestedCountersInstance.countEvent('p2p', `queue finished-syncing-request ${shardusGetTime()}`) + await this.stateManager.waitForShardCalcs() + await this.app.sync() + console.log('syncAppData - sync') + this.stateManager.appFinishedSyncing = true + Self.setp2pIgnoreJoinRequests(false) + console.log('p2pIgnoreJoinRequests = false') + } else { + await this.stateManager.startCatchUpQueue() + console.log('syncAppData - startCatchUpQueue') + await this.app.sync() + console.log('syncAppData - sync') + Self.setp2pIgnoreJoinRequests(false) + console.log('p2pIgnoreJoinRequests = false') + + await queueFinishedSyncingRequest() + console.log('syncAppData - queueFinishedSyncingRequest') + nestedCountersInstance.countEvent('p2p', `queue finished-syncing-request ${shardusGetTime()}`) + this.stateManager.appFinishedSyncing = true + } + // Set network joinable to true + this.p2p.setJoinRequestToggle(true) + console.log('Server ready!') + if (this.stateManager) { + await utils.sleep(3000) + // Original sync check + // this.stateManager.enableSyncCheck() + + // Partition check and data repair (new) + // disable and compare this.stateManager.startSyncPartitions() + + //this.stateManager.partitionObjects.startSyncPartitions() + 
this.stateManager.startProcessingCycleSummaries() + } + } +} diff --git a/src/shardus/transaction.ts b/src/shardus/transaction.ts new file mode 100644 index 000000000..fe8504a0d --- /dev/null +++ b/src/shardus/transaction.ts @@ -0,0 +1,679 @@ +import * as ShardusTypes from './shardus-types' +import { logFlags } from '../logger' +import { shardusGetTime } from '../network' +import * as CycleCreator from '../p2p/CycleCreator' +import * as Self from '../p2p/Self' +import { Utils } from '@shardeum-foundation/lib-types' +import { nestedCountersInstance } from '../utils/nestedCounters' +import * as utils from '../utils' +import { RequestErrorEnum } from '../types/enum/RequestErrorEnum' +// apoptosizeSelf and getOurNodeIndex imported from Self already +import * as NodeList from '../p2p/NodeList' +import { SignedObject } from '@shardeum-foundation/lib-crypto-utils' +import * as Comms from '../p2p/Comms' +import * as ServiceQueue from '../p2p/ServiceQueue' +import * as CycleChain from '../p2p/CycleChain' +import * as Context from '../p2p/Context' +import { profilerInstance } from '../utils/profiler' +import { Node } from '@shardeum-foundation/lib-types/build/src/p2p/NodeListTypes' +import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' +// serializeInjectTxReq and deserializeInjectTxResp imports removed +// shardusFactory import removed +// InjectTxReqSerialized, InjectTxRespSerialized imports removed +import { inspect } from 'util' +import ShardFunctions from '../state-manager/shardFunctions' +import { nodes } from '../p2p/NodeList' +import { isNodeInRotationBounds } from '../p2p/Utils' +import { isValidShardusAddress, inRangeOfCurrentTime } from '../utils' +import { getNetworkTimeOffset } from '../network' +import { ServerMode } from './shardus-types' +import { NonceQueueItem } from '../state-manager/state-manager-types' + +export const transactionMethods = { + /** + * Submits a transaction to the network + * Returns an object that tells whether a tx was successful or not and the reason why via the + * validateTxnFields application SDK function. + * Throws an error if an application was not provided to shardus. + * + * @param tx the TX format is not known to shardus core and can be any object + * @param set this is an old feaure that can be used by the first node in the network to inject TXs early. candidate for deprecation + * @param global this is used for injecting a tx that changes a global account completely different consensus is used for these. see src/p2p/GlobalAccounts.ts + * @param inputAppData optional opaque app data that can be passed in. this is forwared to the dapp when precrack is called. 
+ * @returns + * { + * success: boolean, + * reason: string, + * staus: number + * } + */ + async put( + tx: ShardusTypes.OpaqueTransaction | ShardusTypes.ReinjectedOpaqueTransaction, + set = false, + global = false, + inputAppData = null + ): Promise<{ success: boolean; reason: string; status: number; txId?: string }> { + const noConsensus = set || global + const txId = this.app.calculateTxId(tx) + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: inject:${shardusGetTime()}`) + + // Check if Consensor is ready to receive txs before processing it further + if (!this.appProvided) throw new Error('Please provide an App object to Shardus.setup before calling Shardus.put') + if (logFlags.verbose) + this.mainLogger.debug(`Start of injectTransaction ${Utils.safeStringify(tx)} set:${set} global:${global}`) // not reducing tx here so we can get the long hashes + if (!this.stateManager.accountSync.dataSyncMainPhaseComplete) { + this.statistics.incrementCounter('txRejected') + nestedCountersInstance.countEvent('rejected', '!dataSyncMainPhaseComplete') + return { success: false, reason: 'Node is still syncing.', status: 500 } + } + if (!this.stateManager.hasCycleShardData()) { + this.statistics.incrementCounter('txRejected') + nestedCountersInstance.countEvent('rejected', '!hasCycleShardData') + return { + success: false, + reason: 'Not ready to accept transactions, shard calculations pending', + status: 500, + } + } + // set === true (which is handled in the else case here) is a special kind of TX that is allowed only be the first node in the network + // this is used to create global network settings and other dawn of time accounts + if (set === false) { + if (!this.p2p.allowTransactions()) { + if (global === true && this.p2p.allowSet()) { + // This ok because we are initializing a global at the set time period + } else { + if (logFlags.verbose) + this.mainLogger.debug(`txRejected ${Utils.safeStringify(tx)} set:${set} global:${global}`) + + this.statistics.incrementCounter('txRejected') + nestedCountersInstance.countEvent('rejected', '!allowTransactions') + return { + success: false, + reason: 'Network conditions to allow transactions are not met.', + status: 500, + } + } + } + } else { + // this is where set is true. check if we allow it (i.e. only one node active). 
if not, reject early + if (!this.p2p.allowSet()) { + this.statistics.incrementCounter('txRejected') + nestedCountersInstance.countEvent('rejected', '!allowTransactions2') + return { + success: false, + reason: 'Network conditions to allow app init via set', + status: 500, + } + } + } + + // Now it is time to check rate limiting to see if our node can accept more transactions + if (this.rateLimiting.isOverloaded(txId)) { + //Skip load rejection according to the app + const isMultiSigFoundationTx = this.app.isMultiSigFoundationTx(tx) + if (isMultiSigFoundationTx) { + //dont rate limit multisig txs + nestedCountersInstance.countEvent('loadRelated', 'permitting foundation tx') + } else { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: reject_overload`) + this.statistics.incrementCounter('txRejected') + nestedCountersInstance.countEvent('rejected', 'isOverloaded') + return { success: false, reason: 'Maximum load exceeded.', status: 500 } + } + } + + try { + // Perform basic validation of the transaction fields + if (logFlags.verbose) this.mainLogger.debug('Performing initial validation of the transaction') + + // inputAppData is a new concept that allows the JSON RPC Server to pass in a TX as well as additional + // metadata for the dapp. This data is then passed into precrack to be used for whatever calculations are needed + let appData: any = inputAppData ?? {} + + const internalTx = this.app.isInternalTx(tx) + if (internalTx && !this.isInternalTxAllowed()) { + return { + success: false, + reason: `Internal transactions are not allowed in ${this.p2p.networkMode} Mode.`, + status: 500, + } + } + if (!internalTx && this.p2p.networkMode !== 'processing') { + return { + success: false, + reason: `Application transactions are only allowed in processing Mode.`, + status: 500, + } + } + if (!internalTx && !this.config.p2p.allowEndUserTxnInjections) { + return { + success: false, + reason: `Application transactions are turned off.`, + status: 500, + } + } + + const senderAddress = this.app.getTxSenderAddress(tx) + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: sender:${senderAddress}`) + // Forward transaction to a node that has the account data locally if we don't have it + if (global === false) { + if (senderAddress == null) { + return { + success: false, + reason: `Sender address is not available.`, + status: 500, + } + } + const consensusGroup = this.getConsenusGroupForAccount(senderAddress) + const isConsensusNode = consensusGroup.some((node) => node.id === Self.id) + + if (Context.config.stateManager.forwardToLuckyNodes) { + if (isConsensusNode === false) { + // send transaction to lucky consensus group node + const result = await this.forwardTransactionToLuckyNodes( + senderAddress, + tx, + 'non-consensus to consensus', + '1' + ) + return result as Promise<{ success: boolean; reason: string; status: number; txId?: string }> + } + // careful we may be consensus node but if we are not lucky we should forward to lucky nodes + let luckyNodeIds = this.getClosestNodes( + senderAddress, + Context.config.stateManager.numberOfReInjectNodes, + false + ) + let isLuckyNode = luckyNodeIds.some((nodeId) => nodeId === Self.id) + if (isLuckyNode === false) { + const result = await this.forwardTransactionToLuckyNodes( + senderAddress, + tx, + 'non-lucky consensus to lucky' + 
' consensus', + '2' + ) + return result as Promise<{ success: boolean; reason: string; status: number; txId?: string }> + } + } + } + + // we are consensus lucky node for this tx + let shouldAddToNonceQueue = false + let txNonce + if (internalTx === false) { + let senderAccountNonce = await this.app.getAccountNonce(senderAddress) + txNonce = await this.app.getNonceFromTx(tx) + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: sNonce:${senderAccountNonce}`) + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: txNonce:${txNonce}`) + + if (senderAccountNonce == null) { + if (this.config.mode === ShardusTypes.ServerMode.Release) { + return { + success: false, + reason: `Sender account nonce is not available. ${utils.stringifyReduce(tx)}`, + status: 500, + } + } + senderAccountNonce = BigInt(0) + } + + // app layer should return -1 if the account or tx does not have a nonce field + if (txNonce >= 0 && senderAccountNonce >= 0) { + if (txNonce < senderAccountNonce) { + if (logFlags.debug) this.mainLogger.debug(`txNonce < senderAccountNonce ${txNonce} < ${senderAccountNonce}`) + nestedCountersInstance.countEvent('rejected', 'txNonce < senderAccountNonce') + return { + success: false, + reason: `Transaction nonce is less than the account nonce. ${txNonce} < ${senderAccountNonce} ${utils.stringifyReduce( + tx + )} `, + status: 500, + } + } else if (txNonce > senderAccountNonce) { + // if the tx is already in the nonceQueue (based on txid not nonce), return an accepted response + const txInNonceQueue = this.stateManager.transactionQueue.isTxInPendingNonceQueue(senderAddress, txId) + if (txInNonceQueue) { + return { + success: true, + reason: `Transaction is already in pending nonce queue.`, + status: 200, + } + } + if (logFlags.debug) + this.mainLogger.debug( + `txNonce > senderAccountNonce ${txNonce} > ${senderAccountNonce} but txId is not in nonce queue yet` + ) + + // decide whether to put it in the nonce queue or not + const maxAllowedPendingNonce = senderAccountNonce + BigInt(Context.config.stateManager.maxPendingNonceTxs) + if (txNonce <= maxAllowedPendingNonce) { + shouldAddToNonceQueue = true + if (logFlags.debug) + this.mainLogger.debug(`txNonce > senderAccountNonce ${txNonce} > ${senderAccountNonce}`) + } else { + if (logFlags.debug) + this.mainLogger.debug( + `txNonce > senderAccountNonce ${txNonce} > ${senderAccountNonce} + ${Context.config.stateManager.maxPendingNonceTxs}` + ) + nestedCountersInstance.countEvent('rejected', 'txNonce > senderAccountNonce + maxPendingNonceTxs') + return { + success: false, + reason: `Transaction nonce ${txNonce.toString()} is greater than max allowed pending nonce of ${maxAllowedPendingNonce.toString()}`, + status: 500, + } + } + } + } + } + + const shouldQueueNonceButPoolIsFull = + shouldAddToNonceQueue && + this.config.stateManager.maxNonceQueueSize <= this.stateManager.transactionQueue.nonceQueue.size + + //ITN fix. There will be separate effort to protect the pool more intelligently for mainnet. 
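// Illustrative sketch (not part of the original patch): the nonce handling above reduces to a
// three-way decision: reject when the tx nonce is behind the account nonce, queue when it is
// ahead but within the configured pending window, and process immediately when they match.
// A minimal stand-alone restatement of that rule is sketched below; `NonceDecision` and
// `decideNonceAction` are hypothetical names, and the duplicate-txId / pool-full checks that
// the real code also performs are omitted here.
type NonceDecision = 'reject' | 'queue' | 'processNow'

function decideNonceAction(txNonce: bigint, accountNonce: bigint, maxPendingNonceTxs: number): NonceDecision {
  if (txNonce < accountNonce) return 'reject' // stale nonce, can never apply
  if (txNonce === accountNonce) return 'processNow' // next expected nonce
  // Ahead of the account nonce: allow a bounded number of future nonces to wait in the
  // pending nonce queue, otherwise reject to protect the pool.
  return txNonce <= accountNonce + BigInt(maxPendingNonceTxs) ? 'queue' : 'reject'
}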
+ if (shouldQueueNonceButPoolIsFull) { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: reject_nonce_full`) + nestedCountersInstance.countEvent('rejected', `Nonce pool is full, try again later`) + return { + success: false, + reason: `Nonce pool is full, try again later`, + status: 500, + } + } + if (shouldAddToNonceQueue) { + const nonceQueueEntry: NonceQueueItem = { + tx, + txId, + accountId: senderAddress, + nonce: txNonce, + appData, + global, + noConsensus, + } + let nonceQueueAddResult = this.stateManager.transactionQueue.addTransactionToNonceQueue(nonceQueueEntry) + + if (Context.config.stateManager.forwardToLuckyNodesNonceQueue) { + // if we ever support cancellation by using replacment for a TX that will change how we + // need to handle this run-away protection. may need to re-evaluate later + if ( + nonceQueueAddResult?.alreadyAdded === true && + Context.config.stateManager.forwardToLuckyNodesNonceQueueLimitFix + ) { + nestedCountersInstance.countEvent( + 'statistics', + `forwardTxToConsensusGroup: nonce queue skipped. we already have it` + ) + return { + success: true, + reason: `Transaction already added to pending nonce queue.`, + status: 200, + } + } + let result = this.forwardTransactionToLuckyNodes(senderAddress, tx, txId, 'consensus to consensus', '3') // don't wait here + return result as Promise<{ success: boolean; reason: string; status: number; txId?: string }> + } else { + return { + success: true, + reason: `Transaction added to pending nonce queue.`, + status: 200, + } + } + } else { + // tx nonce is equal to account nonce + let result = await this._timestampAndQueueTransaction(tx, appData, global, noConsensus, 'immediateQueue') + + // start of timestamp logging + if (logFlags.important_as_error) { + const txTimestamp = this.app.getTimestampFromTransaction(tx, appData) + const nowNodeTimestamp = shardusGetTime() + const ntpOffset = getNetworkTimeOffset() + /* prettier-ignore */ console.log(`TxnTS: shardus.put() txTimestamp=${txTimestamp}, nowNodeTimestamp=${nowNodeTimestamp}, ntpOffset=${ntpOffset}, txID=${txId}`) + } + // end of timestamp logging. + + return result + } + // Pass received txs to any subscribed 'DATA' receivers + // this.io.emit('DATA', tx) + } catch (err) { + this.shardus_fatal(`put_ex_` + err.message, `Put: Failed to process transaction. 
Exception: ${err}`) + this.fatalLogger.fatal('Put: ' + err.name + ': ' + err.message + ' at ' + err.stack) + return { + success: false, + reason: `Failed to process transaction: ${utils.stringifyReduce(tx)} ${inspect(err)}`, + status: 500, // 500 status code means transaction is generally failed + } + } finally { + this.profiler.profileSectionEnd('put') + } + }, + + async forwardTransactionToLuckyNodes( + senderAddress: string, + tx: ShardusTypes.OpaqueTransaction, + txId: string, + message = '', + context = '' + ): Promise { + let closetNodeIds = this.getClosestNodes(senderAddress, Context.config.stateManager.numberOfReInjectNodes, false) + const cycleShardData = this.stateManager.currentCycleShardData + const homeNode = ShardFunctions.findHomeNode( + cycleShardData.shardGlobals, + senderAddress, + cycleShardData.parititionShardDataMap + ) + if (homeNode == null) { + return { success: false, reason: `Home node not found for account ${senderAddress}`, status: 500 } + } + /* prettier-ignore */ if (logFlags.debug || logFlags.rotation) this.mainLogger.debug( `forwardTransactionToLuckyNodes: homeNode: ${homeNode.node.id} closetNodeIds: ${Utils.safeStringify( closetNodeIds.sort() )}` ) + + let selectedValidators = [] + if (Self.id != homeNode.node.id) + selectedValidators.push({ + id: homeNode.node.id, + ip: homeNode.node.externalIp, + port: homeNode.node.externalPort, + publicKey: homeNode.node.publicKey, + }) + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: lucky_forward_homenode_${context} ${NodeList.activeIdToPartition.get(homeNode.node.id)}`) + + let stats = { + skippedSelf: 0, + skippedRotation: 0, + skippedHome: 0, + ok_inQ: 0, + ok_inQ2: 0, + ok_addQ: 0, + } + + for (const id of closetNodeIds) { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: lucky_forward_closetnode_${context} ${NodeList.activeIdToPartition.get(id)}`) + if (id === Self.id) { + stats.skippedSelf++ + continue + } + if (id === homeNode.node.id) { + stats.skippedHome++ + continue // we already added the home node + } + let node = nodes.get(id) + + //is this node safe in terms of rotation + let rotationCheckPassed = true + if (Context.config.stateManager.forwardToLuckyNodesCheckRotation) { + //is in rotation means it in the edge + rotationCheckPassed = isNodeInRotationBounds(id) === false + } + + // if the node is not active or not in rotation bounds, skip it + if (node.status !== 'active' || rotationCheckPassed === false) { + /* prettier-ignore */ if (logFlags.debug || logFlags.rotation) this.mainLogger.debug( `forwardTransactionToLuckyNodes: node ${id} is not active or in rotation bounds. 
node.status: ${ node.status } isNodeInRotationBounds: ${isNodeInRotationBounds(id)}` ) + stats.skippedRotation++ + continue + } + const validatorDetails = { + id: node.id, + ip: node.externalIp, + port: node.externalPort, + publicKey: node.publicKey, + } + selectedValidators.push(validatorDetails) + } + + let successCount = 0 + let failedCount = 0 + for (const validator of selectedValidators) { + try { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: lucky_forward_req_${context} ${NodeList.activeIdToPartition.get(validator.id)}`) + + if (validator.id === homeNode.node.id) { + /* prettier-ignore */ if (logFlags.debug || logFlags.rotation) this.mainLogger.debug( `Forwarding injected tx ${txId} to home node ${validator.id} reason: ${message} ${Utils.safeStringify(tx)}` ) + nestedCountersInstance.countEvent('statistics', `forwardTxToHomeNode: ${message}`) + } else { + /* prettier-ignore */ if (logFlags.debug || logFlags.rotation) this.mainLogger.debug( `Forwarding injected tx ${txId} to consensus group. reason: ${message} ${Utils.safeStringify(tx)}` ) + nestedCountersInstance.countEvent('statistics', `forwardTxToConsensusGroup: ${message}`) + } + + const result: ShardusTypes.InjectTxResponse = await this.app.injectTxToConsensor([validator], tx) + + if (result == null) { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: lucky_forward_null_${context} ${NodeList.activeIdToPartition.get(validator.id)}`) + /* prettier-ignore */ if (logFlags.debug || logFlags.rotation) this.mainLogger.debug( `Got null/undefined response upon forwarding injected tx: ${txId} to node ${validator.id}` ) + failedCount++ + continue + } + if (result && result.success === false) { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: lucky_forward_false_${context} ${NodeList.activeIdToPartition.get(validator.id)}`) + /* prettier-ignore */ if (logFlags.debug || logFlags.rotation) this.mainLogger.debug( `Got unsuccessful response upon forwarding injected tx: ${validator.id}. ${message} ${Utils.safeStringify(tx)}` ) + failedCount++ + continue + } + if (result && result.success === true) { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: lucky_forward_success_${context} ${NodeList.activeIdToPartition.get(validator.id)}`) + /* prettier-ignore */ if (logFlags.debug || logFlags.rotation) this.mainLogger.debug( `Got successful response upon forwarding injected tx: ${validator.id}. 
${message} ${Utils.safeStringify(tx)}` ) + + if (result.reason === 'Transaction is already in pending nonce queue.') { + stats.ok_inQ++ + } + if (result.reason === `Transaction already added to pending nonce queue.`) { + stats.ok_inQ2++ + } + if (result.reason === `Transaction added to pending nonce queue.`) { + stats.ok_addQ++ + } + + nestedCountersInstance.countEvent( + 'statistics', + `forward to lucky node success ${message} ${Utils.safeStringify(stats)}` + ) + if (Context.config.stateManager.forwardToLuckyMulti) { + successCount++ + continue + } + return { success: true, reason: 'Transaction forwarded to validators', status: 200 } + } + } catch (e) { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: lucky_forward_ex_${context} ${NodeList.activeIdToPartition.get(validator.id)}`) + /* prettier-ignore */ if (logFlags.debug || logFlags.rotation) this.mainLogger.error( `Forwarding injected tx to ${validator.id} failed. ${message} ${Utils.safeStringify(tx)} error: ${ e.stack }` ) + } + } + + if (successCount > 0) { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: lucky_forward_success_${context}`) + /* prettier-ignore */ if (logFlags.debug || logFlags.rotation) this.mainLogger.debug( `Got successful response upon forwarding injected tx: ${message} ${Utils.safeStringify(tx)}` ) + nestedCountersInstance.countEvent( + 'statistics', + `forward to luck success ${message} failed/success/total: ${failedCount}/${successCount}/${selectedValidators.length}` + ) + return { success: true, reason: 'Transaction forwarded to validators', status: 200 } + } + + nestedCountersInstance.countEvent('statistics', `forward failed: ${message} ${Utils.safeStringify(stats)}`) + /* prettier-ignore */ if (logFlags.debug || logFlags.rotation) this.mainLogger.error( `Forwarding injected tx out of tries. ${Utils.safeStringify(stats)} ${Utils.safeStringify(tx)} ` ) + return { success: false, reason: 'No validators found to forward the transaction', status: 500 } + }, + + async _timestampAndQueueTransaction( + tx: ShardusTypes.OpaqueTransaction, + appData: any, + global = false, + noConsensus = false, + loggingContext = '' + ) { + // Give the dapp an opportunity to do some up front work and generate + // appData metadata for the applied TX + const { status: preCrackSuccess, reason } = await this.app.txPreCrackData(tx, appData) + if (this.config.stateManager.checkPrecrackStatus === true && preCrackSuccess === false) { + return { + success: false, + reason: `PreCrack has failed. 
${reason}`, + status: 500, + } + } + + const injectedTimestamp = this.app.getTimestampFromTransaction(tx, appData) + + const txId = this.app.calculateTxId(tx) + let timestampReceipt: ShardusTypes.TimestampReceipt + let isMissingInjectedTimestamp = !injectedTimestamp || injectedTimestamp === -1 + if (isMissingInjectedTimestamp) { + if (injectedTimestamp === -1) { + /* prettier-ignore */ + if (logFlags.p2pNonFatal && logFlags.console) console.log("Dapp request to generate a new timestmap for the tx"); + } + timestampReceipt = await this.stateManager.transactionConsensus.askTxnTimestampFromNode(txId) + /* prettier-ignore */ + if (logFlags.p2pNonFatal && logFlags.console) console.log("Network generated a" + + " timestamp", txId, timestampReceipt); + } + if (isMissingInjectedTimestamp && !timestampReceipt) { + this.shardus_fatal('put_noTimestamp', `Transaction timestamp cannot be determined ${utils.stringifyReduce(tx)} `) + this.statistics.incrementCounter('txRejected') + nestedCountersInstance.countEvent('rejected', `_timestampNotDetermined-${loggingContext}`) + return { + success: false, + reason: 'Transaction timestamp cannot be determined.', + status: 500, + } + } + let timestampedTx: ShardusTypes.TimestampedTx + if (timestampReceipt && timestampReceipt.timestamp) { + timestampedTx = { + tx, + timestampReceipt, + } + } else { + timestampedTx = { tx } + } + + // Perform fast validation of the transaction fields + const validateResult = this.app.validate(timestampedTx, appData) + if (validateResult.success === false) { + // 400 is a code for bad tx or client faulty + validateResult.status = validateResult.status ? validateResult.status : 400 + return validateResult + } + + // Ask App to crack open tx and return timestamp, id (hash), and keys + const { timestamp, id, keys, shardusMemoryPatterns } = this.app.crack(timestampedTx, appData) + + const uniqueTags = this.app.getUniqueAppTags?.(tx) + if (uniqueTags && Object.keys(uniqueTags).length > 0) { + const result = this.stateManager.transactionQueue.findEntryWithAnyTag(uniqueTags) + if (result) { + const { entry: existingEntry, matchedKey, matchedValue } = result + + if (logFlags.important_as_error) { + this.mainLogger.debug( + `Transaction rejected - unique app tag key ${matchedKey} with value ${matchedValue} already in use by tx: ${existingEntry.acceptedTx.txId}` + ) + } + + nestedCountersInstance.countEvent('rejected', 'duplicateUniqueAppTag') + return { + success: false, + reason: `Transaction contains a unique app tag key ${matchedKey} with value ${matchedValue} that is already in use`, + status: 400, + } + } + } + + // console.log('app.crack results', timestamp, id, keys) + + if (this.config.stateManager.checkDestLimits) { + try { + // does this TX need to be gated for potential infulencer-mode effects + const isDestLimitTx = this.app.isDestLimitTx(appData) + // this code must be upgraded before we turn the EVM on + if (isDestLimitTx && keys.targetKeys?.length > 0) { + const addressToCheck = keys.targetKeys[0] + const addressHitLimit = this.config.stateManager.checkDestLimitCount + // Add comprehensive check for addressToCheck validness + if (addressToCheck && typeof addressToCheck === 'string' && addressToCheck.trim() !== '') { + const addressSeenCount = this.stateManager.transactionQueue.addressCountInQueue( + addressToCheck, + addressHitLimit + ) + if (addressSeenCount >= addressHitLimit) { + /* prettier-ignore */ if(logFlags.error) this.shardus_fatal( `put_destLimitExceeded`, `Transaction has too many addresses in the queue: 
${addressToCheck} ${utils.stringifyReduce(tx)}` ) + this.statistics.incrementCounter('txRejected') + nestedCountersInstance.countEvent('rejected', `addressSeenCount > ${addressHitLimit}`) + nestedCountersInstance.countEvent('destLimitCheck', `rejected addressSeenCount > ${addressHitLimit}`) + return { success: false, reason: 'Same destination load limit', status: 400 } + } else { + nestedCountersInstance.countEvent('destLimitCheck', `admitted: ${addressSeenCount}`) + } + } + } + } catch (err) { + // Log the error but continue processing the transaction + this.mainLogger.error(`Error in destination limit check: ${utils.formatErrorMessage(err)}`) + nestedCountersInstance.countEvent('destLimitCheck', 'error in check, error: ' + err?.message) + // if our dest check fails we must reject this tx + return { success: false, reason: 'Same destination load limit error', status: 400 } + } + } + + // Validate the transaction's sourceKeys & targetKeys + if (this.config.debug.checkAddressFormat && !isValidShardusAddress(keys.allKeys)) { + this.shardus_fatal( + `put_invalidAddress`, + `Invalid Shardus Address found: allKeys:${keys.allKeys} ${utils.stringifyReduce(tx)}` + ) + this.statistics.incrementCounter('txRejected') + nestedCountersInstance.countEvent('rejected', '_hasInvalidShardusAddresses') + return { success: false, reason: 'Invalid Shardus Addresses', status: 400 } + } + // Validate the transaction timestamp + let txExpireTimeMs = this.config.transactionExpireTime * 1000 + + if (global) { + txExpireTimeMs = 2 * 10 * 1000 //todo consider if this should be a config. + } + + if (inRangeOfCurrentTime(timestamp, txExpireTimeMs, txExpireTimeMs) === false) { + /* prettier-ignore */ + this.shardus_fatal(`tx_outofrange`, `Transaction timestamp out of range: timestamp:${timestamp} now:${shardusGetTime()} diff(now-ts):${shardusGetTime() - timestamp} ${utils.stringifyReduce(tx)} our offset: ${getNetworkTimeOffset()} loggingContext: ${loggingContext}`); + this.statistics.incrementCounter('txRejected') + nestedCountersInstance.countEvent('rejected', 'transaction timestamp out of range') + return { success: false, reason: 'Transaction timestamp out of range', status: 400 } + } + + this.profiler.profileSectionStart('put') + + //as ShardusMemoryPatternsInput + // Pack into acceptedTx, and pass to StateManager + const acceptedTX: ShardusTypes.AcceptedTx = { + timestamp, + txId: id, + keys, + data: timestampedTx, + appData, + shardusMemoryPatterns: shardusMemoryPatterns, + } + if (logFlags.verbose) this.mainLogger.debug('Transaction validated') + if (global === false) { + //temp way to make global modifying TXs not over count + this.statistics.incrementCounter('txInjected') + } + this.logger.playbackLogNote('tx_injected', `${txId}`, `Transaction: ${utils.stringifyReduce(timestampedTx)}`) + let added = this.stateManager.transactionQueue.routeAndQueueAcceptedTransaction( + acceptedTX, + /*send gossip*/ true, + null, + global, + noConsensus + ) + if (logFlags.verbose) { + this.mainLogger.debug(`End of injectTransaction ${utils.stringifyReduce(tx)}, added: ${added}`) + } + + return { + success: true, + reason: 'Transaction queued, poll for results.', + status: 200, // 200 status code means transaction is generally successful + txId, + } + } +} + diff --git a/src/state-manager/AccountPatcher.debug.ts b/src/state-manager/AccountPatcher.debug.ts new file mode 100644 index 000000000..5092cd152 --- /dev/null +++ b/src/state-manager/AccountPatcher.debug.ts @@ -0,0 +1,296 @@ +import * as utils from '../utils' +import { 
Utils } from '@shardeum-foundation/lib-types' +import { Response } from 'express-serve-static-core' + +interface Line { + raw: string + file: { + owner: string + } +} + +export const debugMethods = { + /** + * processShardDump + * debug only code to create a shard report. + * @param stream + * @param lines + */ + processShardDump( + stream: Response, number>, + lines: Line[] + ): { allPassed: boolean; allPassed2: boolean } { + const dataByParition = new Map() + + const rangesCovered = [] + const nodesListsCovered = [] + const nodeLists = [] + let newestCycle = -1 + const partitionObjects = [] + for (const line of lines) { + const index = line.raw.indexOf('{"allNodeIds') + if (index >= 0) { + const partitionStr = line.raw.slice(index) + //this.generalLog(string) + let partitionObj: { cycle: number; owner: string } + try { + partitionObj = Utils.safeJsonParse(partitionStr) + } catch (error) { + this.mainLogger.error('error parsing partitionObj', error, partitionStr) + continue + } + + if (newestCycle > 0 && partitionObj.cycle != newestCycle) { + stream.write( + `wrong cycle for node: ${line.file.owner} reportCycle:${newestCycle} thisNode:${partitionObj.cycle} \n` + ) + continue + } + partitionObjects.push(partitionObj) + + if (partitionObj.cycle > newestCycle) { + newestCycle = partitionObj.cycle + } + partitionObj.owner = line.file.owner //line.raw.slice(0, index) + } + } + + for (const partitionObj of partitionObjects) { + // we only want data for nodes that were active in the latest cycle. + if (partitionObj.cycle === newestCycle) { + for (const partition of partitionObj.partitions) { + let results = dataByParition.get(partition.parititionID) + if (results == null) { + results = [] + dataByParition.set(partition.parititionID, results) + } + results.push({ + owner: partitionObj.owner, + accounts: partition.accounts, + ownerId: partitionObj.rangesCovered.id, + accounts2: partition.accounts2, + partitionHash2: partition.partitionHash2, + }) + } + rangesCovered.push(partitionObj.rangesCovered) + nodesListsCovered.push(partitionObj.nodesCovered) + nodeLists.push(partitionObj.allNodeIds) + } + } + + // need to only count stuff from the newestCycle. + + // ///////////////////////////////////////////////// + // compare partition data: old system with data manual queried from app + let allPassed = true + // let uniqueVotesByPartition = new Array(numNodes).fill(0) + for (const [key, value] of dataByParition) { + const results = value + const votes = {} + for (const entry of results) { + if (entry.accounts.length === 0) { + // new settings allow for not using accounts from sql + continue + } + entry.accounts.sort(function (a: { id: number }, b: { id: number }) { + return a.id === b.id ? 0 : a.id < b.id ? 
-1 : 1 + }) + const string = utils.stringifyReduce(entry.accounts) + let voteEntry = votes[string] // eslint-disable-line security/detect-object-injection + if (voteEntry == null) { + voteEntry = {} + voteEntry.voteCount = 0 + voteEntry.ownerIds = [] + votes[string] = voteEntry // eslint-disable-line security/detect-object-injection + } + voteEntry.voteCount++ + votes[string] = voteEntry // eslint-disable-line security/detect-object-injection + + voteEntry.ownerIds.push(entry.ownerId) + } + for (const key2 of Object.keys(votes)) { + const voteEntry = votes[key2] // eslint-disable-line security/detect-object-injection + let voters = '' + if (key2 !== '[]') { + voters = `---voters:${Utils.safeStringify(voteEntry.ownerIds)}` + } + + stream.write(`partition: ${key} votes: ${voteEntry.voteCount} values: ${key2} \t\t\t${voters}\n`) + // stream.write(` ---voters: ${JSON.stringify(voteEntry.ownerIds)}\n`) + } + const numUniqueVotes = Object.keys(votes).length + if (numUniqueVotes > 2 || (numUniqueVotes > 1 && votes['[]'] == null)) { + allPassed = false + stream.write(`partition: ${key} failed. Too many different version of data: ${numUniqueVotes} \n`) + } + } + stream.write(`partition tests all passed: ${allPassed}\n`) + // rangesCovered + + // ///////////////////////////////////////////////// + // compare partition data 2: new system using the state manager cache + let allPassed2 = true + // let uniqueVotesByPartition = new Array(numNodes).fill(0) + for (const [key, value] of dataByParition) { + const results = value + const votes = {} + for (const entry of results) { + // no account sort, we expect this to have a time sort! + // entry.accounts.sort(function (a, b) { return a.id === b.id ? 0 : a.id < b.id ? -1 : 1 }) + const fullString = utils.stringifyReduce(entry.accounts2) + let string = entry.partitionHash2 + if (string === undefined) { + string = '[]' + } + + let voteEntry = votes[string] // eslint-disable-line security/detect-object-injection + if (voteEntry == null) { + voteEntry = {} + voteEntry.voteCount = 0 + voteEntry.ownerIds = [] + voteEntry.fullString = fullString + votes[string] = voteEntry // eslint-disable-line security/detect-object-injection + } + voteEntry.voteCount++ + votes[string] = voteEntry // eslint-disable-line security/detect-object-injection + + voteEntry.ownerIds.push(entry.ownerId) + } + for (const key2 of Object.keys(votes)) { + const voteEntry = votes[key2] // eslint-disable-line security/detect-object-injection + let voters = '' + if (key2 !== '[]') { + voters = `---voters:${Utils.safeStringify(voteEntry.ownerIds)}` + } + + stream.write( + `partition: ${key} votes: ${voteEntry.voteCount} values: ${key2} \t\t\t${voters}\t -details:${voteEntry.fullString} \n` + ) + // stream.write(` ---voters: ${JSON.stringify(voteEntry.ownerIds)}\n`) + } + const numUniqueVotes = Object.keys(votes).length + if (numUniqueVotes > 2 || (numUniqueVotes > 1 && votes['[]'] == null)) { + allPassed2 = false + stream.write(`partition: ${key} failed. Too many different version of data: ${numUniqueVotes} \n`) + } + } + + stream.write(`partition tests all passed: ${allPassed2}\n`) + + rangesCovered.sort(function (a, b) { + return a.id === b.id ? 0 : a.id < b.id ? 
-1 : 1 + }) + + const isStored = function (i: number, rangeCovered: { stMin: number; stMax: number }): boolean { + const key = i + const minP = rangeCovered.stMin + const maxP = rangeCovered.stMax + if (minP === maxP) { + if (i !== minP) { + return false + } + } else if (maxP > minP) { + // are we outside the min to max range + if (key < minP || key > maxP) { + return false + } + } else { + // are we inside the min to max range (since the covered rage is inverted) + if (key > maxP && key < minP) { + return false + } + } + return true + } + const isConsensus = function (i: number, rangeCovered: { cMin: number; cMax: number }): boolean { + const key = i + const minP = rangeCovered.cMin + const maxP = rangeCovered.cMax + if (minP === maxP) { + if (i !== minP) { + return false + } + } else if (maxP > minP) { + // are we outside the min to max range + if (key < minP || key > maxP) { + return false + } + } else { + // are we inside the min to max range (since the covered rage is inverted) + if (key > maxP && key < minP) { + return false + } + } + return true + } + + for (const range of rangesCovered) { + let partitionGraph = '' + for (let i = 0; i < range.numP; i++) { + const isC = isConsensus(i, range) + const isSt = isStored(i, range) + + if (i === range.hP) { + partitionGraph += 'H' + } else if (isC && isSt) { + partitionGraph += 'C' + } else if (isC) { + partitionGraph += '!' + } else if (isSt) { + partitionGraph += 'e' + } else { + partitionGraph += '_' + } + } + + stream.write( + `node: ${range.id} ${range.ipPort}\tgraph: ${partitionGraph}\thome: ${range.hP} data:${Utils.safeStringify( + range + )}\n` + ) + } + stream.write(`\n\n`) + nodesListsCovered.sort(function (a, b) { + return a.id === b.id ? 0 : a.id < b.id ? -1 : 1 + }) + for (const nodesCovered of nodesListsCovered) { + let partitionGraph = '' + const consensusMap = {} + const storedMap = {} + for (const entry of nodesCovered.consensus) { + consensusMap[entry.idx] = { hp: entry.hp } + } + for (const entry of nodesCovered.stored) { + storedMap[entry.idx] = { hp: entry.hp } + } + + for (let i = 0; i < nodesCovered.numP; i++) { + const isC = consensusMap[i] != null // eslint-disable-line security/detect-object-injection + const isSt = storedMap[i] != null // eslint-disable-line security/detect-object-injection + if (i === nodesCovered.idx) { + partitionGraph += 'O' + } else if (isC && isSt) { + partitionGraph += 'C' + } else if (isC) { + partitionGraph += '!' 
+ } else if (isSt) { + partitionGraph += 'e' + } else { + partitionGraph += '_' + } + } + + stream.write( + `node: ${nodesCovered.id} ${nodesCovered.ipPort}\tgraph: ${partitionGraph}\thome: ${ + nodesCovered.hP + } data:${Utils.safeStringify(nodesCovered)}\n` + ) + } + stream.write(`\n\n`) + for (const list of nodeLists) { + stream.write(`${Utils.safeStringify(list)} \n`) + } + + return { allPassed, allPassed2 } + } +} \ No newline at end of file diff --git a/src/state-manager/AccountPatcher.finder.ts b/src/state-manager/AccountPatcher.finder.ts new file mode 100644 index 000000000..33c915c08 --- /dev/null +++ b/src/state-manager/AccountPatcher.finder.ts @@ -0,0 +1,430 @@ +import { AccountIDAndHash, AccountIdAndHashToRepair, RadixAndHashWithNodeId, RadixAndHash } from './state-manager-types' +import { nestedCountersInstance } from '../utils/nestedCounters' +import * as NodeList from '../p2p/NodeList' +import { logFlags } from '../logger' +import * as utils from '../utils' +import { Utils } from '@shardeum-foundation/lib-types' +import { AccountHashCache, AccountHashCacheHistory, HashTrieAccountsResp, HashTrieNode } from './state-manager-types' +import { Node } from '../shardus/shardus-types' + +interface BadAccountsInfo { + badAccounts: AccountIDAndHash[] + hashesPerLevel: number[] + checkedKeysPerLevel: any[] + requestedKeysPerLevel: number[] + badHashesPerLevel: number[] + accountHashesChecked: any + stats: any + extraBadAccounts: AccountIdAndHashToRepair[] + extraBadKeys: RadixAndHashWithNodeId[] + accountsTheyNeedToRepair: AccountIdAndHashToRepair[] +} + +export const finderMethods = { + /*** + * ######## #### ## ## ######## ######## ### ######## ### ###### ###### ####### ## ## ## ## ######## ###### + * ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## + * ## ## #### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #### ## ## ## + * ###### ## ## ## ## ## ## ######## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ###### + * ## ## ## #### ## ## ## ## ######### ## ## ######### ## ## ## ## ## ## ## #### ## ## + * ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## + * ## #### ## ## ######## ######## ## ## ######## ## ## ###### ###### ####### ####### ## ## ## ###### + */ + /** + * findBadAccounts + * + * starts at the sync level hashes that dont match and queries for child nodes to get more details about + * what accounts could possibly be bad. At the lowest level gets a list of accounts and hashes + * We double check out cache values before returning a list of bad accounts that need repairs. 
+ * + * @param cycle + */ + async findBadAccounts(cycle: number): Promise { + let badAccounts: AccountIDAndHash[] = [] + let accountsTheyNeedToRepair: AccountIdAndHashToRepair[] = [] + let accountsWeNeedToRepair: AccountIDAndHash[] = [] + const hashesPerLevel: number[] = Array(this.treeMaxDepth + 1).fill(0) + const checkedKeysPerLevel = Array(this.treeMaxDepth) + const badHashesPerLevel: number[] = Array(this.treeMaxDepth + 1).fill(0) + const requestedKeysPerLevel: number[] = Array(this.treeMaxDepth + 1).fill(0) + + let level = this.treeSyncDepth + let badLayerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection + const syncTrackerRanges = this.getSyncTrackerRanges() + + const stats = { + testedSyncRadix: 0, + skippedSyncRadix: 0, + badSyncRadix: 0, + ok_noTrieAcc: 0, + ok_trieHashBad: 0, + fix_butHashMatch: 0, + fixLastSeen: 0, + needsVotes: 0, + subHashesTested: 0, + trailColdLevel: 0, + checkedLevel: 0, + leafsChecked: 0, + leafResponses: 0, + getAccountHashStats: {}, + } + let extraBadKeys: RadixAndHashWithNodeId[] = [] + let extraBadAccounts: AccountIdAndHashToRepair[] = [] + + const minVotes = this.calculateMinVotes() + + const goodVotes: RadixAndHash[] = [] + const hashTrieSyncConsensus = this.hashTrieSyncConsensusByCycle.get(cycle) + for (const radix of hashTrieSyncConsensus.radixHashVotes.keys()) { + const votesMap = hashTrieSyncConsensus.radixHashVotes.get(radix) + let isSyncingRadix = false + + if (votesMap.bestVotes < minVotes) { + stats.needsVotes++ + if (logFlags.debug) { + //overkill, need it for now + const kvp = [] + for (const [key, value] of votesMap.allVotes.entries()) { + kvp.push({ + id: key, + count: value.count, + nodeIDs: [...value.voters].map((node) => utils.makeShortHash(node.id) + ':' + node.externalPort), + }) + } + const simpleMap = { + bestHash: votesMap.bestHash, + bestVotes: votesMap.bestVotes, + allVotes: kvp, + } + /* prettier-ignore */ nestedCountersInstance.countEvent(`accountPatcher`, `not enough votes ${radix} ${utils.makeShortHash(votesMap.bestHash)} uniqueVotes: ${votesMap.allVotes.size}`, 1) + this.statemanager_fatal( + 'debug findBadAccounts', + `debug findBadAccounts ${cycle}: ${radix} bestVotes${ + votesMap.bestVotes + } < minVotes:${minVotes} uniqueVotes: ${votesMap.allVotes.size} ${utils.stringifyReduce(simpleMap)}` + ) + } + // skipping 50% votes restriction to allow patcher to do account based patching + // continue + } + + //do we need to filter out a vote? 
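// Illustrative sketch (not part of the original patch): each radix at the sync depth carries a
// network-voted "best hash". The method keeps votes that are not inside a currently-syncing
// range and then, via diffConsenus (defined elsewhere in AccountPatcher), compares the winning
// hashes against the local trie layer for that depth. A minimal sketch of that comparison,
// assuming diffConsenus simply returns the radixes whose local hash is missing or different,
// is shown below; `diffAgainstLocalTrie` is a hypothetical name used only for illustration.
interface RadixHash { radix: string; hash: string }

function diffAgainstLocalTrie(goodVotes: RadixHash[], localLayer: Map<string, { hash: string }>): RadixHash[] {
  const mismatched: RadixHash[] = []
  for (const vote of goodVotes) {
    const localNode = localLayer.get(vote.radix)
    // A radix is suspect when we have no local trie node for it, or our hash
    // disagrees with the hash the network voted for.
    if (localNode == null || localNode.hash !== vote.hash) mismatched.push(vote)
  }
  return mismatched
}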
+ for (const range of syncTrackerRanges) { + if (radix >= range.low && radix <= range.high) { + isSyncingRadix = true + break + } + } + if (isSyncingRadix === true) { + stats.skippedSyncRadix++ + continue + } + stats.testedSyncRadix++ + goodVotes.push({ radix, hash: votesMap.bestHash }) + } + + let toFix = this.diffConsenus(goodVotes, badLayerMap) + + stats.badSyncRadix = toFix.length + + if (logFlags.debug) { + toFix.sort(this.sortByRadix) + this.statemanager_fatal( + 'debug findBadAccounts', + `debug findBadAccounts ${cycle}: toFix: ${utils.stringifyReduce(toFix)}` + ) + for (let radixToFix of toFix) { + const votesMap = hashTrieSyncConsensus.radixHashVotes.get(radixToFix.radix) + let hasNonConsensusRange = false + let hasNonStorageRange = false + + const nonConsensusRanges = this.getNonConsensusRanges(cycle) + const nonStorageRange = this.getNonStoredRanges(cycle) + for (const range of nonConsensusRanges) { + if (radixToFix.radix >= range.low && radixToFix.radix <= range.high) { + hasNonConsensusRange = true + nestedCountersInstance.countEvent(`accountPatcher`, `findBadAccounts hasNonConsensusRange`, 1) + } + } + for (const range of nonStorageRange) { + if (radixToFix.radix >= range.low && radixToFix.radix <= range.high) { + hasNonStorageRange = true + nestedCountersInstance.countEvent(`accountPatcher`, `findBadAccounts hasNonStorageRange`, 1) + } + } + + const kvp = [] + for (const [key, value] of votesMap.allVotes.entries()) { + kvp.push({ + id: key, + count: value.count, + nodeIDs: [...value.voters].map((node) => utils.makeShortHash(node.id) + ':' + node.externalPort), + }) + } + const simpleMap = { + bestHash: votesMap.bestHash, + bestVotes: votesMap.bestVotes, + allVotes: kvp, + } + this.statemanager_fatal( + 'debug findBadAccounts', + `debug findBadAccounts ${cycle}: ${ + radixToFix.radix + } isInNonConsensusRange: ${hasNonConsensusRange} isInNonStorageRange: ${hasNonStorageRange} bestVotes ${ + votesMap.bestVotes + } minVotes:${minVotes} uniqueVotes: ${votesMap.allVotes.size} ${utils.stringifyReduce(simpleMap)}` + ) + } + } + + //record some debug info + badHashesPerLevel[level] = toFix.length // eslint-disable-line security/detect-object-injection + checkedKeysPerLevel[level] = toFix.map((x) => x.radix) // eslint-disable-line security/detect-object-injection + requestedKeysPerLevel[level] = goodVotes.length // eslint-disable-line security/detect-object-injection + hashesPerLevel[level] = goodVotes.length // eslint-disable-line security/detect-object-injection + + this.computeCoverage(cycle) + + stats.checkedLevel = level + //refine our query until we get to the lowest level + while (level < this.treeMaxDepth && toFix.length > 0) { + level++ + stats.checkedLevel = level + badLayerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection + const remoteChildrenToDiff: RadixAndHashWithNodeId[] = await this.getChildrenOf(toFix, cycle) + + if (remoteChildrenToDiff == null) { + nestedCountersInstance.countEvent( + `accountPatcher`, + `findBadAccounts remoteChildrenToDiff == null for radixes: ${Utils.safeStringify(toFix)}, cycle: ${cycle}`, + 1 + ) + } + if (remoteChildrenToDiff.length === 0) { + nestedCountersInstance.countEvent( + `accountPatcher`, + `findBadAccounts remoteChildrenToDiff.length = 0 for radixes: ${Utils.safeStringify(toFix)}, cycle: ${cycle}`, + 1 + ) + } + + this.mainLogger.debug( + `findBadAccounts ${cycle}: level: ${level}, toFix: ${toFix.length}, childrenToDiff: ${Utils.safeStringify( + remoteChildrenToDiff + )}, badLayerMap: 
${Utils.safeStringify(badLayerMap)}` + ) + toFix = this.diffConsenus(remoteChildrenToDiff, badLayerMap) + + stats.subHashesTested += toFix.length + + if (toFix.length === 0) { + stats.trailColdLevel = level + extraBadKeys = this.findExtraBadKeys(remoteChildrenToDiff, badLayerMap) + + let result = { + nodeChildHashes: [], + stats: { + matched: 0, + visisted: 0, + empty: 0, + childCount: 0, + }, + } as HashTrieAccountsResp + + let allLeafNodes: HashTrieNode[] = [] + + for (const radixAndHash of extraBadKeys) { + let level = radixAndHash.radix.length + while (level < this.treeMaxDepth) { + level++ + const layerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection + if (layerMap == null) { + /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_accountHashes badrange:${level}`) + break + } + const hashTrieNode = layerMap.get(radixAndHash.radix) + if (hashTrieNode != null && hashTrieNode.accounts != null) { + result.stats.visisted++ + const childAccounts = [] + result.nodeChildHashes.push({ radix: radixAndHash.radix, childAccounts }) + for (const account of hashTrieNode.accounts) { + childAccounts.push({ accountID: account.accountID, hash: account.hash }) + extraBadAccounts.push({ + accountID: account.accountID, + hash: account.hash, + targetNodeId: radixAndHash.nodeId, + }) + result.stats.childCount++ + } + if (hashTrieNode.accounts.length === 0) { + result.stats.empty++ + } + } + } + } + + for (const radixAndHash of extraBadKeys) { + const radix = radixAndHash.radix + result.stats.visisted++ + const level = radix.length + const layerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection + if (layerMap == null) { + /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_accountHashes badrange:${level}`) + break + } + + const currentNode = layerMap.get(radix) + const leafs: HashTrieNode[] = this.extractLeafNodes(currentNode) + for (const leaf of leafs) { + if (leaf != null && leaf.accounts != null) { + result.stats.matched++ + const childAccounts = [] + result.nodeChildHashes.push({ radix, childAccounts }) + for (const account of leaf.accounts) { + childAccounts.push({ accountID: account.accountID, hash: account.hash }) + extraBadAccounts.push({ + accountID: account.accountID, + hash: account.hash, + targetNodeId: radixAndHash.nodeId, + }) + result.stats.childCount++ + } + if (leaf.accounts.length === 0) { + result.stats.empty++ + } + } + } + } + + if (extraBadKeys.length > 0) { + toFix = toFix.concat(extraBadKeys) + break + } + } + + //record some debug info + badHashesPerLevel[level] = toFix.length // eslint-disable-line security/detect-object-injection + checkedKeysPerLevel[level] = toFix.map((x) => x.radix) // eslint-disable-line security/detect-object-injection + requestedKeysPerLevel[level] = remoteChildrenToDiff.length // eslint-disable-line security/detect-object-injection + hashesPerLevel[level] = remoteChildrenToDiff.length // eslint-disable-line security/detect-object-injection + // badLayerMap.size ...badLayerMap could be null! 
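// Illustrative sketch (not part of the original patch): once the drill-down bottoms out, the
// remaining radixes are leaves and the comparison switches from radix hashes to individual
// account hashes. The real code classifies accounts with several cache-reconciliation cases
// (ok_trieHashBad, fix_butHashMatch, etc.); the simplified, hypothetical helper below only
// separates remote-reported mismatches from accounts that exist locally but are absent from
// the remote response. `splitLeafDifferences` is an illustrative name only.
interface AccountHash { accountID: string; hash: string }

function splitLeafDifferences(local: Map<string, AccountHash>, remote: AccountHash[]) {
  const patchFromRemote: AccountHash[] = [] // remote reports a value we are missing or disagree with
  const localOnly: AccountHash[] = [] // accounts the remote response does not mention at all
  const remoteIds = new Set(remote.map((a) => a.accountID))
  for (const remoteAcc of remote) {
    const localAcc = local.get(remoteAcc.accountID)
    if (localAcc == null || localAcc.hash !== remoteAcc.hash) patchFromRemote.push(remoteAcc)
  }
  for (const [id, localAcc] of local) {
    if (!remoteIds.has(id)) localOnly.push(localAcc)
  }
  return { patchFromRemote, localOnly }
}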
+ } + + stats.leafsChecked = toFix.length + //get bad accounts from the leaf nodes + const { radixAndChildHashes, getAccountHashStats } = await this.getChildAccountHashes(toFix, cycle) + stats.getAccountHashStats = getAccountHashStats + + stats.leafResponses = radixAndChildHashes.length + + let accountHashesChecked = 0 + for (const radixAndChildHash of radixAndChildHashes) { + accountHashesChecked += radixAndChildHash.childAccounts.length + + const badTreeNode = badLayerMap.get(radixAndChildHash.radix) + if (badTreeNode != null) { + const localAccountsMap = new Map() + const remoteAccountsMap = new Map() + if (badTreeNode.accounts != null) { + for (let i = 0; i < badTreeNode.accounts.length; i++) { + if (badTreeNode.accounts[i] == null) continue + localAccountsMap.set(badTreeNode.accounts[i].accountID, badTreeNode.accounts[i]) // eslint-disable-line security/detect-object-injection + } + } + for (let account of radixAndChildHash.childAccounts) { + remoteAccountsMap.set(account.accountID, { account, nodeId: radixAndChildHash.nodeId }) + } + if (radixAndChildHash.childAccounts.length > localAccountsMap.size) { + /* prettier-ignore */ if (this.config.debug.verboseNestedCounters) nestedCountersInstance.countEvent(`accountPatcher`, `remote trie node has more accounts, radix: ${radixAndChildHash.radix}`) + } else if (radixAndChildHash.childAccounts.length < localAccountsMap.size) { + /* prettier-ignore */ if (this.config.debug.verboseNestedCounters) nestedCountersInstance.countEvent(`accountPatcher`, `remote trie node has less accounts than local trie node, radix: ${radixAndChildHash.radix}`) + } else if (radixAndChildHash.childAccounts.length === localAccountsMap.size) { + /* prettier-ignore */ if (this.config.debug.verboseNestedCounters) nestedCountersInstance.countEvent(`accountPatcher`, `remote trie node has same number of accounts as local trie node, radix: ${radixAndChildHash.radix}`) + } + for (let i = 0; i < radixAndChildHash.childAccounts.length; i++) { + const potentalGoodAcc = radixAndChildHash.childAccounts[i] // eslint-disable-line security/detect-object-injection + const potentalBadAcc = localAccountsMap.get(potentalGoodAcc.accountID) + + //check if our cache value has matching hash already. The trie can lag behind. + // todo would be nice to find a way to reduce this, possibly by better control of syncing ranges. + // (we are not supposed to test syncing ranges , but maybe that is out of phase?) + + //only do this check if the account is new. It was skipping potential oos situations. + const accountMemData: AccountHashCache = this.stateManager.accountCache.getAccountHash( + potentalGoodAcc.accountID + ) + if (accountMemData != null && accountMemData.h === potentalGoodAcc.hash) { + if (accountMemData.c >= cycle - 1) { + if (potentalBadAcc != null) { + if (potentalBadAcc.hash != potentalGoodAcc.hash) { + stats.ok_trieHashBad++ // mem account is good but trie account is bad + } + } else { + stats.ok_noTrieAcc++ // no trie account at all + } + + //this was in cache, but stale so we can reinstate the cache since it still matches the group consensus + const accountHashCacheHistory: AccountHashCacheHistory = + this.stateManager.accountCache.getAccountHashHistoryItem(potentalGoodAcc.accountID) + if ( + accountHashCacheHistory != null && + accountHashCacheHistory.lastStaleCycle >= accountHashCacheHistory.lastSeenCycle + ) { + stats.fixLastSeen++ + accountHashCacheHistory.lastSeenCycle = cycle + } + //skip out + continue + } else { + //dont skip out! 
+ //cache matches but trie hash is bad + stats.fix_butHashMatch++ + //actually we can repair trie here: + this.updateAccountHash(potentalGoodAcc.accountID, potentalGoodAcc.hash) + continue + } + } + + //is the account missing or wrong hash? + if (potentalBadAcc != null) { + if (potentalBadAcc.hash != potentalGoodAcc.hash) { + badAccounts.push(potentalGoodAcc) + } + } else { + badAccounts.push(potentalGoodAcc) + } + } + for (let i = 0; i < badTreeNode.accounts.length; i++) { + const localAccount = badTreeNode.accounts[i] // eslint-disable-line security/detect-object-injection + if (localAccount == null) continue + const remoteNodeItem = remoteAccountsMap.get(localAccount.accountID) + if (remoteNodeItem == null) { + accountsWeNeedToRepair.push(localAccount) + continue + } + const { account: remoteAccount, nodeId: targetNodeId } = remoteNodeItem + if (remoteAccount == null) { + accountsTheyNeedToRepair.push({ ...localAccount, targetNodeId }) + } + } + } else { + badAccounts = badAccounts.concat(radixAndChildHash.childAccounts) + } + } + if (accountsTheyNeedToRepair.length > 0) { + nestedCountersInstance.countEvent(`accountPatcher`, `accountsTheyNeedToRepair`, accountsTheyNeedToRepair.length) + } + return { + badAccounts, + hashesPerLevel, + checkedKeysPerLevel, + requestedKeysPerLevel, + badHashesPerLevel, + accountHashesChecked, + stats, + extraBadAccounts, + extraBadKeys, + accountsTheyNeedToRepair, + } + } +} \ No newline at end of file diff --git a/src/state-manager/AccountPatcher.handlers.ts b/src/state-manager/AccountPatcher.handlers.ts new file mode 100644 index 000000000..b4c0bcd10 --- /dev/null +++ b/src/state-manager/AccountPatcher.handlers.ts @@ -0,0 +1,1507 @@ +import * as Self from '../p2p/Self' +import * as Context from '../p2p/Context' +import * as NodeList from '../p2p/NodeList' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { profilerInstance } from '../utils/profiler' +import { isDebugModeMiddleware, isDebugModeMiddlewareLow, isDebugModeMiddlewareMedium } from '../network/debugMiddleware' +import { logFlags } from '../logger' +import * as utils from '../utils' +import { Utils } from '@shardeum-foundation/lib-types' +import { shardusGetTime } from '../network' +import { + AccountHashCache, + AccountHashCacheHistory, + AccountIDAndHash, + AccountIdAndHashToRepair, + HashTrieAccountDataRequest, + HashTrieAccountDataResponse, + HashTrieAccountsResp, + HashTrieNode, + HashTrieRadixCoverage, + HashTrieReq, + HashTrieResp, + HashTrieSyncConsensus, + HashTrieSyncTell, + HashTrieUpdateStats, + RadixAndHashWithNodeId, + RadixAndChildHashesWithNodeId, + RadixAndHash, + TrieAccount, +} from './state-manager-types' +import { InternalBinaryHandler } from '../types/Handler' +import { Route } from '@shardeum-foundation/lib-types/build/src/p2p/P2PTypes' +import { TypeIdentifierEnum } from '../types/enum/TypeIdentifierEnum' +import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' +import { Request, Response } from 'express-serve-static-core' +import { P2P } from '@shardeum-foundation/lib-types' +import { appdata_replacer } from '../utils' +import { WrappedData } from '../types/WrappedData' +import { + deserializeGetAccountDataByHashesResp, + GetAccountDataByHashesResp, + serializeGetAccountDataByHashesResp, +} from '../types/GetAccountDataByHashesResp' +import { + deserializeGetAccountDataByHashesReq, + GetAccountDataByHashesReq, + serializeGetAccountDataByHashesReq, +} from '../types/GetAccountDataByHashesReq' +import { + GetTrieHashesResponse, + 
serializeGetTrieHashesResp, + deserializeGetTrieHashesResp, +} from '../types/GetTrieHashesResp' +import { GetTrieHashesRequest, deserializeGetTrieHashesReq, serializeGetTrieHashesReq } from '../types/GetTrieHashesReq' +import { + GetTrieAccountHashesReq, + deserializeGetTrieAccountHashesReq, + serializeGetTrieAccountHashesReq, +} from '../types/GetTrieAccountHashesReq' +import { + GetTrieAccountHashesResp, + deserializeGetTrieAccountHashesResp, + serializeGetTrieAccountHashesResp, +} from '../types/GetTrieAccountHashesResp' +import { BadRequest, InternalError, serializeResponseError } from '../types/ResponseError' +import { + RepairOOSAccountsReq, + deserializeRepairOOSAccountsReq, + serializeRepairOOSAccountsReq, +} from '../types/RepairOOSAccountsReq' +import { getStreamWithTypeCheck, requestErrorHandler } from '../types/Helpers' +import { RequestErrorEnum } from '../types/enum/RequestErrorEnum' +import { robustQuery } from '../p2p/Utils' +import { RequestReceiptForTxReqSerialized, serializeRequestReceiptForTxReq } from '../types/RequestReceiptForTxReq' +import { deserializeRequestReceiptForTxResp, RequestReceiptForTxRespSerialized } from '../types/RequestReceiptForTxResp' +import { + SyncTrieHashesRequest, + deserializeSyncTrieHashesReq, + serializeSyncTrieHashesReq, +} from '../types/SyncTrieHashesReq' +import { errorToStringFull } from '../utils' +import { Node } from '../shardus/shardus-types' + +export const handlerMethods = { + setupHandlers(): void { + // this.p2p.registerInternal( + // 'get_trie_hashes', + // async ( + // payload: HashTrieReq, + // respond: (arg0: HashTrieResp) => Promise, + // _sender: unknown, + // _tracker: string, + // msgSize: number + // ) => { + // profilerInstance.scopedProfileSectionStart('get_trie_hashes', false, msgSize) + // const result = { nodeHashes: [], nodeId: Self.id } as HashTrieResp + // let responseCount = 0 + // let respondSize + + // if (Self.isFailed) { + // respondSize = await respond(result) + // } else { + // for (const radix of payload.radixList) { + // const level = radix.length + // const layerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection + // if (layerMap == null) { + // /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_hashes badrange:${level}`) + // break + // } + + // const hashTrieNode = layerMap.get(radix) + // if (hashTrieNode != null) { + // for (const childTreeNode of hashTrieNode.children) { + // if (childTreeNode != null) { + // result.nodeHashes.push({ radix: childTreeNode.radix, hash: childTreeNode.hash }) + // responseCount++ + // } + // } + // } + // } + + // /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_hashes c:${this.stateManager.currentCycleShardData.cycleNumber}`, responseCount) + + // // todo could recored a split time here.. so we know time spend on handling the request vs sending the response? + // // that would not be completely accurate because the time to get the data is outide of this handler... 
+ // respondSize = await respond(result) + // } + // profilerInstance.scopedProfileSectionEnd('get_trie_hashes', respondSize) + // } + // ) + + // this.p2p.registerInternal( + // 'repair_oos_accounts', + // async ( + // payload: {repairInstructions: AccountRepairInstruction[]}, + // respond: (arg0: boolean) => Promise, + // _sender: unknown, + // _tracker: string, + // msgSize: number + // ) => { + // profilerInstance.scopedProfileSectionStart('repair_oos_accounts', false, msgSize) + + // try { + // for (const repairInstruction of payload?.repairInstructions) { + // const { accountID, txId, hash, accountData, targetNodeId, receipt2 } = repairInstruction + + // // check if we are the target node + // if (targetNodeId !== Self.id) { + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: not target node for txId: ${txId}`) + // continue + // } + + // // check if we cover this accountId + // const storageNodes = this.stateManager.transactionQueue.getStorageGroupForAccount(accountID) + // const isInStorageGroup = storageNodes.map((node) => node.id).includes(Self.id) + // if (!isInStorageGroup) { + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: not in storage group for account: ${accountID}`) + // continue + // } + // // check if we have already repaired this account + // const accountHashCache = this.stateManager.accountCache.getAccountHash(accountID) + // if (accountHashCache != null && accountHashCache.h === hash) { + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: already repaired account: ${accountID}`) + // continue + // } + // if (accountHashCache != null && accountHashCache.t > accountData.timestamp) { + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: we have newer account: ${accountID}`) + // continue + // } + + // const archivedQueueEntry = this.stateManager.transactionQueue.getQueueEntryArchived(txId, 'repair_oos_accounts') + + // if (archivedQueueEntry == null) { + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: no archivedQueueEntry for txId: ${txId}`) + // this.mainLogger.debug(`repair_oos_accounts: no archivedQueueEntry for txId: ${txId}`) + // continue + // } + + // // check the vote and confirmation status of the tx + // const bestMessage = receipt2.confirmOrChallenge + // const receivedBestVote = receipt2.appliedVote + + // if (receivedBestVote != null) { + // // Check if vote is from eligible list of voters for this TX + // if(this.stateManager.transactionQueue.useNewPOQ && !archivedQueueEntry.eligibleNodeIdsToVote.has(receivedBestVote.node_id)) { + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: vote from ineligible node for txId: ${txId}`) + // continue + // } + + // // Check signature of the vote + // if (!this.crypto.verify( + // receivedBestVote as SignedObject, + // archivedQueueEntry.executionGroupMap.get(receivedBestVote.node_id).publicKey + // )) { + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: vote signature invalid for txId: ${txId}`) + // continue + // } + + // // Check transaction result from vote + // if (!receivedBestVote.transaction_result) { + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: vote result not true for txId ${txId}`) + // continue + // } + + // // Check account hash. Calculate account hash of account given in instruction + // // and compare it with the account hash in the vote. 
+ // const calculatedAccountHash = this.app.calculateAccountHash(accountData.data) + // let accountHashMatch = false + // for (let i = 0; i < receivedBestVote.account_id.length; i++) { + // if (receivedBestVote.account_id[i] === accountID) { + // if (receivedBestVote.account_state_hash_after[i] !== calculatedAccountHash) { + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: account hash mismatch for txId: ${txId}`) + // accountHashMatch = false + // } else { + // accountHashMatch = true + // } + // break + // } + // } + // if (accountHashMatch === false) { + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: vote account hash mismatch for txId: ${txId}`) + // continue + // } + // } else { + // // Skip this account apply as we were not able to get the best vote for this tx + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: no vote for txId: ${txId}`) + // continue + // } + + // if (this.stateManager.transactionQueue.useNewPOQ) { + // if (bestMessage != null) { + // // Skip if challenge receipt + // if (bestMessage.message === 'challenge') { + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: challenge for txId: ${txId}`) + // continue + // } + + // // Check if mesasge is from eligible list of responders for this TX + // if(!archivedQueueEntry.eligibleNodeIdsToConfirm.has(bestMessage.nodeId)) { + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: confirmation from ineligible node for txId: ${txId}`) + // continue + // } + + // // Check signature of the message + // if(!this.crypto.verify( + // bestMessage as SignedObject, + // archivedQueueEntry.executionGroupMap.get(bestMessage.nodeId).publicKey + // )) { + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: confirmation signature invalid for txId: ${txId}`) + // continue + // } + // } else { + // // Skip this account apply as we were not able to get the best confirmation for this tx + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: no confirmation for txId: ${txId}`) + // continue + // } + // } + + // // update the account data (and cache?) + // const updatedAccounts: string[] = [] + // //save the account data. 
note this will make sure account hashes match the wrappers and return failed + // // hashes that don't match + // const failedHashes = await this.stateManager.checkAndSetAccountData( + // [accountData], + // `repair_oos_accounts:${txId}`, + // true, + // updatedAccounts + // ) + // if (logFlags.debug) this.mainLogger.debug(`repair_oos_accounts: ${updatedAccounts.length} updated, ${failedHashes.length} failed`) + // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts:${updatedAccounts.length} updated, accountId: ${utils.makeShortHash(accountID)}, cycle: ${this.stateManager.currentCycleShardData.cycleNumber}`) + // if (failedHashes.length > 0) nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts:${failedHashes.length} failed`) + // let success = false + // if (updatedAccounts.length > 0 && failedHashes.length === 0) { + // success = true + // } + // } + // await respond(true) + // } catch (e) { + // } + + // profilerInstance.scopedProfileSectionEnd('repair_oos_accounts') + // } + // ) + + const repairMissingAccountsBinary: Route> = { + name: InternalRouteEnum.binary_repair_oos_accounts, + handler: async (payloadBuffer, respond, header, sign) => { + const route = InternalRouteEnum.binary_repair_oos_accounts + nestedCountersInstance.countEvent('internal', route) + this.profiler.scopedProfileSectionStart(route, false, payloadBuffer.length) + try { + const requestStream = getStreamWithTypeCheck(payloadBuffer, TypeIdentifierEnum.cRepairOOSAccountsReq) + if (!requestStream) { + return + } + // (Optional) Check verification data in the header + const payload = deserializeRepairOOSAccountsReq(requestStream) + if (!payload?.repairInstructions) { + return + } + + let [latestCycle] = Context.p2p.getLatestCycles(1) + if (!latestCycle) { + this.mainLogger.error('repair_oos_accounts: no latest cycle') + return + } + + if (this.repairRequestsMadeThisCycle.cycle !== latestCycle.counter) { + this.repairRequestsMadeThisCycle.cycle = latestCycle.counter + this.repairRequestsMadeThisCycle.numRequests = 0 + } + + // verifyPayload(AJVSchemaEnum.RepairOOSAccountsReq', payload) + for (const repairInstruction of payload.repairInstructions) { + const { accountID, txId, hash, accountData, targetNodeId, signedReceipt } = repairInstruction + + // check if we are the target node + if (targetNodeId !== Self.id) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts: not target node for txId: ${txId}` + ) + continue + } + + // check if we cover this accountId + const storageNodes = this.stateManager.transactionQueue.getStorageGroupForAccount(accountID) + const isInStorageGroup = storageNodes.map((node) => node.id).includes(Self.id) + if (!isInStorageGroup) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts: not in storage group for account: ${accountID}` + ) + continue + } + // check if we have already repaired this account + const accountHashCache = this.stateManager.accountCache.getAccountHash(accountID) + if (accountHashCache != null && accountHashCache.h === hash) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts: already repaired account: ${accountID}` + ) + continue + } + if (accountHashCache != null && accountHashCache.t > accountData.timestamp) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts: we have newer account: ${accountID}` + ) + continue + } + + const archivedQueueEntry = 
this.stateManager.transactionQueue.getQueueEntryArchived( + txId, + 'repair_oos_accounts' + ) + + if (archivedQueueEntry == null) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts: no archivedQueueEntry for txId: ${txId}` + ) + this.mainLogger.debug(`repair_oos_accounts: no archivedQueueEntry for txId: ${txId}`) + continue + } + + const proposal = signedReceipt.proposal + if (signedReceipt.proposalHash !== this.stateManager.transactionConsensus.calculateVoteHash(proposal)) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts: proposal hash mismatch for txId: ${txId}` + ) + continue + } + + const queryFn = async (node: Node) => { + const message = { txid: txId, timestamp: accountData.timestamp } + return await (this.p2p as any).askBinary( + node, + InternalRouteEnum.binary_request_receipt_for_tx, + message, + serializeRequestReceiptForTxReq, + deserializeRequestReceiptForTxResp, + {} + ) + } + + if ( + this.repairRequestsMadeThisCycle.numRequests + 1 > + this.config.stateManager.patcherRepairByReceiptPerUpdate + ) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts: too many repair requests this cycle` + ) + this.mainLogger.warn( + `binary/repair_oos_accounts: too many repair requests this cycle (${latestCycle.counter})` + ) + return + } + + // make sure tx hasn't been altered by robust querying for the proposal using request txid and timestamp + const txReceipt = await robustQuery(storageNodes, queryFn) + if (txReceipt.isRobustResult === false) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts: robust query failed for txId: ${txId}` + ) + continue + } + this.repairRequestsMadeThisCycle.numRequests++ + + if ( + txReceipt.topResult.success !== true || + txReceipt.topResult.receipt == null || + txReceipt.topResult.receipt.proposalHash == null + ) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts: robust query couldn't find queueEntry for txId: ${txId}` + ) + continue + } + + if (signedReceipt.proposalHash !== txReceipt.topResult.receipt.proposalHash) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts: proposal hash mismatch for txId: ${txId}` + ) + continue + } + + // if (receivedBestVote != null) { + // Check if vote is from eligible list of voters for this TX + // if ( + // this.stateManager.transactionQueue.useNewPOQ && + // !archivedQueueEntry.eligibleNodeIdsToVote.has(receivedBestVote.node_id) + // ) { + // nestedCountersInstance.countEvent( + // 'accountPatcher', + // `binary/repair_oos_accounts: vote from ineligible node for txId: ${txId}` + // ) + // continue + // } + + // Check signature of the vote + // if ( + // !this.crypto.verify( + // receivedBestVote as SignedObject, + // archivedQueueEntry.executionGroupMap.get(receivedBestVote.node_id).publicKey + // ) + // ) { + // nestedCountersInstance.countEvent( + // 'accountPatcher', + // `binary/repair_oos_accounts: vote signature invalid for txId: ${txId}` + // ) + // continue + // } + + // Verify signed receipt + const executionGroupNodes = new Set(archivedQueueEntry.executionGroup.map((node) => node.publicKey)) + const receiptVerification = this.stateManager.transactionConsensus.verifyAppliedReceipt( + signedReceipt, + executionGroupNodes + ) + if (receiptVerification !== true) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `repair_oos_accounts: receipt verification failed for txId: ${txId}` + ) 
+ continue + } + + // Check transaction result from vote + if (!proposal.applied) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts: proposal result not true for txId ${txId}` + ) + continue + } + + // Check account hash. Calculate account hash of account given in instruction + // and compare it with the account hash in the vote. + const calculatedAccountHash = this.app.calculateAccountHash(accountData.data) + let accountHashMatch = false + for (let i = 0; i < proposal.accountIDs.length; i++) { + if (proposal.accountIDs[i] === accountID) { + if (proposal.afterStateHashes[i] !== calculatedAccountHash) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts: account hash mismatch for txId: ${txId}` + ) + accountHashMatch = false + } else { + accountHashMatch = true + } + break + } + } + if (accountHashMatch === false) { + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts: vote account hash mismatch for txId: ${txId}` + ) + continue + } + // } else { + // // Skip this account apply as we were not able to get the best vote for this tx + // nestedCountersInstance.countEvent( + // 'accountPatcher', + // `binary/repair_oos_accounts: no vote for txId: ${txId}` + // ) + // continue + // } + + // if (this.stateManager.transactionQueue.useNewPOQ) { + // if (bestMessage != null) { + // // Skip if challenge receipt + // if (bestMessage.message === 'challenge') { + // nestedCountersInstance.countEvent( + // 'accountPatcher', + // `binary/repair_oos_accounts: challenge for txId: ${txId}` + // ) + // continue + // } + + // // Check if mesasge is from eligible list of responders for this TX + // if (!archivedQueueEntry.eligibleNodeIdsToConfirm.has(bestMessage.nodeId)) { + // nestedCountersInstance.countEvent( + // 'accountPatcher', + // `binary/repair_oos_accounts: confirmation from ineligible node for txId: ${txId}` + // ) + // continue + // } + + // // Check signature of the message + // if ( + // !this.crypto.verify( + // bestMessage as SignedObject, + // archivedQueueEntry.executionGroupMap.get(bestMessage.nodeId).publicKey + // ) + // ) { + // nestedCountersInstance.countEvent( + // 'accountPatcher', + // `binary/repair_oos_accounts: confirmation signature invalid for txId: ${txId}` + // ) + // continue + // } + // } else { + // // Skip this account apply as we were not able to get the best confirmation for this tx + // nestedCountersInstance.countEvent( + // 'accountPatcher', + // `binary/repair_oos_accounts: no confirmation for txId: ${txId}` + // ) + // continue + // } + // } + + // update the account data (and cache?) + const updatedAccounts: string[] = [] + //save the account data. 
note this will make sure account hashes match the wrappers and return failed + // hashes that don't match + const failedHashes = await this.stateManager.checkAndSetAccountData( + [accountData], + `binary/repair_oos_accounts:${txId}`, + true, + updatedAccounts + ) + if (logFlags.debug) + this.mainLogger.debug( + `binary/repair_oos_accounts: ${updatedAccounts.length} updated, ${failedHashes.length} failed` + ) + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts:${updatedAccounts.length} updated, accountId: ${utils.makeShortHash( + accountID + )}, cycle: ${this.stateManager.currentCycleShardData.cycleNumber}` + ) + if (failedHashes.length > 0) + nestedCountersInstance.countEvent( + 'accountPatcher', + `binary/repair_oos_accounts:${failedHashes.length} failed` + ) + let success = false + if (updatedAccounts.length > 0 && failedHashes.length === 0) { + success = true + } + } + } catch (e) { + // Error handling + console.error(`Error in repairMissingAccountsBinary handler: ${e.message}`) + } finally { + this.profiler.scopedProfileSectionEnd(route) + } + }, + } + + const getTrieHashesBinary: Route> = { + name: InternalRouteEnum.binary_get_trie_hashes, + handler: async (payloadBuffer, respond, header, sign) => { + const route = InternalRouteEnum.binary_get_trie_hashes + nestedCountersInstance.countEvent('internal', route) + this.profiler.scopedProfileSectionStart(route, false, payloadBuffer.length) + const result = { nodeHashes: [], nodeId: Self.id } as GetTrieHashesResponse + try { + const requestStream = getStreamWithTypeCheck(payloadBuffer, TypeIdentifierEnum.cGetTrieHashesReq) + if (!requestStream) { + respond(result, serializeGetTrieHashesResp) + return + } + const readableReq = deserializeGetTrieHashesReq(requestStream) + let responseCount = 0 + if (!Self.isFailed) { + for (const radix of readableReq.radixList) { + const level = radix.length + const layerMap = this.shardTrie.layerMaps[level] + if (layerMap == null) { + /* prettier-ignore */ if (this.config.debug.verboseNestedCounters) nestedCountersInstance.countEvent('accountPatcher', `get_trie_hashes badrange:${level}`) + break + } + const hashTrieNode = layerMap.get(radix) + if (hashTrieNode != null) { + for (const childTreeNode of hashTrieNode.children) { + if (childTreeNode != null) { + result.nodeHashes.push({ radix: childTreeNode.radix, hash: childTreeNode.hash }) + responseCount++ + } + } + } + } + if (responseCount > 0) { + /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_hashes c:${this.stateManager.currentCycleShardData.cycleNumber}`, responseCount) + } + } + respond(result, serializeGetTrieHashesResp) + } catch (e) { + // Error handling + console.error(`Error in getTrieHashesBinary handler: ${e.message}`) + respond({ nodeHashes: null }, serializeGetTrieHashesResp) + } finally { + this.profiler.scopedProfileSectionEnd(route) + } + }, + } + + this.p2p.registerInternalBinary(getTrieHashesBinary.name, getTrieHashesBinary.handler) + this.p2p.registerInternalBinary(repairMissingAccountsBinary.name, repairMissingAccountsBinary.handler) + + // this.p2p.registerInternal( + // 'sync_trie_hashes', + // async ( + // payload: HashTrieSyncTell, + // _respondWrapped: unknown, + // sender: string, + // _tracker: string, + // msgSize: number + // ) => { + // profilerInstance.scopedProfileSectionStart('sync_trie_hashes', false, msgSize) + // try { + // //TODO use our own definition of current cycle. + // //use playlod cycle to filter out TXs.. 
+ // const cycle = payload.cycle + + // let hashTrieSyncConsensus = this.hashTrieSyncConsensusByCycle.get(payload.cycle) + // if (hashTrieSyncConsensus == null) { + // hashTrieSyncConsensus = { + // cycle: payload.cycle, + // radixHashVotes: new Map(), + // coverageMap: new Map(), + // } + // this.hashTrieSyncConsensusByCycle.set(payload.cycle, hashTrieSyncConsensus) + + // const shardValues = this.stateManager.shardValuesByCycle.get(payload.cycle) + // if (shardValues == null) { + // /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `sync_trie_hashes not ready c:${payload.cycle}`) + // return + // } + + // //mark syncing radixes.. + // //todo compare to cycle!! only init if from current cycle. + // this.initStoredRadixValues(payload.cycle) + // } + + // const node = NodeList.nodes.get(sender) + + // for (const nodeHashes of payload.nodeHashes) { + // //don't record the vote if we cant use it! + // // easier than filtering it out later on in the stream. + // if (this.isRadixStored(cycle, nodeHashes.radix) === false) { + // continue + // } + + // //todo: secure that the voter is allowed to vote. + // let hashVote = hashTrieSyncConsensus.radixHashVotes.get(nodeHashes.radix) + // if (hashVote == null) { + // hashVote = { allVotes: new Map(), bestHash: nodeHashes.hash, bestVotes: 1 } + // hashTrieSyncConsensus.radixHashVotes.set(nodeHashes.radix, hashVote) + // hashVote.allVotes.set(nodeHashes.hash, { count: 1, voters: [node] }) + // } else { + // const voteEntry = hashVote.allVotes.get(nodeHashes.hash) + // if (voteEntry == null) { + // hashVote.allVotes.set(nodeHashes.hash, { count: 1, voters: [node] }) + // } else { + // const voteCount = voteEntry.count + 1 + // voteEntry.count = voteCount + // voteEntry.voters.push(node) + // //hashVote.allVotes.set(nodeHashes.hash, votes + 1) + // //will ties be a problem? (not if we need a majority!) + // if (voteCount > hashVote.bestVotes) { + // hashVote.bestVotes = voteCount + // hashVote.bestHash = nodeHashes.hash + // } + // } + // } + // } + // } finally { + // profilerInstance.scopedProfileSectionEnd('sync_trie_hashes') + // } + // } + // ) + + const syncTrieHashesBinaryHandler: Route> = { + name: InternalRouteEnum.binary_sync_trie_hashes, + handler: async (payload, respond, header, sign) => { + const route = InternalRouteEnum.binary_sync_trie_hashes + nestedCountersInstance.countEvent('internal', route) + this.profiler.scopedProfileSectionStart(route, false, payload.length) + + const errorHandler = ( + errorType: RequestErrorEnum, + opts?: { customErrorLog?: string; customCounterSuffix?: string } + ): void => requestErrorHandler(route, errorType, header, opts) + + try { + const stream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cSyncTrieHashesReq) + if (!stream) { + return errorHandler(RequestErrorEnum.InvalidRequest) + } + const request = deserializeSyncTrieHashesReq(stream) + const cycle = request.cycle + + let hashTrieSyncConsensus = this.hashTrieSyncConsensusByCycle.get(cycle) + if (hashTrieSyncConsensus == null) { + hashTrieSyncConsensus = { + cycle, + radixHashVotes: new Map(), + coverageMap: new Map(), + } + this.hashTrieSyncConsensusByCycle.set(cycle, hashTrieSyncConsensus) + + const shardValues = this.stateManager.shardValuesByCycle.get(cycle) + if (shardValues == null) { + nestedCountersInstance.countEvent('accountPatcher', `sync_trie_hashes not ready c:${cycle}`) + if (logFlags.debug) console.error(`Shard values not ready for cycle: ${cycle}`) + return + } + + //mark syncing radixes.. 
+ //todo compare to cycle!! only init if from current cycle. + this.initStoredRadixValues(cycle) + } + + const node = NodeList.nodes.get(header.sender_id) + + for (const nodeHashes of request.nodeHashes) { + if (this.isRadixStored(cycle, nodeHashes.radix) === false) { + continue + } + + // check the length of the radix + if (nodeHashes.radix.length !== this.treeSyncDepth) { + if (logFlags.error) + this.mainLogger.error(`syncTrieHashesBinaryHandler: radix length mismatch: ${nodeHashes.radix}`) + nestedCountersInstance.countEvent('accountPatcher', `${route}-radix-length-mismatch`) + continue + } + + // todo: secure that the voter is allowed to vote. + let hashVote = hashTrieSyncConsensus.radixHashVotes.get(nodeHashes.radix) + if (hashVote == null) { + hashVote = { allVotes: new Map(), bestHash: nodeHashes.hash, bestVotes: 1 } + hashTrieSyncConsensus.radixHashVotes.set(nodeHashes.radix, hashVote) + hashVote.allVotes.set(nodeHashes.hash, { count: 1, voters: new Set([node]) }) + } else { + const voteEntry = hashVote.allVotes.get(nodeHashes.hash) + if (voteEntry == null) { + hashVote.allVotes.set(nodeHashes.hash, { count: 1, voters: new Set([node]) }) + } else { + voteEntry.voters.add(node) + const voteCount = voteEntry.voters.size + voteEntry.count = voteCount + if (voteCount > hashVote.bestVotes) { + hashVote.bestVotes = voteCount + hashVote.bestHash = nodeHashes.hash + } + } + } + } + } catch (e) { + /* prettier-ignore */ if (logFlags.error) console.error(`Error processing syncTrieHashesBinaryHandler: ${e}`) + nestedCountersInstance.countEvent('internal', `${route}-exception`) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route}: Exception executing request: ${errorToStringFull(e)}`) + } finally { + profilerInstance.scopedProfileSectionEnd(route) + } + }, + } + + this.p2p.registerInternalBinary(syncTrieHashesBinaryHandler.name, syncTrieHashesBinaryHandler.handler) + + // //get child accountHashes for radix. //get the hashes and ids so we know what to fix. 
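+ // The legacy JSON handler is left commented out below for reference; the binary route
+ // (binary_get_trie_account_hashes) returns {accountID, hash} pairs for each requested radix,
+ // capped by config.stateManager.patcherMaxChildHashResponses.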
+ // this.p2p.registerInternal( + // 'get_trie_accountHashes', + // async ( + // payload: HashTrieReq, + // respond: (arg0: HashTrieAccountsResp) => Promise, + // _sender: string, + // _tracker: string, + // msgSize: number + // ) => { + // profilerInstance.scopedProfileSectionStart('get_trie_accountHashes', false, msgSize) + // //nodeChildHashes: {radix:string, childAccounts:{accountID:string, hash:string}[]}[] + // const result = { + // nodeChildHashes: [], + // stats: { matched: 0, visisted: 0, empty: 0, childCount: 0 }, + // nodeId: Self.id + // } as HashTrieAccountsResp + + // const patcherMaxChildHashResponses = this.config.stateManager.patcherMaxChildHashResponses + + // for (const radix of payload.radixList) { + // result.stats.visisted++ + // const level = radix.length + // const layerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection + // if (layerMap == null) { + // /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_accountHashes badrange:${level}`) + // break + // } + + // const hashTrieNode = layerMap.get(radix) + // if (hashTrieNode != null && hashTrieNode.accounts != null) { + // result.stats.matched++ + // const childAccounts = [] + // result.nodeChildHashes.push({ radix, childAccounts }) + // for (const account of hashTrieNode.accounts) { + // childAccounts.push({ accountID: account.accountID, hash: account.hash }) + // result.stats.childCount++ + // } + // if (hashTrieNode.accounts.length === 0) { + // result.stats.empty++ + // } + // } + + // //some protection on how many responses we can send + // if (result.stats.childCount > patcherMaxChildHashResponses) { + // break + // } + // } + + // /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_accountHashes c:${this.stateManager.currentCycleShardData.cycleNumber}`, result.stats.childCount) + + // const respondSize = await respond(result) + // profilerInstance.scopedProfileSectionEnd('get_trie_accountHashes', respondSize) + // } + // ) + + const getTrieAccountHashesBinaryHandler: Route> = { + name: InternalRouteEnum.binary_get_trie_account_hashes, + handler: (payload, respond, header, sign) => { + const route = InternalRouteEnum.binary_get_trie_account_hashes + profilerInstance.scopedProfileSectionStart(route, false, payload.length) + const result = { + nodeChildHashes: [], + stats: { matched: 0, visisted: 0, empty: 0, childCount: 0 }, + nodeId: Self.id, + } as HashTrieAccountsResp + try { + const stream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cGetAccountTrieHashesReq) + if (!stream) { + requestErrorHandler(route, RequestErrorEnum.InvalidRequest, header) + return respond(BadRequest('invalid request stream'), serializeResponseError) + } + const req = deserializeGetTrieAccountHashesReq(stream) + const radixList = req.radixList + const patcherMaxChildHashResponses = this.config.stateManager.patcherMaxChildHashResponses + for (const radix of radixList) { + result.stats.visisted++ + const level = radix.length + const layerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection + if (layerMap == null) { + /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_accountHashes badrange:${level}`) + break + } + + const hashTrieNode = layerMap.get(radix) + if (hashTrieNode != null && hashTrieNode.accounts != null) { + result.stats.matched++ + const childAccounts = [] + result.nodeChildHashes.push({ radix, childAccounts }) + for (const account of 
hashTrieNode.accounts) { + childAccounts.push({ accountID: account.accountID, hash: account.hash }) + result.stats.childCount++ + } + if (hashTrieNode.accounts.length === 0) { + result.stats.empty++ + } + } + + //some protection on how many responses we can send + if (result.stats.childCount > patcherMaxChildHashResponses) { + break + } + } + + /* prettier-ignore */ if (this.config.debug.verboseNestedCounters) nestedCountersInstance.countEvent('accountPatcher', `binary_get_trie_accountHashes c:${this.stateManager.currentCycleShardData.cycleNumber}`, result.stats.childCount) + respond(result, serializeGetTrieAccountHashesResp) + } catch (e) { + this.statemanager_fatal( + 'binary_get_trie_accountHashes-failed', + 'binary_get_trie_accountHashes:' + e.name + ': ' + e.message + ' at ' + e.stack + ) + nestedCountersInstance.countEvent('internal', `${route}-exception`) + respond(InternalError('exception executing request'), serializeResponseError) + } finally { + profilerInstance.scopedProfileSectionEnd(route) + } + }, + } + + this.p2p.registerInternalBinary(getTrieAccountHashesBinaryHandler.name, getTrieAccountHashesBinaryHandler.handler) + + // this.p2p.registerInternal( + // 'get_account_data_by_hashes', + // async ( + // payload: HashTrieAccountDataRequest, + // respond: (arg0: HashTrieAccountDataResponse) => Promise, + // _sender: string, + // _tracker: string, + // msgSize: number + // ) => { + // profilerInstance.scopedProfileSectionStart('get_account_data_by_hashes', false, msgSize) + // nestedCountersInstance.countEvent('accountPatcher', `get_account_data_by_hashes`) + // const result: HashTrieAccountDataResponse = { accounts: [], stateTableData: [] } + // try { + // //nodeChildHashes: {radix:string, childAccounts:{accountID:string, hash:string}[]}[] + // const queryStats = { + // fix1: 0, + // fix2: 0, + // skip_localHashMismatch: 0, + // skip_requestHashMismatch: 0, + // returned: 0, + // missingResp: false, + // noResp: false, + // } + + // const hashMap = new Map() + // const accountIDs = [] + + // //should limit on asking side, this is just a precaution + // if (payload.accounts.length > 900) { + // payload.accounts = payload.accounts.slice(0, 900) + // } + + // for (const accountHashEntry of payload.accounts) { + // // let radix = accountHashEntry.accountID.substr(0, this.treeMaxDepth) + // // let layerMap = this.shardTrie.layerMaps[this.treeMaxDepth] + // // let hashTrieNode = layerMap.get(radix) + // if ( + // accountHashEntry == null || + // accountHashEntry.hash == null || + // accountHashEntry.accountID == null + // ) { + // queryStats.fix1++ + // continue + // } + // hashMap.set(accountHashEntry.accountID, accountHashEntry.hash) + // accountIDs.push(accountHashEntry.accountID) + // } + + // const accountData = await this.app.getAccountDataByList(accountIDs) + + // const skippedAccounts: AccountIDAndHash[] = [] + // const returnedAccounts: AccountIDAndHash[] = [] + + // const accountsToGetStateTableDataFor = [] + // //only return results that match the requested hash! 
+ // const accountDataFinal: Shardus.WrappedData[] = [] + // if (accountData != null) { + // for (const wrappedAccount of accountData) { + // if (wrappedAccount == null || wrappedAccount.stateId == null || wrappedAccount.data == null) { + // queryStats.fix2++ + // continue + // } + + // const { accountId, stateId, data: recordData } = wrappedAccount + // const accountHash = this.app.calculateAccountHash(recordData) + // if (stateId !== accountHash) { + // skippedAccounts.push({ accountID: accountId, hash: stateId }) + // queryStats.skip_localHashMismatch++ + // continue + // } + + // if (hashMap.get(accountId) === wrappedAccount.stateId) { + // accountDataFinal.push(wrappedAccount) + // returnedAccounts.push({ accountID: accountId, hash: stateId }) + // accountsToGetStateTableDataFor.push(accountId) + // queryStats.returned++ + // } else { + // queryStats.skip_requestHashMismatch++ + // skippedAccounts.push({ accountID: accountId, hash: stateId }) + // } + + // // let wrappedAccountInQueueRef = wrappedAccount as Shardus.WrappedDataFromQueue + // // wrappedAccountInQueueRef.seenInQueue = false + + // // if (this.stateManager.lastSeenAccountsMap != null) { + // // let queueEntry = this.stateManager.lastSeenAccountsMap[wrappedAccountInQueueRef.accountId] + // // if (queueEntry != null) { + // // wrappedAccountInQueueRef.seenInQueue = true + // // } + // // } + // } + // } + // //PERF could disable this for more perf? + // //this.stateManager.testAccountDataWrapped(accountDataFinal) + + // if (queryStats.returned < payload.accounts.length) { + // nestedCountersInstance.countEvent('accountPatcher', `get_account_data_by_hashes incomplete`) + // queryStats.missingResp = true + // if (queryStats.returned === 0) { + // nestedCountersInstance.countEvent('accountPatcher', `get_account_data_by_hashes no results`) + // queryStats.noResp = true + // } + // } + + // this.mainLogger.debug( + // `get_account_data_by_hashes1 requests[${payload.accounts.length}] :${utils.stringifyReduce( + // payload.accounts + // )} ` + // ) + // this.mainLogger.debug( + // `get_account_data_by_hashes2 skippedAccounts:${utils.stringifyReduce(skippedAccounts)} ` + // ) + // this.mainLogger.debug( + // `get_account_data_by_hashes3 returnedAccounts:${utils.stringifyReduce(returnedAccounts)} ` + // ) + // this.mainLogger.debug( + // `get_account_data_by_hashes4 queryStats:${utils.stringifyReduce(queryStats)} ` + // ) + // this.mainLogger.debug( + // `get_account_data_by_hashes4 stateTabledata:${utils.stringifyReduce(result.stateTableData)} ` + // ) + // result.accounts = accountDataFinal + // } catch (ex) { + // this.statemanager_fatal( + // `get_account_data_by_hashes-failed`, + // 'get_account_data_by_hashes:' + ex.name + ': ' + ex.message + ' at ' + ex.stack + // ) + // } + // const respondSize = await respond(result) + // profilerInstance.scopedProfileSectionEnd('get_account_data_by_hashes', respondSize) + // } + // ) + + const getAccountDataByHashesBinaryHandler: Route> = { + name: InternalRouteEnum.binary_get_account_data_by_hashes, + handler: async (payload, respond) => { + const route = InternalRouteEnum.binary_get_account_data_by_hashes + profilerInstance.scopedProfileSectionStart(route) + nestedCountersInstance.countEvent('internal', route) + const result = { accounts: [], stateTableData: [] } as GetAccountDataByHashesResp + try { + const stream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cGetAccountDataByHashesReq) + if (!stream) { + return respond(result, serializeGetAccountDataByHashesResp) + } + + const 
req = deserializeGetAccountDataByHashesReq(stream) + + const queryStats = { + fix1: 0, + fix2: 0, + skip_localHashMismatch: 0, + skip_requestHashMismatch: 0, + returned: 0, + missingResp: false, + noResp: false, + } + + const hashMap = new Map() + const accountIDs = [] + + if (req.accounts.length > 900) { + req.accounts = req.accounts.slice(0, 900) + } + + for (const accountHashEntry of req.accounts) { + if (accountHashEntry == null || accountHashEntry.hash == null || accountHashEntry.accountID == null) { + queryStats.fix1++ + continue + } + hashMap.set(accountHashEntry.accountID, accountHashEntry.hash) + accountIDs.push(accountHashEntry.accountID) + } + + const accountData = await this.app.getAccountDataByList(accountIDs) + const skippedAccounts: AccountIDAndHash[] = [] + const returnedAccounts: AccountIDAndHash[] = [] + + const accountsToGetStateTableDataFor = [] + const accountDataFinal: WrappedData[] = [] + + if (accountData != null) { + for (const wrappedAccount of accountData) { + if (wrappedAccount == null || wrappedAccount.stateId == null || wrappedAccount.data == null) { + queryStats.fix2++ + continue + } + const { accountId, stateId, data: recordData } = wrappedAccount + const accountHash = this.app.calculateAccountHash(recordData) + if (stateId !== accountHash) { + skippedAccounts.push({ accountID: accountId, hash: stateId }) + queryStats.skip_localHashMismatch++ + continue + } + + if (hashMap.get(accountId) === wrappedAccount.stateId) { + accountDataFinal.push(wrappedAccount) + returnedAccounts.push({ accountID: accountId, hash: stateId }) + accountsToGetStateTableDataFor.push(accountId) + queryStats.returned++ + } else { + queryStats.skip_requestHashMismatch++ + skippedAccounts.push({ accountID: accountId, hash: stateId }) + } + } + } + + if (queryStats.returned < req.accounts.length) { + nestedCountersInstance.countEvent('internal', `${route} incomplete`) + queryStats.missingResp = true + if (queryStats.returned === 0) { + nestedCountersInstance.countEvent('internal', `${route} no results`) + queryStats.noResp = true + } + } + + this.mainLogger.debug(`${route} 1 requests[${req.accounts.length}] :${utils.stringifyReduce(req.accounts)} `) + this.mainLogger.debug(`${route} 2 skippedAccounts:${utils.stringifyReduce(skippedAccounts)} `) + this.mainLogger.debug(`${route} 3 returnedAccounts:${utils.stringifyReduce(returnedAccounts)} `) + this.mainLogger.debug(`${route} 4 queryStats:${utils.stringifyReduce(queryStats)} `) + this.mainLogger.debug(`${route} stateTabledata:${utils.stringifyReduce(result.stateTableData)} `) + result.accounts = accountDataFinal + respond(result, serializeGetAccountDataByHashesResp) + } catch (ex) { + this.statemanager_fatal( + `get_account_data_by_hashes-failed`, + 'get_account_data_by_hashes:' + ex.name + ': ' + ex.message + ' at ' + ex.stack + ) + respond(result, serializeGetAccountDataByHashesResp) + } finally { + profilerInstance.scopedProfileSectionEnd(route) + } + }, + } + + this.p2p.registerInternalBinary( + getAccountDataByHashesBinaryHandler.name, + getAccountDataByHashesBinaryHandler.handler + ) + + Context.network.registerExternalGet('debug-patcher-ignore-hash-updates', isDebugModeMiddleware, (_req, res) => { + try { + this.debug_ignoreUpdates = !this.debug_ignoreUpdates + res.write(`this.debug_ignoreUpdates: ${this.debug_ignoreUpdates}\n`) + } catch (e) { + res.write(`${e}\n`) + } + res.end() + }) + Context.network.registerExternalGet('debug-patcher-fail-tx', isDebugModeMiddleware, (_req, res) => { + try { + //toggle chance to fail TXs in a 
way that they do not get fixed by the first tier of repair. + + if (this.stateManager.failNoRepairTxChance === 0) { + this.stateManager.failNoRepairTxChance = 1 + } else { + this.stateManager.failNoRepairTxChance = 0 + } + + res.write(`this.failNoRepairTxChance: ${this.stateManager.failNoRepairTxChance}\n`) + } catch (e) { + res.write(`${e}\n`) + } + res.end() + }) + Context.network.registerExternalGet('debug-patcher-voteflip', isDebugModeMiddleware, (_req, res) => { + try { + if (this.stateManager.voteFlipChance === 0) { + this.stateManager.voteFlipChance = 1 + } else { + this.stateManager.voteFlipChance = 0 + } + + res.write(`this.voteFlipChance: ${this.stateManager.voteFlipChance}\n`) + } catch (e) { + res.write(`${e}\n`) + } + res.end() + }) + Context.network.registerExternalGet('debug-patcher-toggle-skip', isDebugModeMiddleware, (_req, res) => { + try { + if (this.stateManager.debugSkipPatcherRepair === false) { + this.stateManager.debugSkipPatcherRepair = true + } else { + this.stateManager.debugSkipPatcherRepair = false + } + + res.write(`this.debugSkipPatcherRepair: ${this.stateManager.debugSkipPatcherRepair}\n`) + } catch (e) { + res.write(`${e}\n`) + } + res.end() + }) + Context.network.registerExternalGet('debug-patcher-dumpTree', isDebugModeMiddlewareMedium, (_req, res) => { + try { + // this.statemanager_fatal('debug shardTrie',`temp shardTrie ${utils.stringifyReduce(this.shardTrie.layerMaps[0].values().next().value)}`) + // res.write(`${utils.stringifyReduce(this.shardTrie.layerMaps[0].values().next().value)}\n`) + + const trieRoot = this.shardTrie.layerMaps[0].values().next().value + + //strip noisy fields + const tempString = JSON.stringify(trieRoot, utils.debugReplacer) + const processedObject = Utils.safeJsonParse(tempString) + + // use stringify to put a stable sort on the object keys (important for comparisons) + const finalStr = utils.stringifyReduce(processedObject) + + this.statemanager_fatal('debug shardTrie', `temp shardTrie ${finalStr}`) + res.write(`${finalStr}\n`) + } catch (e) { + res.write(`${e}\n`) + } + res.end() + }) + + Context.network.registerExternalGet('debug-patcher-dumpTree-partial', isDebugModeMiddlewareMedium, (req, res) => { + try { + const subTree: boolean = req.query.subtree === 'true' + let radix: string = req.query.radix as string + if (radix.length > this.treeMaxDepth) radix = radix.slice(0, this.treeMaxDepth) + const level = radix.length + const layerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection + + let hashTrieNode = layerMap.get(radix.toLowerCase()) + if (!subTree) { + // deep clone the trie node before removing children property + hashTrieNode = Utils.safeJsonParse(Utils.safeStringify(hashTrieNode)) + delete hashTrieNode.children + } + if (!hashTrieNode) { + /* prettier-ignore */ if (logFlags.error) console.error('debug-patcher-dumpTree-partial - Radix not found. 
Returning 404') + res.status(404).json({ error: 'Radix not found' }) + return + } + //strip noisy fields + const tempString = JSON.stringify(hashTrieNode, utils.debugReplacer) + const processedObject = Utils.safeJsonParse(tempString) + + // use stringify to put a stable sort on the object keys (important for comparisons) + const finalStr = utils.stringifyReduce(processedObject) + + this.statemanager_fatal('debug shardTrie', `temp shardTrie ${finalStr}`) + res.write(`${finalStr}\n`) + } catch (e) { + console.log('Error', e) + res.write(`${e}\n`) + } + res.end() + }) + + Context.network.registerExternalGet('debug-patcher-fail-hashes', isDebugModeMiddlewareLow, (_req, res) => { + try { + const lastCycle = this.p2p.state.getLastCycle() + const cycle = lastCycle.counter + const minVotes = this.calculateMinVotes() + const notEnoughVotesRadix = {} + const outOfSyncRadix = {} + + const hashTrieSyncConsensus = this.hashTrieSyncConsensusByCycle.get(cycle) + + if (!hashTrieSyncConsensus) { + res.json({ error: `Unable to find hashTrieSyncConsensus for last cycle ${lastCycle}` }) + return + } + + for (const radix of hashTrieSyncConsensus.radixHashVotes.keys()) { + const votesMap = hashTrieSyncConsensus.radixHashVotes.get(radix) + const ourTrieNode = this.shardTrie.layerMaps[this.treeSyncDepth].get(radix) + + const hasEnoughVotes = votesMap.bestVotes >= minVotes + const isRadixInSync = ourTrieNode ? ourTrieNode.hash === votesMap.bestHash : false + + if (!hasEnoughVotes || !isRadixInSync) { + const kvp = [] + for (const [key, value] of votesMap.allVotes.entries()) { + kvp.push({ + id: key, + count: value.count, + nodeIDs: [...value.voters].map((node) => utils.makeShortHash(node.id) + ':' + node.externalPort), + }) + } + const simpleMap = { + bestHash: votesMap.bestHash, + ourHash: ourTrieNode ? ourTrieNode.hash : '', + bestVotes: votesMap.bestVotes, + minVotes, + allVotes: kvp, + } + if (!hasEnoughVotes) notEnoughVotesRadix[radix] = simpleMap // eslint-disable-line security/detect-object-injection + if (!isRadixInSync) outOfSyncRadix[radix] = simpleMap // eslint-disable-line security/detect-object-injection + } + } + res.json({ + cycle, + notEnoughVotesRadix, + outOfSyncRadix, + }) + return + } catch (e) { + console.log('Error', e) + res.write(`${e}\n`) + } + res.end() + }) + + Context.network.registerExternalGet('get-tree-last-insync', isDebugModeMiddlewareLow, (_req, res) => { + res.write(`${this.failedLastTrieSync === false}\n`) + res.end() + }) + + Context.network.registerExternalGet('get-tree-last-insync-detail', isDebugModeMiddlewareLow, (_req, res) => { + let prettyJSON = JSON.stringify(this.lastInSyncResult, null, 2) + res.write(`${prettyJSON}\n`) + res.end() + }) + + Context.network.registerExternalGet('trie-repair-dump', isDebugModeMiddleware, (_req, res) => { + res.write(`${utils.stringifyReduce(this.lastRepairInfo)}\n`) + res.end() + }) + + // + Context.network.registerExternalGet('get-shard-dump', isDebugModeMiddleware, (_req, res) => { + res.write(`${this.stateManager.lastShardReport}\n`) + res.end() + }) + + /** + * + * + * Usage: http://:/account-report?id= + */ + Context.network.registerExternalGet('account-report', isDebugModeMiddleware, async (req, res) => { + if (req.query.id == null) return + let id = req.query.id as string + res.write(`report for: ${id} \n`) + try { + if (id.length === 10) { + //short form.. 
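+ // short-form lookup: the first 4 hex characters bound the accountId range to scan and
+ // characters 5-9 must match the tail of the full accountId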
+ let found = false + const prefix = id.substring(0, 4) + const low = prefix + '0'.repeat(60) + const high = prefix + 'f'.repeat(60) + + const suffix = id.substring(5, 10) + const possibleAccounts = await this.app.getAccountDataByRange(low, high, 0, shardusGetTime(), 100, 0, '') + + res.write(`searching ${possibleAccounts.length} accounts \n`) + + for (const account of possibleAccounts) { + if (account.accountId.endsWith(suffix)) { + res.write(`found full account ${id} => ${account.accountId} \n`) + id = account.accountId + found = true + + break + } + } + + if (found == false) { + res.write(`could not find account\n`) + res.end() + return + } + } + + const trieAccount = this.getAccountTreeInfo(id) + const accountHash = this.stateManager.accountCache.getAccountHash(id) + const accountHashFull = this.stateManager.accountCache.getAccountDebugObject(id) //this.stateManager.accountCache.accountsHashCache3.accountHashMap.get(id) + const accountData = await this.app.getAccountDataByList([id]) + res.write(`trieAccount: ${Utils.safeStringify(trieAccount)} \n`) + res.write(`accountHash: ${Utils.safeStringify(accountHash)} \n`) + res.write(`accountHashFull: ${Utils.safeStringify(accountHashFull)} \n`) + res.write(`accountData: ${JSON.stringify(accountData, appdata_replacer)} \n\n`) + res.write(`tests: \n`) + if (accountData != null && accountData.length === 1 && accountHash != null) { + res.write(`accountData hash matches cache ${accountData[0].stateId === accountHash.h} \n`) + } + if (accountData != null && accountData.length === 1 && trieAccount != null) { + res.write(`accountData matches trieAccount ${accountData[0].stateId === trieAccount.hash} \n`) + } + } catch (e) { + res.write(`${e}\n`) + } + res.end() + }) + + /** + * + * + * Usage: http://:/account-coverage?id= + */ + Context.network.registerExternalGet('account-coverage', isDebugModeMiddleware, async (req, res) => { + if (req.query.id === null) return + const id = req.query.id as string + + const possibleAccountsIds: string[] = [] + try { + if (id.length === 10) { + //short form.. 
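+ // same short-form resolution as account-report above: scan the 4-character prefix range,
+ // then match the 5-character suffix against the end of each accountId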
+ const prefix = id.substring(0, 4) + const low = prefix + '0'.repeat(60) + const high = prefix + 'f'.repeat(60) + + const suffix = id.substring(5, 10) + const possibleAccounts = await this.app.getAccountDataByRange(low, high, 0, shardusGetTime(), 100, 0, '') + + for (const account of possibleAccounts) { + if (account.accountId.endsWith(suffix)) { + possibleAccountsIds.push(account.accountId) + } + } + } else { + possibleAccountsIds.push(id) + } + + if (possibleAccountsIds.length === 0) { + res.write( + Utils.safeStringify({ + success: false, + error: 'could not find account', + }) + ) + } else { + const resObj = {} + for (const accountId of possibleAccountsIds) { + const consensusNodes = this.stateManager.transactionQueue.getConsenusGroupForAccount(accountId) + const storedNodes = this.stateManager.transactionQueue.getStorageGroupForAccount(accountId) + + // eslint-disable-next-line security/detect-object-injection + resObj[accountId] = { + consensusNodes: consensusNodes.map((node) => { + return { + id: node.id, + externalIp: node.externalIp, + externalPort: node.externalPort, + internalIp: node.internalIp, + internalPort: node.internalPort, + } + }), + storedNodes: storedNodes.map((node) => { + return { + id: node.id, + externalIp: node.externalIp, + externalPort: node.externalPort, + internalIp: node.internalIp, + internalPort: node.internalPort, + } + }), + } + } + res.write( + Utils.safeStringify({ + success: true, + result: resObj, + }) + ) + } + } catch (e) { + res.write( + Utils.safeStringify({ + success: false, + error: e, + }) + ) + } + res.end() + }) + + Context.network.registerExternalGet('hack-version', isDebugModeMiddleware, (_req, res) => { + res.write(`1.0.1\n`) + res.end() + }) + } +} \ No newline at end of file diff --git a/src/state-manager/AccountPatcher.trie.ts b/src/state-manager/AccountPatcher.trie.ts new file mode 100644 index 000000000..a7af0063c --- /dev/null +++ b/src/state-manager/AccountPatcher.trie.ts @@ -0,0 +1,335 @@ +import { HashTrieUpdateStats, HashTrieNode, TrieAccount } from './state-manager-types' +import { nestedCountersInstance } from '../utils/nestedCounters' +import * as utils from '../utils' +import { logFlags } from '../logger' +import { StateManager as StateManagerTypes } from '@shardeum-foundation/lib-types' + +export const trieMethods = { + upateShardTrie(cycle: number): HashTrieUpdateStats { + //we start with the later of nodes at max depth, and will build upwards one layer at a time + const currentLayer = this.treeMaxDepth + let treeNodeQueue: HashTrieNode[] = [] + + const updateStats = { + leafsUpdated: 0, + leafsCreated: 0, + updatedNodesPerLevel: new Array(this.treeMaxDepth + 1).fill(0), + hashedChildrenPerLevel: new Array(this.treeMaxDepth + 1).fill(0), + totalHashes: 0, + //totalObjectsHashed: 0, + totalNodesHashed: 0, + totalAccountsHashed: 0, + totalLeafs: 0, + } + + //feed account data into lowest layer, generates list of treeNodes + let currentMap = this.shardTrie.layerMaps[currentLayer] // eslint-disable-line security/detect-object-injection + if (currentMap == null) { + currentMap = new Map() + this.shardTrie.layerMaps[currentLayer] = currentMap // eslint-disable-line security/detect-object-injection + } + + //process accounts that need updating. 
Create nodes as needed + for (let i = 0; i < this.accountUpdateQueue.length; i++) { + const tx = this.accountUpdateQueue[i] // eslint-disable-line security/detect-object-injection + const key = tx.accountID.slice(0, currentLayer) + let leafNode = currentMap.get(key) + if (leafNode == null) { + //init a leaf node. + //leaf nodes will have a list of accounts that share the same radix. + leafNode = { + radix: key, + children: [], + childHashes: [], + accounts: [], + hash: '', + accountTempMap: new Map(), + updated: true, + isIncomplete: false, + nonSparseChildCount: 0, + } //this map will cause issues with update + currentMap.set(key, leafNode) + updateStats.leafsCreated++ + treeNodeQueue.push(leafNode) + } + + //this can happen if the depth gets smaller after being larger + if (leafNode.accountTempMap == null) { + leafNode.accountTempMap = new Map() + } + if (leafNode.accounts == null) { + leafNode.accounts = [] + } + + if (leafNode.accountTempMap.has(tx.accountID) === false) { + this.totalAccounts++ + } + leafNode.accountTempMap.set(tx.accountID, tx) + if (leafNode.updated === false) { + treeNodeQueue.push(leafNode) + updateStats.leafsUpdated++ + } + leafNode.updated = true + + //too frequent in large tests. only use this in local tests with smaller data + //if (logFlags.verbose) /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('accountPatcher', `upateShardTrie ${utils.makeShortHash(tx.accountID)}`, `upateShardTrie update: ${utils.makeShortHash(tx.accountID)} h:${utils.makeShortHash(tx.hash)}`) + } + + let removedAccounts = 0 + let removedAccountsFailed = 0 + + if (this.accountRemovalQueue.length > 0) { + //this.statemanager_fatal(`temp accountRemovalQueue`,`accountRemovalQueue c:${cycle} ${utils.stringifyReduce(this.accountRemovalQueue)}`) + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`remove account from trie tracking c:${cycle} ${utils.stringifyReduce(this.accountRemovalQueue)}`) + } + + //remove accoutns from the trie. this happens if our node no longer carries them in storage range. + for (let i = 0; i < this.accountRemovalQueue.length; i++) { + const accountID = this.accountRemovalQueue[i] // eslint-disable-line security/detect-object-injection + + const key = accountID.slice(0, currentLayer) + const treeNode = currentMap.get(key) + if (treeNode == null) { + continue //already gone! + } + + if (treeNode.updated === false) { + treeNodeQueue.push(treeNode) + } + treeNode.updated = true + + if (treeNode.accountTempMap == null) { + treeNode.accountTempMap = new Map() + } + if (treeNode.accounts == null) { + treeNode.accounts = [] + } + const removed = treeNode.accountTempMap.delete(accountID) + if (removed) { + removedAccounts++ + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('accountPatcher', `upateShardTrie ${utils.makeShortHash(accountID)}`, `upateShardTrie remove ${utils.makeShortHash(accountID)} `) + } else { + removedAccountsFailed++ + } + } + if (removedAccounts > 0) { + nestedCountersInstance.countEvent(`accountPatcher`, `removedAccounts c:${cycle}`, removedAccounts) + } + if (removedAccountsFailed > 0) { + /* prettier-ignore */ nestedCountersInstance.countEvent(`accountPatcher`, `removedAccountsFailed c:${cycle}`, removedAccountsFailed) + } + this.accountRemovalQueue = [] + + // for(let treeNode of this.incompleteNodes){ + // treeNodeQueue.push(treeNode) + // } + + //look at updated leaf nodes. 
Sort accounts and update hash values + for (let i = 0; i < treeNodeQueue.length; i++) { + const treeNode = treeNodeQueue[i] // eslint-disable-line security/detect-object-injection + + if (treeNode.updated === true) { + //treeNode.accountTempMap != null){ + treeNode.accounts = Array.from(treeNode.accountTempMap.values()) + + //delete treeNode.accountTempMap... need to keep it + //treeNode.accountTempMap = null + + //sort treeNode.accounts by accountID + treeNode.accounts.sort(this.sortByAccountID) + //compute treenode hash of accounts + treeNode.hash = this.hashObj(treeNode.accounts.map((a) => a.hash)) //todo why is this needed!!! + + treeNode.updated = false + + updateStats.totalHashes++ + updateStats.totalAccountsHashed = updateStats.totalAccountsHashed + treeNode.accounts.length + updateStats.updatedNodesPerLevel[currentLayer] = updateStats.updatedNodesPerLevel[currentLayer] + 1 // eslint-disable-line security/detect-object-injection + } + } + + // update the tree one later at a time. start at the max depth and copy values to the parents. + // Then the parent depth becomes the working depth and we repeat the process + // a queue is used to efficiently update only the nodes that need it. + // hashes are efficiently calculated only once after all children have set their hash data in the childHashes + let parentTreeNodeQueue = [] + //treenode queue has updated treeNodes from each loop, gets fed into next loop + for (let i = currentLayer - 1; i >= 0; i--) { + currentMap = this.shardTrie.layerMaps[i] // eslint-disable-line security/detect-object-injection + if (currentMap == null) { + currentMap = new Map() + this.shardTrie.layerMaps[i] = currentMap // eslint-disable-line security/detect-object-injection + } + //loop each node in treeNodeQueue (nodes from the previous level down) + for (let j = 0; j < treeNodeQueue.length; j++) { + const treeNode = treeNodeQueue[j] // eslint-disable-line security/detect-object-injection + + //compute parent nodes. + const parentKey = treeNode.radix.slice(0, i) + // fast? 0-15 conversion + let index = treeNode.radix.charCodeAt(i) + index = index < 90 ? index - 48 : index - 87 + //get parent node + let parentTreeNode = currentMap.get(parentKey) + if (parentTreeNode == null) { + parentTreeNode = { + radix: parentKey, + children: new Array(16), + childHashes: new Array(16), + updated: false, + hash: '', + isIncomplete: false, + nonSparseChildCount: 0, + } + currentMap.set(parentKey, parentTreeNode) + } + + //if we have not set this child yet then count it + // eslint-disable-next-line security/detect-object-injection + if (parentTreeNode.children[index] == null) { + // eslint-disable-line security/detect-object-injection + parentTreeNode.nonSparseChildCount++ + } + + //assign position + parentTreeNode.children[index] = treeNode // eslint-disable-line security/detect-object-injection + parentTreeNode.childHashes[index] = treeNode.hash // eslint-disable-line security/detect-object-injection + + //insert new parent nodes if we have not yet, guided by updated flag + if (parentTreeNode.updated === false) { + parentTreeNodeQueue.push(parentTreeNode) + parentTreeNode.updated = true + } + + if (treeNode.isIncomplete) { + // if(parentTreeNode.isIncomplete === false && parentTreeNode.updated === false ){ + // parentTreeNode.updated = true + // parentTreeNodeQueue.push(parentTreeNode) + // } + parentTreeNode.isIncomplete = true + } + + treeNode.updated = false //finished update of this node. 
+ } + + updateStats.updatedNodesPerLevel[i] = parentTreeNodeQueue.length // eslint-disable-line security/detect-object-injection + + //when we are one step below the sync depth add in incompete parents for hash updates! + // if(i === this.treeSyncDepth + 1){ + // for(let treeNode of this.incompleteNodes){ + // parentTreeNodeQueue.push(treeNode) + // } + // } + + //loop and compute hashes of parents + for (let j = 0; j < parentTreeNodeQueue.length; j++) { + const parentTreeNode = parentTreeNodeQueue[j] // eslint-disable-line security/detect-object-injection + parentTreeNode.hash = this.hashObj(parentTreeNode.childHashes) + + updateStats.totalHashes++ + updateStats.totalNodesHashed = updateStats.totalNodesHashed + parentTreeNode.nonSparseChildCount + updateStats.hashedChildrenPerLevel[i] = // eslint-disable-line security/detect-object-injection + updateStats.hashedChildrenPerLevel[i] + parentTreeNode.nonSparseChildCount // eslint-disable-line security/detect-object-injection + } + //set the parents to the treeNodeQueue so we can loop and work on the next layer up + treeNodeQueue = parentTreeNodeQueue + parentTreeNodeQueue = [] + } + + updateStats.totalLeafs = this.shardTrie.layerMaps[this.treeMaxDepth].size + + this.accountUpdateQueue = [] + + return updateStats + }, + + /** + * updateTrieAndBroadCast + * calculates what our tree leaf(max) depth and sync depths are. + * if there is a change we have to do some partition work to send old leaf data to new leafs. + * Then calls upateShardTrie() and broadcastSyncHashes() + * + * @param cycle + */ + async updateTrieAndBroadCast(cycle: number): Promise { + //calculate sync levels!! + const shardValues = this.stateManager.shardValuesByCycle.get(cycle) + const shardGlobals = shardValues.shardGlobals as StateManagerTypes.shardFunctionTypes.ShardGlobals + + const minHashesPerRange = 4 + // y = floor(log16((minHashesPerRange * max(1, x/consensusRange )))) + let syncDepthRaw = + Math.log(minHashesPerRange * Math.max(1, shardGlobals.numPartitions / (shardGlobals.consensusRadius * 2 + 1))) / + Math.log(16) + syncDepthRaw = Math.max(1, syncDepthRaw) // at least 1 + const newSyncDepth = Math.ceil(syncDepthRaw) + + //This only happens when the depth of our tree change (based on num nodes above) + //We have to partition the leaf node data into leafs of the correct level and rebuild the tree + if (this.treeSyncDepth != newSyncDepth) { + //todo add this in to prevent size flipflop..(better: some deadspace) && newSyncDepth > this.treeSyncDepth){ + const resizeStats = { + nodesWithAccounts: 0, + nodesWithoutAccounts: 0, + } + const newMaxDepth = newSyncDepth + 3 //todo the "+3" should be based on total number of stored accounts pre node (in a consensed way, needs to be on cycle chain) + //add more maps if needed (+1 because we have a map level 0) + while (this.shardTrie.layerMaps.length < newMaxDepth + 1) { + this.shardTrie.layerMaps.push(new Map()) + } + + //detach all accounts. + const currentLeafMap = this.shardTrie.layerMaps[this.treeMaxDepth] + + //put all accounts into queue to rebuild Tree! + for (const treeNode of currentLeafMap.values()) { + if (treeNode.accounts != null) { + for (const account of treeNode.accounts) { + //this.updateAccountHash(account.accountID, account.hash) + + //need to unshift these, becasue they could be older than what is alread in the queue!! 
+ this.accountUpdateQueue.unshift(account) + } + // //clear out leaf node only properties: + // treeNode.accounts = null + // treeNode.accountTempMap = null + + // //have to init these nodes to work as parents + // treeNode.children = Array(16) + // treeNode.childHashes = Array(16) + + //nestedCountersInstance.countEvent(`accountPatcher`, `updateTrieAndBroadCast: ok account list?`) + resizeStats.nodesWithAccounts++ + } else { + //nestedCountersInstance.countEvent(`accountPatcher`, `updateTrieAndBroadCast: null account list?`) + resizeStats.nodesWithoutAccounts++ + } + } + + //better to just wipe out old parent nodes! + for (let idx = 0; idx < newMaxDepth; idx++) { + this.shardTrie.layerMaps[idx].clear() // eslint-disable-line security/detect-object-injection + } + + if (newMaxDepth < this.treeMaxDepth) { + //cant get here, but consider deleting layers out of the map + /* prettier-ignore */ nestedCountersInstance.countEvent(`accountPatcher`, `max depth decrease oldMaxDepth:${this.treeMaxDepth} maxDepth :${newMaxDepth} stats:${utils.stringifyReduce(resizeStats)} cycle:${cycle}`) + } else { + /* prettier-ignore */ nestedCountersInstance.countEvent(`accountPatcher`, `max depth increase oldMaxDepth:${this.treeMaxDepth} maxDepth :${newMaxDepth} stats:${utils.stringifyReduce(resizeStats)} cycle:${cycle}`) + } + + this.treeSyncDepth = newSyncDepth + this.treeMaxDepth = newMaxDepth + } + + /* prettier-ignore */ nestedCountersInstance.countEvent(`accountPatcher`, ` syncDepth:${this.treeSyncDepth} maxDepth :${this.treeMaxDepth}`) + + // Update the trie with new account data updates since the last cycle + const updateStats = this.upateShardTrie(cycle) + + /* prettier-ignore */ nestedCountersInstance.countEvent(`accountPatcher`, `totalAccountsHashed`, updateStats.totalAccountsHashed) + + //broadcast sync data to nodes that cover similar portions of the tree + await this.broadcastSyncHashes(cycle) + } +} \ No newline at end of file diff --git a/src/state-manager/AccountPatcher.ts b/src/state-manager/AccountPatcher.ts index 6171aeea5..d6246d96b 100644 --- a/src/state-manager/AccountPatcher.ts +++ b/src/state-manager/AccountPatcher.ts @@ -95,6 +95,10 @@ import { robustQuery } from '../p2p/Utils' import { RequestReceiptForTxReqSerialized, serializeRequestReceiptForTxReq } from '../types/RequestReceiptForTxReq' import { deserializeRequestReceiptForTxResp, RequestReceiptForTxRespSerialized } from '../types/RequestReceiptForTxResp' import { Node } from '../shardus/shardus-types' +import { debugMethods } from './AccountPatcher.debug' +import { finderMethods } from './AccountPatcher.finder' +import { handlerMethods } from './AccountPatcher.handlers' +import { trieMethods } from './AccountPatcher.trie' type Line = { raw: string @@ -191,6 +195,13 @@ class AccountPatcher { lastRepairInfo: unknown + // Methods from split files + processShardDump: (stream: Response, number>, lines: Line[]) => { allPassed: boolean; allPassed2: boolean } + findBadAccounts: (cycle: number) => Promise + setupHandlers: () => void + upateShardTrie: (cycle: number) => HashTrieUpdateStats + updateTrieAndBroadCast: (cycle: number) => Promise + constructor( stateManager: StateManager, profiler: Profiler, @@ -257,6 +268,12 @@ class AccountPatcher { this.failEndCycle = -1 this.failRepairsCounter = 0 this.syncFailHistory = [] + + // Bind methods from split files + Object.assign(AccountPatcher.prototype, debugMethods) + Object.assign(AccountPatcher.prototype, finderMethods) + Object.assign(AccountPatcher.prototype, handlerMethods) + 
Object.assign(AccountPatcher.prototype, trieMethods) } hashObj(value: HashableObject): string { @@ -277,1701 +294,20 @@ class AccountPatcher { return -1 } if (a.radix > b.radix) { - return 1 - } - return 0 - } - - /*** - * ######## ## ## ######## ######## ####### #### ## ## ######## ###### - * ## ### ## ## ## ## ## ## ## ## ### ## ## ## ## - * ## #### ## ## ## ## ## ## ## ## #### ## ## ## - * ###### ## ## ## ## ## ######## ## ## ## ## ## ## ## ###### - * ## ## #### ## ## ## ## ## ## ## #### ## ## - * ## ## ### ## ## ## ## ## ## ## ### ## ## ## - * ######## ## ## ######## ## ####### #### ## ## ## ###### - */ - - setupHandlers(): void { - // this.p2p.registerInternal( - // 'get_trie_hashes', - // async ( - // payload: HashTrieReq, - // respond: (arg0: HashTrieResp) => Promise, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('get_trie_hashes', false, msgSize) - // const result = { nodeHashes: [], nodeId: Self.id } as HashTrieResp - // let responseCount = 0 - // let respondSize - - // if (Self.isFailed) { - // respondSize = await respond(result) - // } else { - // for (const radix of payload.radixList) { - // const level = radix.length - // const layerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection - // if (layerMap == null) { - // /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_hashes badrange:${level}`) - // break - // } - - // const hashTrieNode = layerMap.get(radix) - // if (hashTrieNode != null) { - // for (const childTreeNode of hashTrieNode.children) { - // if (childTreeNode != null) { - // result.nodeHashes.push({ radix: childTreeNode.radix, hash: childTreeNode.hash }) - // responseCount++ - // } - // } - // } - // } - - // /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_hashes c:${this.stateManager.currentCycleShardData.cycleNumber}`, responseCount) - - // // todo could recored a split time here.. so we know time spend on handling the request vs sending the response? - // // that would not be completely accurate because the time to get the data is outide of this handler... 
- // respondSize = await respond(result) - // } - // profilerInstance.scopedProfileSectionEnd('get_trie_hashes', respondSize) - // } - // ) - - // this.p2p.registerInternal( - // 'repair_oos_accounts', - // async ( - // payload: {repairInstructions: AccountRepairInstruction[]}, - // respond: (arg0: boolean) => Promise, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('repair_oos_accounts', false, msgSize) - - // try { - // for (const repairInstruction of payload?.repairInstructions) { - // const { accountID, txId, hash, accountData, targetNodeId, receipt2 } = repairInstruction - - // // check if we are the target node - // if (targetNodeId !== Self.id) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: not target node for txId: ${txId}`) - // continue - // } - - // // check if we cover this accountId - // const storageNodes = this.stateManager.transactionQueue.getStorageGroupForAccount(accountID) - // const isInStorageGroup = storageNodes.map((node) => node.id).includes(Self.id) - // if (!isInStorageGroup) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: not in storage group for account: ${accountID}`) - // continue - // } - // // check if we have already repaired this account - // const accountHashCache = this.stateManager.accountCache.getAccountHash(accountID) - // if (accountHashCache != null && accountHashCache.h === hash) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: already repaired account: ${accountID}`) - // continue - // } - // if (accountHashCache != null && accountHashCache.t > accountData.timestamp) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: we have newer account: ${accountID}`) - // continue - // } - - // const archivedQueueEntry = this.stateManager.transactionQueue.getQueueEntryArchived(txId, 'repair_oos_accounts') - - // if (archivedQueueEntry == null) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: no archivedQueueEntry for txId: ${txId}`) - // this.mainLogger.debug(`repair_oos_accounts: no archivedQueueEntry for txId: ${txId}`) - // continue - // } - - // // check the vote and confirmation status of the tx - // const bestMessage = receipt2.confirmOrChallenge - // const receivedBestVote = receipt2.appliedVote - - // if (receivedBestVote != null) { - // // Check if vote is from eligible list of voters for this TX - // if(this.stateManager.transactionQueue.useNewPOQ && !archivedQueueEntry.eligibleNodeIdsToVote.has(receivedBestVote.node_id)) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: vote from ineligible node for txId: ${txId}`) - // continue - // } - - // // Check signature of the vote - // if (!this.crypto.verify( - // receivedBestVote as SignedObject, - // archivedQueueEntry.executionGroupMap.get(receivedBestVote.node_id).publicKey - // )) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: vote signature invalid for txId: ${txId}`) - // continue - // } - - // // Check transaction result from vote - // if (!receivedBestVote.transaction_result) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: vote result not true for txId ${txId}`) - // continue - // } - - // // Check account hash. Calculate account hash of account given in instruction - // // and compare it with the account hash in the vote. 
- // const calculatedAccountHash = this.app.calculateAccountHash(accountData.data) - // let accountHashMatch = false - // for (let i = 0; i < receivedBestVote.account_id.length; i++) { - // if (receivedBestVote.account_id[i] === accountID) { - // if (receivedBestVote.account_state_hash_after[i] !== calculatedAccountHash) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: account hash mismatch for txId: ${txId}`) - // accountHashMatch = false - // } else { - // accountHashMatch = true - // } - // break - // } - // } - // if (accountHashMatch === false) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: vote account hash mismatch for txId: ${txId}`) - // continue - // } - // } else { - // // Skip this account apply as we were not able to get the best vote for this tx - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: no vote for txId: ${txId}`) - // continue - // } - - // if (this.stateManager.transactionQueue.useNewPOQ) { - // if (bestMessage != null) { - // // Skip if challenge receipt - // if (bestMessage.message === 'challenge') { - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: challenge for txId: ${txId}`) - // continue - // } - - // // Check if mesasge is from eligible list of responders for this TX - // if(!archivedQueueEntry.eligibleNodeIdsToConfirm.has(bestMessage.nodeId)) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: confirmation from ineligible node for txId: ${txId}`) - // continue - // } - - // // Check signature of the message - // if(!this.crypto.verify( - // bestMessage as SignedObject, - // archivedQueueEntry.executionGroupMap.get(bestMessage.nodeId).publicKey - // )) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: confirmation signature invalid for txId: ${txId}`) - // continue - // } - // } else { - // // Skip this account apply as we were not able to get the best confirmation for this tx - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts: no confirmation for txId: ${txId}`) - // continue - // } - // } - - // // update the account data (and cache?) - // const updatedAccounts: string[] = [] - // //save the account data. 
note this will make sure account hashes match the wrappers and return failed - // // hashes that don't match - // const failedHashes = await this.stateManager.checkAndSetAccountData( - // [accountData], - // `repair_oos_accounts:${txId}`, - // true, - // updatedAccounts - // ) - // if (logFlags.debug) this.mainLogger.debug(`repair_oos_accounts: ${updatedAccounts.length} updated, ${failedHashes.length} failed`) - // nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts:${updatedAccounts.length} updated, accountId: ${utils.makeShortHash(accountID)}, cycle: ${this.stateManager.currentCycleShardData.cycleNumber}`) - // if (failedHashes.length > 0) nestedCountersInstance.countEvent('accountPatcher', `repair_oos_accounts:${failedHashes.length} failed`) - // let success = false - // if (updatedAccounts.length > 0 && failedHashes.length === 0) { - // success = true - // } - // } - // await respond(true) - // } catch (e) { - // } - - // profilerInstance.scopedProfileSectionEnd('repair_oos_accounts') - // } - // ) - - const repairMissingAccountsBinary: Route> = { - name: InternalRouteEnum.binary_repair_oos_accounts, - handler: async (payloadBuffer, respond, header, sign) => { - const route = InternalRouteEnum.binary_repair_oos_accounts - nestedCountersInstance.countEvent('internal', route) - this.profiler.scopedProfileSectionStart(route, false, payloadBuffer.length) - try { - const requestStream = getStreamWithTypeCheck(payloadBuffer, TypeIdentifierEnum.cRepairOOSAccountsReq) - if (!requestStream) { - return - } - // (Optional) Check verification data in the header - const payload = deserializeRepairOOSAccountsReq(requestStream) - if (!payload?.repairInstructions) { - return - } - - let [latestCycle] = Context.p2p.getLatestCycles(1) - if (!latestCycle) { - this.mainLogger.error('repair_oos_accounts: no latest cycle') - return - } - - if (this.repairRequestsMadeThisCycle.cycle !== latestCycle.counter) { - this.repairRequestsMadeThisCycle.cycle = latestCycle.counter - this.repairRequestsMadeThisCycle.numRequests = 0 - } - - // verifyPayload(AJVSchemaEnum.RepairOOSAccountsReq', payload) - for (const repairInstruction of payload.repairInstructions) { - const { accountID, txId, hash, accountData, targetNodeId, signedReceipt } = repairInstruction - - // check if we are the target node - if (targetNodeId !== Self.id) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts: not target node for txId: ${txId}` - ) - continue - } - - // check if we cover this accountId - const storageNodes = this.stateManager.transactionQueue.getStorageGroupForAccount(accountID) - const isInStorageGroup = storageNodes.map((node) => node.id).includes(Self.id) - if (!isInStorageGroup) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts: not in storage group for account: ${accountID}` - ) - continue - } - // check if we have already repaired this account - const accountHashCache = this.stateManager.accountCache.getAccountHash(accountID) - if (accountHashCache != null && accountHashCache.h === hash) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts: already repaired account: ${accountID}` - ) - continue - } - if (accountHashCache != null && accountHashCache.t > accountData.timestamp) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts: we have newer account: ${accountID}` - ) - continue - } - - const archivedQueueEntry = 
this.stateManager.transactionQueue.getQueueEntryArchived( - txId, - 'repair_oos_accounts' - ) - - if (archivedQueueEntry == null) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts: no archivedQueueEntry for txId: ${txId}` - ) - this.mainLogger.debug(`repair_oos_accounts: no archivedQueueEntry for txId: ${txId}`) - continue - } - - const proposal = signedReceipt.proposal - if (signedReceipt.proposalHash !== this.stateManager.transactionConsensus.calculateVoteHash(proposal)) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts: proposal hash mismatch for txId: ${txId}` - ) - continue - } - - const queryFn = async (node: Node) => { - const message = { txid: txId, timestamp: accountData.timestamp } - return await this.p2p.askBinary( - node, - InternalRouteEnum.binary_request_receipt_for_tx, - message, - serializeRequestReceiptForTxReq, - deserializeRequestReceiptForTxResp, - {} - ) - } - - if ( - this.repairRequestsMadeThisCycle.numRequests + 1 > - this.config.stateManager.patcherRepairByReceiptPerUpdate - ) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts: too many repair requests this cycle` - ) - this.mainLogger.warn( - `binary/repair_oos_accounts: too many repair requests this cycle (${latestCycle.counter})` - ) - return - } - - // make sure tx hasn't been altered by robust querying for the proposal using request txid and timestamp - const txReceipt = await robustQuery(storageNodes, queryFn) - if (txReceipt.isRobustResult === false) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts: robust query failed for txId: ${txId}` - ) - continue - } - this.repairRequestsMadeThisCycle.numRequests++ - - if ( - txReceipt.topResult.success !== true || - txReceipt.topResult.receipt == null || - txReceipt.topResult.receipt.proposalHash == null - ) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts: robust query couldn't find queueEntry for txId: ${txId}` - ) - continue - } - - if (signedReceipt.proposalHash !== txReceipt.topResult.receipt.proposalHash) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts: proposal hash mismatch for txId: ${txId}` - ) - continue - } - - // if (receivedBestVote != null) { - // Check if vote is from eligible list of voters for this TX - // if ( - // this.stateManager.transactionQueue.useNewPOQ && - // !archivedQueueEntry.eligibleNodeIdsToVote.has(receivedBestVote.node_id) - // ) { - // nestedCountersInstance.countEvent( - // 'accountPatcher', - // `binary/repair_oos_accounts: vote from ineligible node for txId: ${txId}` - // ) - // continue - // } - - // Check signature of the vote - // if ( - // !this.crypto.verify( - // receivedBestVote as SignedObject, - // archivedQueueEntry.executionGroupMap.get(receivedBestVote.node_id).publicKey - // ) - // ) { - // nestedCountersInstance.countEvent( - // 'accountPatcher', - // `binary/repair_oos_accounts: vote signature invalid for txId: ${txId}` - // ) - // continue - // } - - // Verify signed receipt - const executionGroupNodes = new Set(archivedQueueEntry.executionGroup.map((node) => node.publicKey)) - const receiptVerification = this.stateManager.transactionConsensus.verifyAppliedReceipt( - signedReceipt, - executionGroupNodes - ) - if (receiptVerification !== true) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `repair_oos_accounts: receipt verification failed for txId: ${txId}` - ) - 
continue - } - - // Check transaction result from vote - if (!proposal.applied) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts: proposal result not true for txId ${txId}` - ) - continue - } - - // Check account hash. Calculate account hash of account given in instruction - // and compare it with the account hash in the vote. - const calculatedAccountHash = this.app.calculateAccountHash(accountData.data) - let accountHashMatch = false - for (let i = 0; i < proposal.accountIDs.length; i++) { - if (proposal.accountIDs[i] === accountID) { - if (proposal.afterStateHashes[i] !== calculatedAccountHash) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts: account hash mismatch for txId: ${txId}` - ) - accountHashMatch = false - } else { - accountHashMatch = true - } - break - } - } - if (accountHashMatch === false) { - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts: vote account hash mismatch for txId: ${txId}` - ) - continue - } - // } else { - // // Skip this account apply as we were not able to get the best vote for this tx - // nestedCountersInstance.countEvent( - // 'accountPatcher', - // `binary/repair_oos_accounts: no vote for txId: ${txId}` - // ) - // continue - // } - - // if (this.stateManager.transactionQueue.useNewPOQ) { - // if (bestMessage != null) { - // // Skip if challenge receipt - // if (bestMessage.message === 'challenge') { - // nestedCountersInstance.countEvent( - // 'accountPatcher', - // `binary/repair_oos_accounts: challenge for txId: ${txId}` - // ) - // continue - // } - - // // Check if mesasge is from eligible list of responders for this TX - // if (!archivedQueueEntry.eligibleNodeIdsToConfirm.has(bestMessage.nodeId)) { - // nestedCountersInstance.countEvent( - // 'accountPatcher', - // `binary/repair_oos_accounts: confirmation from ineligible node for txId: ${txId}` - // ) - // continue - // } - - // // Check signature of the message - // if ( - // !this.crypto.verify( - // bestMessage as SignedObject, - // archivedQueueEntry.executionGroupMap.get(bestMessage.nodeId).publicKey - // ) - // ) { - // nestedCountersInstance.countEvent( - // 'accountPatcher', - // `binary/repair_oos_accounts: confirmation signature invalid for txId: ${txId}` - // ) - // continue - // } - // } else { - // // Skip this account apply as we were not able to get the best confirmation for this tx - // nestedCountersInstance.countEvent( - // 'accountPatcher', - // `binary/repair_oos_accounts: no confirmation for txId: ${txId}` - // ) - // continue - // } - // } - - // update the account data (and cache?) - const updatedAccounts: string[] = [] - //save the account data. 
note this will make sure account hashes match the wrappers and return failed - // hashes that don't match - const failedHashes = await this.stateManager.checkAndSetAccountData( - [accountData], - `binary/repair_oos_accounts:${txId}`, - true, - updatedAccounts - ) - if (logFlags.debug) - this.mainLogger.debug( - `binary/repair_oos_accounts: ${updatedAccounts.length} updated, ${failedHashes.length} failed` - ) - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts:${updatedAccounts.length} updated, accountId: ${utils.makeShortHash( - accountID - )}, cycle: ${this.stateManager.currentCycleShardData.cycleNumber}` - ) - if (failedHashes.length > 0) - nestedCountersInstance.countEvent( - 'accountPatcher', - `binary/repair_oos_accounts:${failedHashes.length} failed` - ) - let success = false - if (updatedAccounts.length > 0 && failedHashes.length === 0) { - success = true - } - } - } catch (e) { - // Error handling - console.error(`Error in repairMissingAccountsBinary handler: ${e.message}`) - } finally { - this.profiler.scopedProfileSectionEnd(route) - } - }, - } - - const getTrieHashesBinary: Route> = { - name: InternalRouteEnum.binary_get_trie_hashes, - handler: async (payloadBuffer, respond, header, sign) => { - const route = InternalRouteEnum.binary_get_trie_hashes - nestedCountersInstance.countEvent('internal', route) - this.profiler.scopedProfileSectionStart(route, false, payloadBuffer.length) - const result = { nodeHashes: [], nodeId: Self.id } as GetTrieHashesResponse - try { - const requestStream = getStreamWithTypeCheck(payloadBuffer, TypeIdentifierEnum.cGetTrieHashesReq) - if (!requestStream) { - respond(result, serializeGetTrieHashesResp) - return - } - const readableReq = deserializeGetTrieHashesReq(requestStream) - let responseCount = 0 - if (!Self.isFailed) { - for (const radix of readableReq.radixList) { - const level = radix.length - const layerMap = this.shardTrie.layerMaps[level] - if (layerMap == null) { - /* prettier-ignore */ if (this.config.debug.verboseNestedCounters) nestedCountersInstance.countEvent('accountPatcher', `get_trie_hashes badrange:${level}`) - break - } - const hashTrieNode = layerMap.get(radix) - if (hashTrieNode != null) { - for (const childTreeNode of hashTrieNode.children) { - if (childTreeNode != null) { - result.nodeHashes.push({ radix: childTreeNode.radix, hash: childTreeNode.hash }) - responseCount++ - } - } - } - } - if (responseCount > 0) { - /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_hashes c:${this.stateManager.currentCycleShardData.cycleNumber}`, responseCount) - } - } - respond(result, serializeGetTrieHashesResp) - } catch (e) { - // Error handling - console.error(`Error in getTrieHashesBinary handler: ${e.message}`) - respond({ nodeHashes: null }, serializeGetTrieHashesResp) - } finally { - this.profiler.scopedProfileSectionEnd(route) - } - }, - } - - this.p2p.registerInternalBinary(getTrieHashesBinary.name, getTrieHashesBinary.handler) - this.p2p.registerInternalBinary(repairMissingAccountsBinary.name, repairMissingAccountsBinary.handler) - - // this.p2p.registerInternal( - // 'sync_trie_hashes', - // async ( - // payload: HashTrieSyncTell, - // _respondWrapped: unknown, - // sender: string, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('sync_trie_hashes', false, msgSize) - // try { - // //TODO use our own definition of current cycle. - // //use playlod cycle to filter out TXs.. 
- // const cycle = payload.cycle - - // let hashTrieSyncConsensus = this.hashTrieSyncConsensusByCycle.get(payload.cycle) - // if (hashTrieSyncConsensus == null) { - // hashTrieSyncConsensus = { - // cycle: payload.cycle, - // radixHashVotes: new Map(), - // coverageMap: new Map(), - // } - // this.hashTrieSyncConsensusByCycle.set(payload.cycle, hashTrieSyncConsensus) - - // const shardValues = this.stateManager.shardValuesByCycle.get(payload.cycle) - // if (shardValues == null) { - // /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `sync_trie_hashes not ready c:${payload.cycle}`) - // return - // } - - // //mark syncing radixes.. - // //todo compare to cycle!! only init if from current cycle. - // this.initStoredRadixValues(payload.cycle) - // } - - // const node = NodeList.nodes.get(sender) - - // for (const nodeHashes of payload.nodeHashes) { - // //don't record the vote if we cant use it! - // // easier than filtering it out later on in the stream. - // if (this.isRadixStored(cycle, nodeHashes.radix) === false) { - // continue - // } - - // //todo: secure that the voter is allowed to vote. - // let hashVote = hashTrieSyncConsensus.radixHashVotes.get(nodeHashes.radix) - // if (hashVote == null) { - // hashVote = { allVotes: new Map(), bestHash: nodeHashes.hash, bestVotes: 1 } - // hashTrieSyncConsensus.radixHashVotes.set(nodeHashes.radix, hashVote) - // hashVote.allVotes.set(nodeHashes.hash, { count: 1, voters: [node] }) - // } else { - // const voteEntry = hashVote.allVotes.get(nodeHashes.hash) - // if (voteEntry == null) { - // hashVote.allVotes.set(nodeHashes.hash, { count: 1, voters: [node] }) - // } else { - // const voteCount = voteEntry.count + 1 - // voteEntry.count = voteCount - // voteEntry.voters.push(node) - // //hashVote.allVotes.set(nodeHashes.hash, votes + 1) - // //will ties be a problem? (not if we need a majority!) - // if (voteCount > hashVote.bestVotes) { - // hashVote.bestVotes = voteCount - // hashVote.bestHash = nodeHashes.hash - // } - // } - // } - // } - // } finally { - // profilerInstance.scopedProfileSectionEnd('sync_trie_hashes') - // } - // } - // ) - - const syncTrieHashesBinaryHandler: Route> = { - name: InternalRouteEnum.binary_sync_trie_hashes, - handler: async (payload, respond, header, sign) => { - const route = InternalRouteEnum.binary_sync_trie_hashes - nestedCountersInstance.countEvent('internal', route) - this.profiler.scopedProfileSectionStart(route, false, payload.length) - - const errorHandler = ( - errorType: RequestErrorEnum, - opts?: { customErrorLog?: string; customCounterSuffix?: string } - ): void => requestErrorHandler(route, errorType, header, opts) - - try { - const stream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cSyncTrieHashesReq) - if (!stream) { - return errorHandler(RequestErrorEnum.InvalidRequest) - } - const request = deserializeSyncTrieHashesReq(stream) - const cycle = request.cycle - - let hashTrieSyncConsensus = this.hashTrieSyncConsensusByCycle.get(cycle) - if (hashTrieSyncConsensus == null) { - hashTrieSyncConsensus = { - cycle, - radixHashVotes: new Map(), - coverageMap: new Map(), - } - this.hashTrieSyncConsensusByCycle.set(cycle, hashTrieSyncConsensus) - - const shardValues = this.stateManager.shardValuesByCycle.get(cycle) - if (shardValues == null) { - nestedCountersInstance.countEvent('accountPatcher', `sync_trie_hashes not ready c:${cycle}`) - if (logFlags.debug) console.error(`Shard values not ready for cycle: ${cycle}`) - return - } - - //mark syncing radixes.. 
- //todo compare to cycle!! only init if from current cycle. - this.initStoredRadixValues(cycle) - } - - const node = NodeList.nodes.get(header.sender_id) - - for (const nodeHashes of request.nodeHashes) { - if (this.isRadixStored(cycle, nodeHashes.radix) === false) { - continue - } - - // check the length of the radix - if (nodeHashes.radix.length !== this.treeSyncDepth) { - if (logFlags.error) - this.mainLogger.error(`syncTrieHashesBinaryHandler: radix length mismatch: ${nodeHashes.radix}`) - nestedCountersInstance.countEvent('accountPatcher', `${route}-radix-length-mismatch`) - continue - } - - // todo: secure that the voter is allowed to vote. - let hashVote = hashTrieSyncConsensus.radixHashVotes.get(nodeHashes.radix) - if (hashVote == null) { - hashVote = { allVotes: new Map(), bestHash: nodeHashes.hash, bestVotes: 1 } - hashTrieSyncConsensus.radixHashVotes.set(nodeHashes.radix, hashVote) - hashVote.allVotes.set(nodeHashes.hash, { count: 1, voters: new Set([node]) }) - } else { - const voteEntry = hashVote.allVotes.get(nodeHashes.hash) - if (voteEntry == null) { - hashVote.allVotes.set(nodeHashes.hash, { count: 1, voters: new Set([node]) }) - } else { - voteEntry.voters.add(node) - const voteCount = voteEntry.voters.size - voteEntry.count = voteCount - if (voteCount > hashVote.bestVotes) { - hashVote.bestVotes = voteCount - hashVote.bestHash = nodeHashes.hash - } - } - } - } - } catch (e) { - /* prettier-ignore */ if (logFlags.error) console.error(`Error processing syncTrieHashesBinaryHandler: ${e}`) - nestedCountersInstance.countEvent('internal', `${route}-exception`) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route}: Exception executing request: ${errorToStringFull(e)}`) - } finally { - profilerInstance.scopedProfileSectionEnd(route) - } - }, - } - - this.p2p.registerInternalBinary(syncTrieHashesBinaryHandler.name, syncTrieHashesBinaryHandler.handler) - - // //get child accountHashes for radix. //get the hashes and ids so we know what to fix. 
- // this.p2p.registerInternal( - // 'get_trie_accountHashes', - // async ( - // payload: HashTrieReq, - // respond: (arg0: HashTrieAccountsResp) => Promise, - // _sender: string, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('get_trie_accountHashes', false, msgSize) - // //nodeChildHashes: {radix:string, childAccounts:{accountID:string, hash:string}[]}[] - // const result = { - // nodeChildHashes: [], - // stats: { matched: 0, visisted: 0, empty: 0, childCount: 0 }, - // nodeId: Self.id - // } as HashTrieAccountsResp - - // const patcherMaxChildHashResponses = this.config.stateManager.patcherMaxChildHashResponses - - // for (const radix of payload.radixList) { - // result.stats.visisted++ - // const level = radix.length - // const layerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection - // if (layerMap == null) { - // /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_accountHashes badrange:${level}`) - // break - // } - - // const hashTrieNode = layerMap.get(radix) - // if (hashTrieNode != null && hashTrieNode.accounts != null) { - // result.stats.matched++ - // const childAccounts = [] - // result.nodeChildHashes.push({ radix, childAccounts }) - // for (const account of hashTrieNode.accounts) { - // childAccounts.push({ accountID: account.accountID, hash: account.hash }) - // result.stats.childCount++ - // } - // if (hashTrieNode.accounts.length === 0) { - // result.stats.empty++ - // } - // } - - // //some protection on how many responses we can send - // if (result.stats.childCount > patcherMaxChildHashResponses) { - // break - // } - // } - - // /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_accountHashes c:${this.stateManager.currentCycleShardData.cycleNumber}`, result.stats.childCount) - - // const respondSize = await respond(result) - // profilerInstance.scopedProfileSectionEnd('get_trie_accountHashes', respondSize) - // } - // ) - - const getTrieAccountHashesBinaryHandler: Route> = { - name: InternalRouteEnum.binary_get_trie_account_hashes, - handler: (payload, respond, header, sign) => { - const route = InternalRouteEnum.binary_get_trie_account_hashes - profilerInstance.scopedProfileSectionStart(route, false, payload.length) - const result = { - nodeChildHashes: [], - stats: { matched: 0, visisted: 0, empty: 0, childCount: 0 }, - nodeId: Self.id, - } as HashTrieAccountsResp - try { - const stream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cGetAccountTrieHashesReq) - if (!stream) { - requestErrorHandler(route, RequestErrorEnum.InvalidRequest, header) - return respond(BadRequest('invalid request stream'), serializeResponseError) - } - const req = deserializeGetTrieAccountHashesReq(stream) - const radixList = req.radixList - const patcherMaxChildHashResponses = this.config.stateManager.patcherMaxChildHashResponses - for (const radix of radixList) { - result.stats.visisted++ - const level = radix.length - const layerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection - if (layerMap == null) { - /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_accountHashes badrange:${level}`) - break - } - - const hashTrieNode = layerMap.get(radix) - if (hashTrieNode != null && hashTrieNode.accounts != null) { - result.stats.matched++ - const childAccounts = [] - result.nodeChildHashes.push({ radix, childAccounts }) - for (const account of 
hashTrieNode.accounts) { - childAccounts.push({ accountID: account.accountID, hash: account.hash }) - result.stats.childCount++ - } - if (hashTrieNode.accounts.length === 0) { - result.stats.empty++ - } - } - - //some protection on how many responses we can send - if (result.stats.childCount > patcherMaxChildHashResponses) { - break - } - } - - /* prettier-ignore */ if (this.config.debug.verboseNestedCounters) nestedCountersInstance.countEvent('accountPatcher', `binary_get_trie_accountHashes c:${this.stateManager.currentCycleShardData.cycleNumber}`, result.stats.childCount) - respond(result, serializeGetTrieAccountHashesResp) - } catch (e) { - this.statemanager_fatal( - 'binary_get_trie_accountHashes-failed', - 'binary_get_trie_accountHashes:' + e.name + ': ' + e.message + ' at ' + e.stack - ) - nestedCountersInstance.countEvent('internal', `${route}-exception`) - respond(InternalError('exception executing request'), serializeResponseError) - } finally { - profilerInstance.scopedProfileSectionEnd(route) - } - }, - } - - this.p2p.registerInternalBinary(getTrieAccountHashesBinaryHandler.name, getTrieAccountHashesBinaryHandler.handler) - - // this.p2p.registerInternal( - // 'get_account_data_by_hashes', - // async ( - // payload: HashTrieAccountDataRequest, - // respond: (arg0: HashTrieAccountDataResponse) => Promise, - // _sender: string, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('get_account_data_by_hashes', false, msgSize) - // nestedCountersInstance.countEvent('accountPatcher', `get_account_data_by_hashes`) - // const result: HashTrieAccountDataResponse = { accounts: [], stateTableData: [] } - // try { - // //nodeChildHashes: {radix:string, childAccounts:{accountID:string, hash:string}[]}[] - // const queryStats = { - // fix1: 0, - // fix2: 0, - // skip_localHashMismatch: 0, - // skip_requestHashMismatch: 0, - // returned: 0, - // missingResp: false, - // noResp: false, - // } - - // const hashMap = new Map() - // const accountIDs = [] - - // //should limit on asking side, this is just a precaution - // if (payload.accounts.length > 900) { - // payload.accounts = payload.accounts.slice(0, 900) - // } - - // for (const accountHashEntry of payload.accounts) { - // // let radix = accountHashEntry.accountID.substr(0, this.treeMaxDepth) - // // let layerMap = this.shardTrie.layerMaps[this.treeMaxDepth] - // // let hashTrieNode = layerMap.get(radix) - // if ( - // accountHashEntry == null || - // accountHashEntry.hash == null || - // accountHashEntry.accountID == null - // ) { - // queryStats.fix1++ - // continue - // } - // hashMap.set(accountHashEntry.accountID, accountHashEntry.hash) - // accountIDs.push(accountHashEntry.accountID) - // } - - // const accountData = await this.app.getAccountDataByList(accountIDs) - - // const skippedAccounts: AccountIDAndHash[] = [] - // const returnedAccounts: AccountIDAndHash[] = [] - - // const accountsToGetStateTableDataFor = [] - // //only return results that match the requested hash! 
- // const accountDataFinal: Shardus.WrappedData[] = [] - // if (accountData != null) { - // for (const wrappedAccount of accountData) { - // if (wrappedAccount == null || wrappedAccount.stateId == null || wrappedAccount.data == null) { - // queryStats.fix2++ - // continue - // } - - // const { accountId, stateId, data: recordData } = wrappedAccount - // const accountHash = this.app.calculateAccountHash(recordData) - // if (stateId !== accountHash) { - // skippedAccounts.push({ accountID: accountId, hash: stateId }) - // queryStats.skip_localHashMismatch++ - // continue - // } - - // if (hashMap.get(accountId) === wrappedAccount.stateId) { - // accountDataFinal.push(wrappedAccount) - // returnedAccounts.push({ accountID: accountId, hash: stateId }) - // accountsToGetStateTableDataFor.push(accountId) - // queryStats.returned++ - // } else { - // queryStats.skip_requestHashMismatch++ - // skippedAccounts.push({ accountID: accountId, hash: stateId }) - // } - - // // let wrappedAccountInQueueRef = wrappedAccount as Shardus.WrappedDataFromQueue - // // wrappedAccountInQueueRef.seenInQueue = false - - // // if (this.stateManager.lastSeenAccountsMap != null) { - // // let queueEntry = this.stateManager.lastSeenAccountsMap[wrappedAccountInQueueRef.accountId] - // // if (queueEntry != null) { - // // wrappedAccountInQueueRef.seenInQueue = true - // // } - // // } - // } - // } - // //PERF could disable this for more perf? - // //this.stateManager.testAccountDataWrapped(accountDataFinal) - - // if (queryStats.returned < payload.accounts.length) { - // nestedCountersInstance.countEvent('accountPatcher', `get_account_data_by_hashes incomplete`) - // queryStats.missingResp = true - // if (queryStats.returned === 0) { - // nestedCountersInstance.countEvent('accountPatcher', `get_account_data_by_hashes no results`) - // queryStats.noResp = true - // } - // } - - // this.mainLogger.debug( - // `get_account_data_by_hashes1 requests[${payload.accounts.length}] :${utils.stringifyReduce( - // payload.accounts - // )} ` - // ) - // this.mainLogger.debug( - // `get_account_data_by_hashes2 skippedAccounts:${utils.stringifyReduce(skippedAccounts)} ` - // ) - // this.mainLogger.debug( - // `get_account_data_by_hashes3 returnedAccounts:${utils.stringifyReduce(returnedAccounts)} ` - // ) - // this.mainLogger.debug( - // `get_account_data_by_hashes4 queryStats:${utils.stringifyReduce(queryStats)} ` - // ) - // this.mainLogger.debug( - // `get_account_data_by_hashes4 stateTabledata:${utils.stringifyReduce(result.stateTableData)} ` - // ) - // result.accounts = accountDataFinal - // } catch (ex) { - // this.statemanager_fatal( - // `get_account_data_by_hashes-failed`, - // 'get_account_data_by_hashes:' + ex.name + ': ' + ex.message + ' at ' + ex.stack - // ) - // } - // const respondSize = await respond(result) - // profilerInstance.scopedProfileSectionEnd('get_account_data_by_hashes', respondSize) - // } - // ) - - const getAccountDataByHashesBinaryHandler: Route> = { - name: InternalRouteEnum.binary_get_account_data_by_hashes, - handler: async (payload, respond) => { - const route = InternalRouteEnum.binary_get_account_data_by_hashes - profilerInstance.scopedProfileSectionStart(route) - nestedCountersInstance.countEvent('internal', route) - const result = { accounts: [], stateTableData: [] } as GetAccountDataByHashesResp - try { - const stream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cGetAccountDataByHashesReq) - if (!stream) { - return respond(result, serializeGetAccountDataByHashesResp) - } - - const 
req = deserializeGetAccountDataByHashesReq(stream) - - const queryStats = { - fix1: 0, - fix2: 0, - skip_localHashMismatch: 0, - skip_requestHashMismatch: 0, - returned: 0, - missingResp: false, - noResp: false, - } - - const hashMap = new Map() - const accountIDs = [] - - if (req.accounts.length > 900) { - req.accounts = req.accounts.slice(0, 900) - } - - for (const accountHashEntry of req.accounts) { - if (accountHashEntry == null || accountHashEntry.hash == null || accountHashEntry.accountID == null) { - queryStats.fix1++ - continue - } - hashMap.set(accountHashEntry.accountID, accountHashEntry.hash) - accountIDs.push(accountHashEntry.accountID) - } - - const accountData = await this.app.getAccountDataByList(accountIDs) - const skippedAccounts: AccountIDAndHash[] = [] - const returnedAccounts: AccountIDAndHash[] = [] - - const accountsToGetStateTableDataFor = [] - const accountDataFinal: WrappedData[] = [] - - if (accountData != null) { - for (const wrappedAccount of accountData) { - if (wrappedAccount == null || wrappedAccount.stateId == null || wrappedAccount.data == null) { - queryStats.fix2++ - continue - } - const { accountId, stateId, data: recordData } = wrappedAccount - const accountHash = this.app.calculateAccountHash(recordData) - if (stateId !== accountHash) { - skippedAccounts.push({ accountID: accountId, hash: stateId }) - queryStats.skip_localHashMismatch++ - continue - } - - if (hashMap.get(accountId) === wrappedAccount.stateId) { - accountDataFinal.push(wrappedAccount) - returnedAccounts.push({ accountID: accountId, hash: stateId }) - accountsToGetStateTableDataFor.push(accountId) - queryStats.returned++ - } else { - queryStats.skip_requestHashMismatch++ - skippedAccounts.push({ accountID: accountId, hash: stateId }) - } - } - } - - if (queryStats.returned < req.accounts.length) { - nestedCountersInstance.countEvent('internal', `${route} incomplete`) - queryStats.missingResp = true - if (queryStats.returned === 0) { - nestedCountersInstance.countEvent('internal', `${route} no results`) - queryStats.noResp = true - } - } - - this.mainLogger.debug(`${route} 1 requests[${req.accounts.length}] :${utils.stringifyReduce(req.accounts)} `) - this.mainLogger.debug(`${route} 2 skippedAccounts:${utils.stringifyReduce(skippedAccounts)} `) - this.mainLogger.debug(`${route} 3 returnedAccounts:${utils.stringifyReduce(returnedAccounts)} `) - this.mainLogger.debug(`${route} 4 queryStats:${utils.stringifyReduce(queryStats)} `) - this.mainLogger.debug(`${route} stateTabledata:${utils.stringifyReduce(result.stateTableData)} `) - result.accounts = accountDataFinal - respond(result, serializeGetAccountDataByHashesResp) - } catch (ex) { - this.statemanager_fatal( - `get_account_data_by_hashes-failed`, - 'get_account_data_by_hashes:' + ex.name + ': ' + ex.message + ' at ' + ex.stack - ) - respond(result, serializeGetAccountDataByHashesResp) - } finally { - profilerInstance.scopedProfileSectionEnd(route) - } - }, - } - - this.p2p.registerInternalBinary( - getAccountDataByHashesBinaryHandler.name, - getAccountDataByHashesBinaryHandler.handler - ) - - Context.network.registerExternalGet('debug-patcher-ignore-hash-updates', isDebugModeMiddleware, (_req, res) => { - try { - this.debug_ignoreUpdates = !this.debug_ignoreUpdates - res.write(`this.debug_ignoreUpdates: ${this.debug_ignoreUpdates}\n`) - } catch (e) { - res.write(`${e}\n`) - } - res.end() - }) - Context.network.registerExternalGet('debug-patcher-fail-tx', isDebugModeMiddleware, (_req, res) => { - try { - //toggle chance to fail TXs in a 
way that they do not get fixed by the first tier of repair. - - if (this.stateManager.failNoRepairTxChance === 0) { - this.stateManager.failNoRepairTxChance = 1 - } else { - this.stateManager.failNoRepairTxChance = 0 - } - - res.write(`this.failNoRepairTxChance: ${this.stateManager.failNoRepairTxChance}\n`) - } catch (e) { - res.write(`${e}\n`) - } - res.end() - }) - Context.network.registerExternalGet('debug-patcher-voteflip', isDebugModeMiddleware, (_req, res) => { - try { - if (this.stateManager.voteFlipChance === 0) { - this.stateManager.voteFlipChance = 1 - } else { - this.stateManager.voteFlipChance = 0 - } - - res.write(`this.voteFlipChance: ${this.stateManager.voteFlipChance}\n`) - } catch (e) { - res.write(`${e}\n`) - } - res.end() - }) - Context.network.registerExternalGet('debug-patcher-toggle-skip', isDebugModeMiddleware, (_req, res) => { - try { - if (this.stateManager.debugSkipPatcherRepair === false) { - this.stateManager.debugSkipPatcherRepair = true - } else { - this.stateManager.debugSkipPatcherRepair = false - } - - res.write(`this.debugSkipPatcherRepair: ${this.stateManager.debugSkipPatcherRepair}\n`) - } catch (e) { - res.write(`${e}\n`) - } - res.end() - }) - Context.network.registerExternalGet('debug-patcher-dumpTree', isDebugModeMiddlewareMedium, (_req, res) => { - try { - // this.statemanager_fatal('debug shardTrie',`temp shardTrie ${utils.stringifyReduce(this.shardTrie.layerMaps[0].values().next().value)}`) - // res.write(`${utils.stringifyReduce(this.shardTrie.layerMaps[0].values().next().value)}\n`) - - const trieRoot = this.shardTrie.layerMaps[0].values().next().value - - //strip noisy fields - const tempString = JSON.stringify(trieRoot, utils.debugReplacer) - const processedObject = Utils.safeJsonParse(tempString) - - // use stringify to put a stable sort on the object keys (important for comparisons) - const finalStr = utils.stringifyReduce(processedObject) - - this.statemanager_fatal('debug shardTrie', `temp shardTrie ${finalStr}`) - res.write(`${finalStr}\n`) - } catch (e) { - res.write(`${e}\n`) - } - res.end() - }) - - Context.network.registerExternalGet('debug-patcher-dumpTree-partial', isDebugModeMiddlewareMedium, (req, res) => { - try { - const subTree: boolean = req.query.subtree === 'true' - let radix: string = req.query.radix as string - if (radix.length > this.treeMaxDepth) radix = radix.slice(0, this.treeMaxDepth) - const level = radix.length - const layerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection - - let hashTrieNode = layerMap.get(radix.toLowerCase()) - if (!subTree) { - // deep clone the trie node before removing children property - hashTrieNode = Utils.safeJsonParse(Utils.safeStringify(hashTrieNode)) - delete hashTrieNode.children - } - if (!hashTrieNode) { - /* prettier-ignore */ if (logFlags.error) console.error('debug-patcher-dumpTree-partial - Radix not found. 
Returning 404') - res.status(404).json({ error: 'Radix not found' }) - return - } - //strip noisy fields - const tempString = JSON.stringify(hashTrieNode, utils.debugReplacer) - const processedObject = Utils.safeJsonParse(tempString) - - // use stringify to put a stable sort on the object keys (important for comparisons) - const finalStr = utils.stringifyReduce(processedObject) - - this.statemanager_fatal('debug shardTrie', `temp shardTrie ${finalStr}`) - res.write(`${finalStr}\n`) - } catch (e) { - console.log('Error', e) - res.write(`${e}\n`) - } - res.end() - }) - - Context.network.registerExternalGet('debug-patcher-fail-hashes', isDebugModeMiddlewareLow, (_req, res) => { - try { - const lastCycle = this.p2p.state.getLastCycle() - const cycle = lastCycle.counter - const minVotes = this.calculateMinVotes() - const notEnoughVotesRadix = {} - const outOfSyncRadix = {} - - const hashTrieSyncConsensus = this.hashTrieSyncConsensusByCycle.get(cycle) - - if (!hashTrieSyncConsensus) { - res.json({ error: `Unable to find hashTrieSyncConsensus for last cycle ${lastCycle}` }) - return - } - - for (const radix of hashTrieSyncConsensus.radixHashVotes.keys()) { - const votesMap = hashTrieSyncConsensus.radixHashVotes.get(radix) - const ourTrieNode = this.shardTrie.layerMaps[this.treeSyncDepth].get(radix) - - const hasEnoughVotes = votesMap.bestVotes >= minVotes - const isRadixInSync = ourTrieNode ? ourTrieNode.hash === votesMap.bestHash : false - - if (!hasEnoughVotes || !isRadixInSync) { - const kvp = [] - for (const [key, value] of votesMap.allVotes.entries()) { - kvp.push({ - id: key, - count: value.count, - nodeIDs: [...value.voters].map((node) => utils.makeShortHash(node.id) + ':' + node.externalPort), - }) - } - const simpleMap = { - bestHash: votesMap.bestHash, - ourHash: ourTrieNode ? ourTrieNode.hash : '', - bestVotes: votesMap.bestVotes, - minVotes, - allVotes: kvp, - } - if (!hasEnoughVotes) notEnoughVotesRadix[radix] = simpleMap // eslint-disable-line security/detect-object-injection - if (!isRadixInSync) outOfSyncRadix[radix] = simpleMap // eslint-disable-line security/detect-object-injection - } - } - res.json({ - cycle, - notEnoughVotesRadix, - outOfSyncRadix, - }) - return - } catch (e) { - console.log('Error', e) - res.write(`${e}\n`) - } - res.end() - }) - - Context.network.registerExternalGet('get-tree-last-insync', isDebugModeMiddlewareLow, (_req, res) => { - res.write(`${this.failedLastTrieSync === false}\n`) - res.end() - }) - - Context.network.registerExternalGet('get-tree-last-insync-detail', isDebugModeMiddlewareLow, (_req, res) => { - let prettyJSON = JSON.stringify(this.lastInSyncResult, null, 2) - res.write(`${prettyJSON}\n`) - res.end() - }) - - Context.network.registerExternalGet('trie-repair-dump', isDebugModeMiddleware, (_req, res) => { - res.write(`${utils.stringifyReduce(this.lastRepairInfo)}\n`) - res.end() - }) - - // - Context.network.registerExternalGet('get-shard-dump', isDebugModeMiddleware, (_req, res) => { - res.write(`${this.stateManager.lastShardReport}\n`) - res.end() - }) - - /** - * - * - * Usage: http://:/account-report?id= - */ - Context.network.registerExternalGet('account-report', isDebugModeMiddleware, async (req, res) => { - if (req.query.id == null) return - let id = req.query.id as string - res.write(`report for: ${id} \n`) - try { - if (id.length === 10) { - //short form.. 
- let found = false - const prefix = id.substring(0, 4) - const low = prefix + '0'.repeat(60) - const high = prefix + 'f'.repeat(60) - - const suffix = id.substring(5, 10) - const possibleAccounts = await this.app.getAccountDataByRange(low, high, 0, shardusGetTime(), 100, 0, '') - - res.write(`searching ${possibleAccounts.length} accounts \n`) - - for (const account of possibleAccounts) { - if (account.accountId.endsWith(suffix)) { - res.write(`found full account ${id} => ${account.accountId} \n`) - id = account.accountId - found = true - - break - } - } - - if (found == false) { - res.write(`could not find account\n`) - res.end() - return - } - } - - const trieAccount = this.getAccountTreeInfo(id) - const accountHash = this.stateManager.accountCache.getAccountHash(id) - const accountHashFull = this.stateManager.accountCache.getAccountDebugObject(id) //this.stateManager.accountCache.accountsHashCache3.accountHashMap.get(id) - const accountData = await this.app.getAccountDataByList([id]) - res.write(`trieAccount: ${Utils.safeStringify(trieAccount)} \n`) - res.write(`accountHash: ${Utils.safeStringify(accountHash)} \n`) - res.write(`accountHashFull: ${Utils.safeStringify(accountHashFull)} \n`) - res.write(`accountData: ${JSON.stringify(accountData, appdata_replacer)} \n\n`) - res.write(`tests: \n`) - if (accountData != null && accountData.length === 1 && accountHash != null) { - res.write(`accountData hash matches cache ${accountData[0].stateId === accountHash.h} \n`) - } - if (accountData != null && accountData.length === 1 && trieAccount != null) { - res.write(`accountData matches trieAccount ${accountData[0].stateId === trieAccount.hash} \n`) - } - } catch (e) { - res.write(`${e}\n`) - } - res.end() - }) - - /** - * - * - * Usage: http://:/account-coverage?id= - */ - Context.network.registerExternalGet('account-coverage', isDebugModeMiddleware, async (req, res) => { - if (req.query.id === null) return - const id = req.query.id as string - - const possibleAccountsIds: string[] = [] - try { - if (id.length === 10) { - //short form.. 
- const prefix = id.substring(0, 4) - const low = prefix + '0'.repeat(60) - const high = prefix + 'f'.repeat(60) - - const suffix = id.substring(5, 10) - const possibleAccounts = await this.app.getAccountDataByRange(low, high, 0, shardusGetTime(), 100, 0, '') - - for (const account of possibleAccounts) { - if (account.accountId.endsWith(suffix)) { - possibleAccountsIds.push(account.accountId) - } - } - } else { - possibleAccountsIds.push(id) - } - - if (possibleAccountsIds.length === 0) { - res.write( - Utils.safeStringify({ - success: false, - error: 'could not find account', - }) - ) - } else { - const resObj = {} - for (const accountId of possibleAccountsIds) { - const consensusNodes = this.stateManager.transactionQueue.getConsenusGroupForAccount(accountId) - const storedNodes = this.stateManager.transactionQueue.getStorageGroupForAccount(accountId) - - // eslint-disable-next-line security/detect-object-injection - resObj[accountId] = { - consensusNodes: consensusNodes.map((node) => { - return { - id: node.id, - externalIp: node.externalIp, - externalPort: node.externalPort, - internalIp: node.internalIp, - internalPort: node.internalPort, - } - }), - storedNodes: storedNodes.map((node) => { - return { - id: node.id, - externalIp: node.externalIp, - externalPort: node.externalPort, - internalIp: node.internalIp, - internalPort: node.internalPort, - } - }), - } - } - res.write( - Utils.safeStringify({ - success: true, - result: resObj, - }) - ) - } - } catch (e) { - res.write( - Utils.safeStringify({ - success: false, - error: e, - }) - ) - } - res.end() - }) - - Context.network.registerExternalGet('hack-version', isDebugModeMiddleware, (_req, res) => { - res.write(`1.0.1\n`) - res.end() - }) - } - - getAccountTreeInfo(accountID: string): TrieAccount { - const radix = accountID.substring(0, this.treeMaxDepth) - - const treeNode = this.shardTrie.layerMaps[this.treeMaxDepth].get(radix) - if (treeNode == null || treeNode.accountTempMap == null) { - return null - } - return treeNode.accountTempMap.get(accountID) - } - - /*** - * ## ## ######## ### ######## ######## ###### ## ## ### ######## ######## ######## ######## #### ######## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ######## ## ## ## ###### ###### ######### ## ## ######## ## ## ## ######## ## ###### - * ## ## ## ######### ## ## ## ## ## ######### ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ####### ## ## ## ## ######## ###### ## ## ## ## ## ## ######## ## ## ## #### ######## - */ - - upateShardTrie(cycle: number): HashTrieUpdateStats { - //we start with the later of nodes at max depth, and will build upwards one layer at a time - const currentLayer = this.treeMaxDepth - let treeNodeQueue: HashTrieNode[] = [] - - const updateStats = { - leafsUpdated: 0, - leafsCreated: 0, - updatedNodesPerLevel: new Array(this.treeMaxDepth + 1).fill(0), - hashedChildrenPerLevel: new Array(this.treeMaxDepth + 1).fill(0), - totalHashes: 0, - //totalObjectsHashed: 0, - totalNodesHashed: 0, - totalAccountsHashed: 0, - totalLeafs: 0, - } - - //feed account data into lowest layer, generates list of treeNodes - let currentMap = this.shardTrie.layerMaps[currentLayer] // eslint-disable-line security/detect-object-injection - if (currentMap == null) { - currentMap = new Map() - this.shardTrie.layerMaps[currentLayer] = currentMap // eslint-disable-line security/detect-object-injection - } - - 
//process accounts that need updating. Create nodes as needed - for (let i = 0; i < this.accountUpdateQueue.length; i++) { - const tx = this.accountUpdateQueue[i] // eslint-disable-line security/detect-object-injection - const key = tx.accountID.slice(0, currentLayer) - let leafNode = currentMap.get(key) - if (leafNode == null) { - //init a leaf node. - //leaf nodes will have a list of accounts that share the same radix. - leafNode = { - radix: key, - children: [], - childHashes: [], - accounts: [], - hash: '', - accountTempMap: new Map(), - updated: true, - isIncomplete: false, - nonSparseChildCount: 0, - } //this map will cause issues with update - currentMap.set(key, leafNode) - updateStats.leafsCreated++ - treeNodeQueue.push(leafNode) - } - - //this can happen if the depth gets smaller after being larger - if (leafNode.accountTempMap == null) { - leafNode.accountTempMap = new Map() - } - if (leafNode.accounts == null) { - leafNode.accounts = [] - } - - if (leafNode.accountTempMap.has(tx.accountID) === false) { - this.totalAccounts++ - } - leafNode.accountTempMap.set(tx.accountID, tx) - if (leafNode.updated === false) { - treeNodeQueue.push(leafNode) - updateStats.leafsUpdated++ - } - leafNode.updated = true - - //too frequent in large tests. only use this in local tests with smaller data - //if (logFlags.verbose) /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('accountPatcher', `upateShardTrie ${utils.makeShortHash(tx.accountID)}`, `upateShardTrie update: ${utils.makeShortHash(tx.accountID)} h:${utils.makeShortHash(tx.hash)}`) - } - - let removedAccounts = 0 - let removedAccountsFailed = 0 - - if (this.accountRemovalQueue.length > 0) { - //this.statemanager_fatal(`temp accountRemovalQueue`,`accountRemovalQueue c:${cycle} ${utils.stringifyReduce(this.accountRemovalQueue)}`) - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`remove account from trie tracking c:${cycle} ${utils.stringifyReduce(this.accountRemovalQueue)}`) - } - - //remove accoutns from the trie. this happens if our node no longer carries them in storage range. - for (let i = 0; i < this.accountRemovalQueue.length; i++) { - const accountID = this.accountRemovalQueue[i] // eslint-disable-line security/detect-object-injection - - const key = accountID.slice(0, currentLayer) - const treeNode = currentMap.get(key) - if (treeNode == null) { - continue //already gone! - } - - if (treeNode.updated === false) { - treeNodeQueue.push(treeNode) - } - treeNode.updated = true - - if (treeNode.accountTempMap == null) { - treeNode.accountTempMap = new Map() - } - if (treeNode.accounts == null) { - treeNode.accounts = [] - } - const removed = treeNode.accountTempMap.delete(accountID) - if (removed) { - removedAccounts++ - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('accountPatcher', `upateShardTrie ${utils.makeShortHash(accountID)}`, `upateShardTrie remove ${utils.makeShortHash(accountID)} `) - } else { - removedAccountsFailed++ - } - } - if (removedAccounts > 0) { - nestedCountersInstance.countEvent(`accountPatcher`, `removedAccounts c:${cycle}`, removedAccounts) - } - if (removedAccountsFailed > 0) { - /* prettier-ignore */ nestedCountersInstance.countEvent(`accountPatcher`, `removedAccountsFailed c:${cycle}`, removedAccountsFailed) - } - this.accountRemovalQueue = [] - - // for(let treeNode of this.incompleteNodes){ - // treeNodeQueue.push(treeNode) - // } - - //look at updated leaf nodes. 
Sort accounts and update hash values - for (let i = 0; i < treeNodeQueue.length; i++) { - const treeNode = treeNodeQueue[i] // eslint-disable-line security/detect-object-injection - - if (treeNode.updated === true) { - //treeNode.accountTempMap != null){ - treeNode.accounts = Array.from(treeNode.accountTempMap.values()) - - //delete treeNode.accountTempMap... need to keep it - //treeNode.accountTempMap = null - - //sort treeNode.accounts by accountID - treeNode.accounts.sort(this.sortByAccountID) - //compute treenode hash of accounts - treeNode.hash = this.hashObj(treeNode.accounts.map((a) => a.hash)) //todo why is this needed!!! - - treeNode.updated = false - - updateStats.totalHashes++ - updateStats.totalAccountsHashed = updateStats.totalAccountsHashed + treeNode.accounts.length - updateStats.updatedNodesPerLevel[currentLayer] = updateStats.updatedNodesPerLevel[currentLayer] + 1 // eslint-disable-line security/detect-object-injection - } - } - - // update the tree one later at a time. start at the max depth and copy values to the parents. - // Then the parent depth becomes the working depth and we repeat the process - // a queue is used to efficiently update only the nodes that need it. - // hashes are efficiently calculated only once after all children have set their hash data in the childHashes - let parentTreeNodeQueue = [] - //treenode queue has updated treeNodes from each loop, gets fed into next loop - for (let i = currentLayer - 1; i >= 0; i--) { - currentMap = this.shardTrie.layerMaps[i] // eslint-disable-line security/detect-object-injection - if (currentMap == null) { - currentMap = new Map() - this.shardTrie.layerMaps[i] = currentMap // eslint-disable-line security/detect-object-injection - } - //loop each node in treeNodeQueue (nodes from the previous level down) - for (let j = 0; j < treeNodeQueue.length; j++) { - const treeNode = treeNodeQueue[j] // eslint-disable-line security/detect-object-injection - - //compute parent nodes. - const parentKey = treeNode.radix.slice(0, i) - // fast? 0-15 conversion - let index = treeNode.radix.charCodeAt(i) - index = index < 90 ? index - 48 : index - 87 - //get parent node - let parentTreeNode = currentMap.get(parentKey) - if (parentTreeNode == null) { - parentTreeNode = { - radix: parentKey, - children: new Array(16), - childHashes: new Array(16), - updated: false, - hash: '', - isIncomplete: false, - nonSparseChildCount: 0, - } - currentMap.set(parentKey, parentTreeNode) - } - - //if we have not set this child yet then count it - // eslint-disable-next-line security/detect-object-injection - if (parentTreeNode.children[index] == null) { - // eslint-disable-line security/detect-object-injection - parentTreeNode.nonSparseChildCount++ - } - - //assign position - parentTreeNode.children[index] = treeNode // eslint-disable-line security/detect-object-injection - parentTreeNode.childHashes[index] = treeNode.hash // eslint-disable-line security/detect-object-injection - - //insert new parent nodes if we have not yet, guided by updated flag - if (parentTreeNode.updated === false) { - parentTreeNodeQueue.push(parentTreeNode) - parentTreeNode.updated = true - } - - if (treeNode.isIncomplete) { - // if(parentTreeNode.isIncomplete === false && parentTreeNode.updated === false ){ - // parentTreeNode.updated = true - // parentTreeNodeQueue.push(parentTreeNode) - // } - parentTreeNode.isIncomplete = true - } - - treeNode.updated = false //finished update of this node. 
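// --- Illustrative sketch (not part of this diff) ---------------------------
// The child-index computation above ("fast? 0-15 conversion") maps one
// lowercase hex character of the radix to a slot 0..15: '0'..'9' (codes 48..57)
// become 0..9 and 'a'..'f' (codes 97..102) become 10..15. A standalone version
// with a cross-check against parseInt, assuming radixes are lowercase hex:
function hexCharToChildIndex(radix: string, pos: number): number {
  const code = radix.charCodeAt(pos)
  return code < 90 ? code - 48 : code - 87
}

for (const ch of '0123456789abcdef') {
  const fast = hexCharToChildIndex(ch, 0)
  const slow = parseInt(ch, 16)
  if (fast !== slow) throw new Error(`mismatch for ${ch}: ${fast} != ${slow}`)
}
// ---------------------------------------------------------------------------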
- } - - updateStats.updatedNodesPerLevel[i] = parentTreeNodeQueue.length // eslint-disable-line security/detect-object-injection - - //when we are one step below the sync depth add in incompete parents for hash updates! - // if(i === this.treeSyncDepth + 1){ - // for(let treeNode of this.incompleteNodes){ - // parentTreeNodeQueue.push(treeNode) - // } - // } - - //loop and compute hashes of parents - for (let j = 0; j < parentTreeNodeQueue.length; j++) { - const parentTreeNode = parentTreeNodeQueue[j] // eslint-disable-line security/detect-object-injection - parentTreeNode.hash = this.hashObj(parentTreeNode.childHashes) - - updateStats.totalHashes++ - updateStats.totalNodesHashed = updateStats.totalNodesHashed + parentTreeNode.nonSparseChildCount - updateStats.hashedChildrenPerLevel[i] = // eslint-disable-line security/detect-object-injection - updateStats.hashedChildrenPerLevel[i] + parentTreeNode.nonSparseChildCount // eslint-disable-line security/detect-object-injection - } - //set the parents to the treeNodeQueue so we can loop and work on the next layer up - treeNodeQueue = parentTreeNodeQueue - parentTreeNodeQueue = [] + return 1 } + return 0 + } - updateStats.totalLeafs = this.shardTrie.layerMaps[this.treeMaxDepth].size - - this.accountUpdateQueue = [] + getAccountTreeInfo(accountID: string): TrieAccount { + const radix = accountID.substring(0, this.treeMaxDepth) - return updateStats - } + const treeNode = this.shardTrie.layerMaps[this.treeMaxDepth].get(radix) + if (treeNode == null || treeNode.accountTempMap == null) { + return null + } + return treeNode.accountTempMap.get(accountID) + } getNonConsensusRanges(cycle: number): { low: string; high: string }[] { let incompleteRanges = [] @@ -2182,16 +518,6 @@ class AccountPatcher { return isStored } - /*** - * ######## #### ######## ######## ###### ####### ## ## ###### ######## ## ## ## ## ###### - * ## ## ## ## ## ## ## ## ## ### ## ## ## ## ### ## ## ## ## ## - * ## ## ## ## ## ## ## ## #### ## ## ## #### ## ## ## ## - * ## ## ## ###### ###### ## ## ## ## ## ## ###### ###### ## ## ## ## ## ###### - * ## ## ## ## ## ## ## ## ## #### ## ## ## #### ## ## ## - * ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ### ## ## ## ## - * ######## #### ## ## ###### ####### ## ## ###### ######## ## ## ####### ###### - */ - /** * diffConsenus * get a list where localMap does not have entries that match consensusArray. 
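// --- Illustrative sketch (not part of this diff) ---------------------------
// A condensed reading of diffConsenus as described above: return every
// consensus-voted (radix, hash) pair that the local layer map is missing or
// disagrees with. Types are simplified assumptions, not the real signatures.
interface RadixAndHashSketch { radix: string; hash: string }
interface TrieNodeHashSketch { hash: string }

function diffConsensusSketch(
  consensusArray: RadixAndHashSketch[],
  localMap: Map<string, TrieNodeHashSketch>
): RadixAndHashSketch[] {
  const toFix: RadixAndHashSketch[] = []
  for (const entry of consensusArray) {
    const local = localMap.get(entry.radix)
    if (local == null || local.hash !== entry.hash) toFix.push(entry)
  }
  return toFix
}
// ---------------------------------------------------------------------------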
@@ -2280,15 +606,6 @@ class AccountPatcher { return uniqueExtraBadRadixes } - /*** - * ###### ####### ## ## ######## ## ## ######## ######## ###### ####### ## ## ######## ######## ### ###### ######## - * ## ## ## ## ### ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## #### #### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ### ## ######## ## ## ## ###### ## ## ## ## ## ###### ######## ## ## ## #### ###### - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ######### ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ###### ####### ## ## ## ####### ## ######## ###### ####### ### ######## ## ## ## ## ###### ######## - */ /** * computeCoverage * @@ -2326,15 +643,6 @@ class AccountPatcher { // have fallback optoins } - /*** - * ###### ######## ######## ## ## ####### ######## ######## ######## ####### ######## ####### ## ## ######## ######## ## ## - * ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## #### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #### - * ## #### ###### ## ## ## ## ## ## ## ## ###### ###### ## ## ######## ## ## ## ## ###### ######## ## - * ## ## ## ## ## #### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ###### ######## ## ## ## ####### ######## ######## ## ####### ## ## ##### ## ####### ######## ## ## ## - */ //error handling.. what if we cand find a node or run out? /** * getNodeForQuery @@ -2597,15 +905,6 @@ class AccountPatcher { return { radixAndChildHashes: nodeChildHashes, getAccountHashStats: getAccountHashStats } } - /*** - * #### ###### #### ## ## ###### ## ## ## ## ###### - * ## ## ## ## ### ## ## ## ## ## ### ## ## ## - * ## ## ## #### ## ## #### #### ## ## - * ## ###### ## ## ## ## ###### ## ## ## ## ## - * ## ## ## ## #### ## ## ## #### ## - * ## ## ## ## ## ### ## ## ## ## ### ## ## - * #### ###### #### ## ## ###### ## ## ## ###### - */ /** * isInSync * @@ -2794,413 +1093,6 @@ class AccountPatcher { return isInsyncResult // {inSync, } } - /*** - * ######## #### ## ## ######## ######## ### ######## ### ###### ###### ####### ## ## ## ## ######## ###### - * ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## - * ## ## #### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #### ## ## ## - * ###### ## ## ## ## ## ## ######## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ###### - * ## ## ## #### ## ## ## ## ######### ## ## ######### ## ## ## ## ## ## ## #### ## ## - * ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## - * ## #### ## ## ######## ######## ## ## ######## ## ## ###### ###### ####### ####### ## ## ## ###### - */ - /** - * findBadAccounts - * - * starts at the sync level hashes that dont match and queries for child nodes to get more details about - * what accounts could possibly be bad. At the lowest level gets a list of accounts and hashes - * We double check out cache values before returning a list of bad accounts that need repairs. 
- * - * @param cycle - */ - async findBadAccounts(cycle: number): Promise { - let badAccounts: AccountIDAndHash[] = [] - let accountsTheyNeedToRepair: AccountIdAndHashToRepair[] = [] - let accountsWeNeedToRepair: AccountIDAndHash[] = [] - const hashesPerLevel: number[] = Array(this.treeMaxDepth + 1).fill(0) - const checkedKeysPerLevel = Array(this.treeMaxDepth) - const badHashesPerLevel: number[] = Array(this.treeMaxDepth + 1).fill(0) - const requestedKeysPerLevel: number[] = Array(this.treeMaxDepth + 1).fill(0) - - let level = this.treeSyncDepth - let badLayerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection - const syncTrackerRanges = this.getSyncTrackerRanges() - - const stats = { - testedSyncRadix: 0, - skippedSyncRadix: 0, - badSyncRadix: 0, - ok_noTrieAcc: 0, - ok_trieHashBad: 0, - fix_butHashMatch: 0, - fixLastSeen: 0, - needsVotes: 0, - subHashesTested: 0, - trailColdLevel: 0, - checkedLevel: 0, - leafsChecked: 0, - leafResponses: 0, - getAccountHashStats: {}, - } - let extraBadKeys: RadixAndHashWithNodeId[] = [] - let extraBadAccounts: AccountIdAndHashToRepair[] = [] - - const minVotes = this.calculateMinVotes() - - const goodVotes: RadixAndHash[] = [] - const hashTrieSyncConsensus = this.hashTrieSyncConsensusByCycle.get(cycle) - for (const radix of hashTrieSyncConsensus.radixHashVotes.keys()) { - const votesMap = hashTrieSyncConsensus.radixHashVotes.get(radix) - let isSyncingRadix = false - - if (votesMap.bestVotes < minVotes) { - stats.needsVotes++ - if (logFlags.debug) { - //overkill, need it for now - const kvp = [] - for (const [key, value] of votesMap.allVotes.entries()) { - kvp.push({ - id: key, - count: value.count, - nodeIDs: [...value.voters].map((node) => utils.makeShortHash(node.id) + ':' + node.externalPort), - }) - } - const simpleMap = { - bestHash: votesMap.bestHash, - bestVotes: votesMap.bestVotes, - allVotes: kvp, - } - /* prettier-ignore */ nestedCountersInstance.countEvent(`accountPatcher`, `not enough votes ${radix} ${utils.makeShortHash(votesMap.bestHash)} uniqueVotes: ${votesMap.allVotes.size}`, 1) - this.statemanager_fatal( - 'debug findBadAccounts', - `debug findBadAccounts ${cycle}: ${radix} bestVotes${ - votesMap.bestVotes - } < minVotes:${minVotes} uniqueVotes: ${votesMap.allVotes.size} ${utils.stringifyReduce(simpleMap)}` - ) - } - // skipping 50% votes restriction to allow patcher to do account based patching - // continue - } - - //do we need to filter out a vote? 
- for (const range of syncTrackerRanges) { - if (radix >= range.low && radix <= range.high) { - isSyncingRadix = true - break - } - } - if (isSyncingRadix === true) { - stats.skippedSyncRadix++ - continue - } - stats.testedSyncRadix++ - goodVotes.push({ radix, hash: votesMap.bestHash }) - } - - let toFix = this.diffConsenus(goodVotes, badLayerMap) - - stats.badSyncRadix = toFix.length - - if (logFlags.debug) { - toFix.sort(this.sortByRadix) - this.statemanager_fatal( - 'debug findBadAccounts', - `debug findBadAccounts ${cycle}: toFix: ${utils.stringifyReduce(toFix)}` - ) - for (let radixToFix of toFix) { - const votesMap = hashTrieSyncConsensus.radixHashVotes.get(radixToFix.radix) - let hasNonConsensusRange = false - let hasNonStorageRange = false - - const nonConsensusRanges = this.getNonConsensusRanges(cycle) - const nonStorageRange = this.getNonStoredRanges(cycle) - for (const range of nonConsensusRanges) { - if (radixToFix.radix >= range.low && radixToFix.radix <= range.high) { - hasNonConsensusRange = true - nestedCountersInstance.countEvent(`accountPatcher`, `findBadAccounts hasNonConsensusRange`, 1) - } - } - for (const range of nonStorageRange) { - if (radixToFix.radix >= range.low && radixToFix.radix <= range.high) { - hasNonStorageRange = true - nestedCountersInstance.countEvent(`accountPatcher`, `findBadAccounts hasNonStorageRange`, 1) - } - } - - const kvp = [] - for (const [key, value] of votesMap.allVotes.entries()) { - kvp.push({ - id: key, - count: value.count, - nodeIDs: [...value.voters].map((node) => utils.makeShortHash(node.id) + ':' + node.externalPort), - }) - } - const simpleMap = { - bestHash: votesMap.bestHash, - bestVotes: votesMap.bestVotes, - allVotes: kvp, - } - this.statemanager_fatal( - 'debug findBadAccounts', - `debug findBadAccounts ${cycle}: ${ - radixToFix.radix - } isInNonConsensusRange: ${hasNonConsensusRange} isInNonStorageRange: ${hasNonStorageRange} bestVotes ${ - votesMap.bestVotes - } minVotes:${minVotes} uniqueVotes: ${votesMap.allVotes.size} ${utils.stringifyReduce(simpleMap)}` - ) - } - } - - //record some debug info - badHashesPerLevel[level] = toFix.length // eslint-disable-line security/detect-object-injection - checkedKeysPerLevel[level] = toFix.map((x) => x.radix) // eslint-disable-line security/detect-object-injection - requestedKeysPerLevel[level] = goodVotes.length // eslint-disable-line security/detect-object-injection - hashesPerLevel[level] = goodVotes.length // eslint-disable-line security/detect-object-injection - - this.computeCoverage(cycle) - - stats.checkedLevel = level - //refine our query until we get to the lowest level - while (level < this.treeMaxDepth && toFix.length > 0) { - level++ - stats.checkedLevel = level - badLayerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection - const remoteChildrenToDiff: RadixAndHashWithNodeId[] = await this.getChildrenOf(toFix, cycle) - - if (remoteChildrenToDiff == null) { - nestedCountersInstance.countEvent( - `accountPatcher`, - `findBadAccounts remoteChildrenToDiff == null for radixes: ${Utils.safeStringify(toFix)}, cycle: ${cycle}`, - 1 - ) - } - if (remoteChildrenToDiff.length === 0) { - nestedCountersInstance.countEvent( - `accountPatcher`, - `findBadAccounts remoteChildrenToDiff.length = 0 for radixes: ${Utils.safeStringify(toFix)}, cycle: ${cycle}`, - 1 - ) - } - - this.mainLogger.debug( - `findBadAccounts ${cycle}: level: ${level}, toFix: ${toFix.length}, childrenToDiff: ${Utils.safeStringify( - remoteChildrenToDiff - )}, badLayerMap: 
${Utils.safeStringify(badLayerMap)}` - ) - toFix = this.diffConsenus(remoteChildrenToDiff, badLayerMap) - - stats.subHashesTested += toFix.length - - if (toFix.length === 0) { - stats.trailColdLevel = level - extraBadKeys = this.findExtraBadKeys(remoteChildrenToDiff, badLayerMap) - - let result = { - nodeChildHashes: [], - stats: { - matched: 0, - visisted: 0, - empty: 0, - childCount: 0, - }, - } as HashTrieAccountsResp - - let allLeafNodes: HashTrieNode[] = [] - - for (const radixAndHash of extraBadKeys) { - let level = radixAndHash.radix.length - while (level < this.treeMaxDepth) { - level++ - const layerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection - if (layerMap == null) { - /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_accountHashes badrange:${level}`) - break - } - const hashTrieNode = layerMap.get(radixAndHash.radix) - if (hashTrieNode != null && hashTrieNode.accounts != null) { - result.stats.visisted++ - const childAccounts = [] - result.nodeChildHashes.push({ radix: radixAndHash.radix, childAccounts }) - for (const account of hashTrieNode.accounts) { - childAccounts.push({ accountID: account.accountID, hash: account.hash }) - extraBadAccounts.push({ - accountID: account.accountID, - hash: account.hash, - targetNodeId: radixAndHash.nodeId, - }) - result.stats.childCount++ - } - if (hashTrieNode.accounts.length === 0) { - result.stats.empty++ - } - } - } - } - - for (const radixAndHash of extraBadKeys) { - const radix = radixAndHash.radix - result.stats.visisted++ - const level = radix.length - const layerMap = this.shardTrie.layerMaps[level] // eslint-disable-line security/detect-object-injection - if (layerMap == null) { - /* prettier-ignore */ nestedCountersInstance.countEvent('accountPatcher', `get_trie_accountHashes badrange:${level}`) - break - } - - const currentNode = layerMap.get(radix) - const leafs: HashTrieNode[] = this.extractLeafNodes(currentNode) - for (const leaf of leafs) { - if (leaf != null && leaf.accounts != null) { - result.stats.matched++ - const childAccounts = [] - result.nodeChildHashes.push({ radix, childAccounts }) - for (const account of leaf.accounts) { - childAccounts.push({ accountID: account.accountID, hash: account.hash }) - extraBadAccounts.push({ - accountID: account.accountID, - hash: account.hash, - targetNodeId: radixAndHash.nodeId, - }) - result.stats.childCount++ - } - if (leaf.accounts.length === 0) { - result.stats.empty++ - } - } - } - } - - if (extraBadKeys.length > 0) { - toFix = toFix.concat(extraBadKeys) - break - } - } - - //record some debug info - badHashesPerLevel[level] = toFix.length // eslint-disable-line security/detect-object-injection - checkedKeysPerLevel[level] = toFix.map((x) => x.radix) // eslint-disable-line security/detect-object-injection - requestedKeysPerLevel[level] = remoteChildrenToDiff.length // eslint-disable-line security/detect-object-injection - hashesPerLevel[level] = remoteChildrenToDiff.length // eslint-disable-line security/detect-object-injection - // badLayerMap.size ...badLayerMap could be null! 
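// --- Illustrative sketch (not part of this diff) ---------------------------
// Shape of the refinement loop above: starting from the sync-level radixes
// that lost the vote, repeatedly ask peers for the children of the suspect
// radixes, diff them against the local trie layer, and descend one level at a
// time until the leaf (max) depth is reached. `requestRemoteChildren` is a
// hypothetical stand-in for getChildrenOf(); RadixAndHashSketch is the type
// from the diffConsenus sketch earlier.
async function drillDownToBadLeaves(
  suspects: RadixAndHashSketch[],
  layerMaps: Map<string, { hash: string }>[],
  syncDepth: number,
  maxDepth: number,
  requestRemoteChildren: (parents: RadixAndHashSketch[]) => Promise<RadixAndHashSketch[]>
): Promise<RadixAndHashSketch[]> {
  let level = syncDepth
  let toFix = suspects
  while (level < maxDepth && toFix.length > 0) {
    level++
    const localLayer = layerMaps[level]
    const remoteChildren = await requestRemoteChildren(toFix)
    // keep only the remote children the local trie is missing or disagrees with
    toFix = remoteChildren.filter((child) => {
      const local = localLayer.get(child.radix)
      return local == null || local.hash !== child.hash
    })
  }
  return toFix // leaf-level radixes whose account lists must now be compared
}
// ---------------------------------------------------------------------------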
- } - - stats.leafsChecked = toFix.length - //get bad accounts from the leaf nodes - const { radixAndChildHashes, getAccountHashStats } = await this.getChildAccountHashes(toFix, cycle) - stats.getAccountHashStats = getAccountHashStats - - stats.leafResponses = radixAndChildHashes.length - - let accountHashesChecked = 0 - for (const radixAndChildHash of radixAndChildHashes) { - accountHashesChecked += radixAndChildHash.childAccounts.length - - const badTreeNode = badLayerMap.get(radixAndChildHash.radix) - if (badTreeNode != null) { - const localAccountsMap = new Map() - const remoteAccountsMap = new Map() - if (badTreeNode.accounts != null) { - for (let i = 0; i < badTreeNode.accounts.length; i++) { - if (badTreeNode.accounts[i] == null) continue - localAccountsMap.set(badTreeNode.accounts[i].accountID, badTreeNode.accounts[i]) // eslint-disable-line security/detect-object-injection - } - } - for (let account of radixAndChildHash.childAccounts) { - remoteAccountsMap.set(account.accountID, { account, nodeId: radixAndChildHash.nodeId }) - } - if (radixAndChildHash.childAccounts.length > localAccountsMap.size) { - /* prettier-ignore */ if (this.config.debug.verboseNestedCounters) nestedCountersInstance.countEvent(`accountPatcher`, `remote trie node has more accounts, radix: ${radixAndChildHash.radix}`) - } else if (radixAndChildHash.childAccounts.length < localAccountsMap.size) { - /* prettier-ignore */ if (this.config.debug.verboseNestedCounters) nestedCountersInstance.countEvent(`accountPatcher`, `remote trie node has less accounts than local trie node, radix: ${radixAndChildHash.radix}`) - } else if (radixAndChildHash.childAccounts.length === localAccountsMap.size) { - /* prettier-ignore */ if (this.config.debug.verboseNestedCounters) nestedCountersInstance.countEvent(`accountPatcher`, `remote trie node has same number of accounts as local trie node, radix: ${radixAndChildHash.radix}`) - } - for (let i = 0; i < radixAndChildHash.childAccounts.length; i++) { - const potentalGoodAcc = radixAndChildHash.childAccounts[i] // eslint-disable-line security/detect-object-injection - const potentalBadAcc = localAccountsMap.get(potentalGoodAcc.accountID) - - //check if our cache value has matching hash already. The trie can lag behind. - // todo would be nice to find a way to reduce this, possibly by better control of syncing ranges. - // (we are not supposed to test syncing ranges , but maybe that is out of phase?) - - //only do this check if the account is new. It was skipping potential oos situations. - const accountMemData: AccountHashCache = this.stateManager.accountCache.getAccountHash( - potentalGoodAcc.accountID - ) - if (accountMemData != null && accountMemData.h === potentalGoodAcc.hash) { - if (accountMemData.c >= cycle - 1) { - if (potentalBadAcc != null) { - if (potentalBadAcc.hash != potentalGoodAcc.hash) { - stats.ok_trieHashBad++ // mem account is good but trie account is bad - } - } else { - stats.ok_noTrieAcc++ // no trie account at all - } - - //this was in cache, but stale so we can reinstate the cache since it still matches the group consensus - const accountHashCacheHistory: AccountHashCacheHistory = - this.stateManager.accountCache.getAccountHashHistoryItem(potentalGoodAcc.accountID) - if ( - accountHashCacheHistory != null && - accountHashCacheHistory.lastStaleCycle >= accountHashCacheHistory.lastSeenCycle - ) { - stats.fixLastSeen++ - accountHashCacheHistory.lastSeenCycle = cycle - } - //skip out - continue - } else { - //dont skip out! 
- //cache matches but trie hash is bad - stats.fix_butHashMatch++ - //actually we can repair trie here: - this.updateAccountHash(potentalGoodAcc.accountID, potentalGoodAcc.hash) - continue - } - } - - //is the account missing or wrong hash? - if (potentalBadAcc != null) { - if (potentalBadAcc.hash != potentalGoodAcc.hash) { - badAccounts.push(potentalGoodAcc) - } - } else { - badAccounts.push(potentalGoodAcc) - } - } - for (let i = 0; i < badTreeNode.accounts.length; i++) { - const localAccount = badTreeNode.accounts[i] // eslint-disable-line security/detect-object-injection - if (localAccount == null) continue - const remoteNodeItem = remoteAccountsMap.get(localAccount.accountID) - if (remoteNodeItem == null) { - accountsWeNeedToRepair.push(localAccount) - continue - } - const { account: remoteAccount, nodeId: targetNodeId } = remoteNodeItem - if (remoteAccount == null) { - accountsTheyNeedToRepair.push({ ...localAccount, targetNodeId }) - } - } - } else { - badAccounts = badAccounts.concat(radixAndChildHash.childAccounts) - } - } - if (accountsTheyNeedToRepair.length > 0) { - nestedCountersInstance.countEvent(`accountPatcher`, `accountsTheyNeedToRepair`, accountsTheyNeedToRepair.length) - } - return { - badAccounts, - hashesPerLevel, - checkedKeysPerLevel, - requestedKeysPerLevel, - badHashesPerLevel, - accountHashesChecked, - stats, - extraBadAccounts, - extraBadKeys, - accountsTheyNeedToRepair, - } - } - extractLeafNodes(rootNode: HashTrieNode): HashTrieNode[] { const leafNodes: HashTrieNode[] = [] @@ -3227,15 +1119,6 @@ class AccountPatcher { //big todo .. be able to test changes on a temp tree and validate the hashed before we commit updates //also need to actually update the full account data and not just our tree!! - /*** - * ## ## ######## ######## ### ######## ######## ### ###### ###### ####### ## ## ## ## ######## ## ## ### ###### ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #### ## ## ## ## ## ## ## ## ## - * ## ## ######## ## ## ## ## ## ###### ## ## ## ## ## ## ## ## ## ## ## ## ######### ## ## ###### ######### - * ## ## ## ## ## ######### ## ## ######### ## ## ## ## ## ## ## #### ## ## ## ######### ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## - * ####### ## ######## ## ## ## ######## ## ## ###### ###### ####### ####### ## ## ## ## ## ## ## ###### ## ## - */ /** * updateAccountHash * This is the main function called externally to tell the hash trie what the hash value is for a given accountID @@ -3257,34 +1140,8 @@ class AccountPatcher { removeAccountHash(accountID: string): void { this.accountRemovalQueue.push(accountID) - } - // applyRepair(accountsToFix:AccountIDAndHash[]){ - // //todo do we need to look at cycle or timestamp and have a future vs. next queue? - // for(let account of accountsToFix){ - // //need proper tx injestion. - // //this.txCommit(node, account) - // this.updateAccountHash(account.accountID, account.hash) - // } - // } - - //test if radix is covered by our node.. that is tricky... - //need isincomplete logic integrated with trie generation. 
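// --- Illustrative sketch (not part of this diff) ---------------------------
// updateAccountHash / removeAccountHash above do not mutate the trie directly;
// they only queue work that the next upateShardTrie(cycle) pass drains. A
// minimal sketch of that queue-then-apply pattern (names are simplified
// assumptions, not the real class members):
class TrieUpdateQueueSketch {
  private accountUpdateQueue: { accountID: string; hash: string }[] = []
  private accountRemovalQueue: string[] = []

  updateAccountHash(accountID: string, hash: string): void {
    this.accountUpdateQueue.push({ accountID, hash })
  }

  removeAccountHash(accountID: string): void {
    this.accountRemovalQueue.push(accountID)
  }

  // called once per cycle by the trie-update pass; applies and clears both queues
  applyQueuedChanges(
    apply: (update: { accountID: string; hash: string }) => void,
    remove: (accountID: string) => void
  ): void {
    for (const update of this.accountUpdateQueue) apply(update)
    for (const accountID of this.accountRemovalQueue) remove(accountID)
    this.accountUpdateQueue = []
    this.accountRemovalQueue = []
  }
}
// ---------------------------------------------------------------------------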
- //will be 1 or 2 values only - - // type HashTrieSyncTell = { - // cycle: number - // nodeHashes: {radix:string, hash:string}[] - // } - - /*** - * ######## ######## ####### ### ######## ###### ### ###### ######## ###### ## ## ## ## ###### ## ## ### ###### ## ## ######## ###### - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #### #### ## ## ## ## ## ## ## ## ## ## ## - * ######## ######## ## ## ## ## ## ## ## ## ## ###### ## ###### ## ## ## ## ## ######### ## ## ###### ######### ###### ###### - * ## ## ## ## ## ## ######### ## ## ## ######### ## ## ## ## ## #### ## ## ## ######### ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## - * ######## ## ## ####### ## ## ######## ###### ## ## ###### ## ###### ## ## ## ###### ## ## ## ## ###### ## ## ######## ###### - */ + } + /** * broadcastSyncHashes * after each tree computation we figure out what radix + hash values we can send out @@ -3446,105 +1303,7 @@ class AccountPatcher { // } await Promise.all(promises) } - - /*** - * ## ## ######## ######## ### ######## ######## ######## ######## #### ######## ### ## ## ######## ######## ######## ####### ### ######## ###### ### ###### ######## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ######## ## ## ## ## ## ###### ## ######## ## ###### ## ## ## ## ## ## ## ######## ######## ## ## ## ## ## ## ## ## ## ###### ## - * ## ## ## ## ## ######### ## ## ## ## ## ## ## ######### ## #### ## ## ## ## ## ## ## ## ######### ## ## ## ######### ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ####### ## ######## ## ## ## ######## ## ## ## #### ######## ## ## ## ## ######## ######## ## ## ####### ## ## ######## ###### ## ## ###### ## - */ - /** - * updateTrieAndBroadCast - * calculates what our tree leaf(max) depth and sync depths are. - * if there is a change we have to do some partition work to send old leaf data to new leafs. - * Then calls upateShardTrie() and broadcastSyncHashes() - * - * @param cycle - */ - async updateTrieAndBroadCast(cycle: number): Promise { - //calculate sync levels!! 
- const shardValues = this.stateManager.shardValuesByCycle.get(cycle) - const shardGlobals = shardValues.shardGlobals as StateManagerTypes.shardFunctionTypes.ShardGlobals - - const minHashesPerRange = 4 - // y = floor(log16((minHashesPerRange * max(1, x/consensusRange )))) - let syncDepthRaw = - Math.log(minHashesPerRange * Math.max(1, shardGlobals.numPartitions / (shardGlobals.consensusRadius * 2 + 1))) / - Math.log(16) - syncDepthRaw = Math.max(1, syncDepthRaw) // at least 1 - const newSyncDepth = Math.ceil(syncDepthRaw) - - //This only happens when the depth of our tree change (based on num nodes above) - //We have to partition the leaf node data into leafs of the correct level and rebuild the tree - if (this.treeSyncDepth != newSyncDepth) { - //todo add this in to prevent size flipflop..(better: some deadspace) && newSyncDepth > this.treeSyncDepth){ - const resizeStats = { - nodesWithAccounts: 0, - nodesWithoutAccounts: 0, - } - const newMaxDepth = newSyncDepth + 3 //todo the "+3" should be based on total number of stored accounts pre node (in a consensed way, needs to be on cycle chain) - //add more maps if needed (+1 because we have a map level 0) - while (this.shardTrie.layerMaps.length < newMaxDepth + 1) { - this.shardTrie.layerMaps.push(new Map()) - } - - //detach all accounts. - const currentLeafMap = this.shardTrie.layerMaps[this.treeMaxDepth] - - //put all accounts into queue to rebuild Tree! - for (const treeNode of currentLeafMap.values()) { - if (treeNode.accounts != null) { - for (const account of treeNode.accounts) { - //this.updateAccountHash(account.accountID, account.hash) - - //need to unshift these, becasue they could be older than what is alread in the queue!! - this.accountUpdateQueue.unshift(account) - } - // //clear out leaf node only properties: - // treeNode.accounts = null - // treeNode.accountTempMap = null - - // //have to init these nodes to work as parents - // treeNode.children = Array(16) - // treeNode.childHashes = Array(16) - - //nestedCountersInstance.countEvent(`accountPatcher`, `updateTrieAndBroadCast: ok account list?`) - resizeStats.nodesWithAccounts++ - } else { - //nestedCountersInstance.countEvent(`accountPatcher`, `updateTrieAndBroadCast: null account list?`) - resizeStats.nodesWithoutAccounts++ - } - } - - //better to just wipe out old parent nodes! 
- for (let idx = 0; idx < newMaxDepth; idx++) { - this.shardTrie.layerMaps[idx].clear() // eslint-disable-line security/detect-object-injection - } - - if (newMaxDepth < this.treeMaxDepth) { - //cant get here, but consider deleting layers out of the map - /* prettier-ignore */ nestedCountersInstance.countEvent(`accountPatcher`, `max depth decrease oldMaxDepth:${this.treeMaxDepth} maxDepth :${newMaxDepth} stats:${utils.stringifyReduce(resizeStats)} cycle:${cycle}`) - } else { - /* prettier-ignore */ nestedCountersInstance.countEvent(`accountPatcher`, `max depth increase oldMaxDepth:${this.treeMaxDepth} maxDepth :${newMaxDepth} stats:${utils.stringifyReduce(resizeStats)} cycle:${cycle}`) - } - - this.treeSyncDepth = newSyncDepth - this.treeMaxDepth = newMaxDepth - } - - /* prettier-ignore */ nestedCountersInstance.countEvent(`accountPatcher`, ` syncDepth:${this.treeSyncDepth} maxDepth :${this.treeMaxDepth}`) - - // Update the trie with new account data updates since the last cycle - const updateStats = this.upateShardTrie(cycle) - - /* prettier-ignore */ nestedCountersInstance.countEvent(`accountPatcher`, `totalAccountsHashed`, updateStats.totalAccountsHashed) - - //broadcast sync data to nodes that cover similar portions of the tree - await this.broadcastSyncHashes(cycle) - } + async requestOtherNodesToRepair(accountsToFix: AccountIdAndHashToRepair[]): Promise { try { const accountIdsToFix = accountsToFix.map((x) => x.accountID) @@ -3626,15 +1385,6 @@ class AccountPatcher { } } - /*** - * ######## ######## ###### ######## ### ## ## ######## ######## ### ######## ###### ## ## ### ###### ###### ####### ## ## ## ## ######## ###### - * ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## - * ## ## ## ## ## ## #### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #### ## ## ## - * ## ###### ###### ## ## ## ## ## ## ## ## ######## ## ## ## ## ######### ## ## ## ## ## ## ## ## ## ## ## ## ###### - * ## ## ## ## ######### ## #### ## ## ## ######### ## ## ## ## ######### ## ## ## ## ## ## ## #### ## ## - * ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## - * ## ######## ###### ## ## ## ## ## ######## ## ## ## ## ###### ## ## ## ## ###### ###### ####### ####### ## ## ## ###### - */ /** * testAndPatchAccounts * does a quick check to see if we are isInSync() with the sync level votes we have been given. @@ -4117,15 +1867,6 @@ class AccountPatcher { return results } - /*** - * ###### ######## ######## ### ###### ###### ####### ## ## ## ## ######## ######## ######## ######## ### #### ######## ######## ### ######## ### - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## #### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## #### ###### ## ## ## ## ## ## ## ## ## ## ## ## ## ######## ###### ######## ## ## ## ######## ## ## ## ## ## ## ## - * ## ## ## ## ######### ## ## ## ## ## ## ## #### ## ## ## ## ## ######### ## ## ## ## ## ######### ## ######### - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ###### ######## ## ## ## ###### ###### ####### ####### ## ## ## ## ## ######## ## ## ## #### ## ## ######## ## ## ## ## ## - */ //todo test the tree to see if repairs will work. not simple to do efficiently //todo robust query the hashes? technically if we repair to bad data it will just get detected and fixed again!!! 
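// --- Illustrative sketch (not part of this diff) ---------------------------
// The depth calculation in updateTrieAndBroadCast above, pulled out on its
// own: the sync depth is roughly log16 of (minHashesPerRange * partitions per
// consensus range), clamped to at least 1, and the leaf (max) depth sits a
// fixed 3 levels below it (the "+3" is flagged as a todo in the original
// code). Inputs are plain numbers here rather than the real ShardGlobals.
function computeTrieDepths(
  numPartitions: number,
  consensusRadius: number,
  minHashesPerRange = 4
): { syncDepth: number; maxDepth: number } {
  const partitionsPerRange = consensusRadius * 2 + 1
  let syncDepthRaw =
    Math.log(minHashesPerRange * Math.max(1, numPartitions / partitionsPerRange)) / Math.log(16)
  syncDepthRaw = Math.max(1, syncDepthRaw) // at least one level below the root
  const syncDepth = Math.ceil(syncDepthRaw)
  return { syncDepth, maxDepth: syncDepth + 3 }
}

// e.g. 1024 partitions with a consensus radius of 5 (11 partitions per range):
// log16(4 * 1024 / 11) is about 2.1, so syncDepth = 3 and maxDepth = 6.
const exampleDepths = computeTrieDepths(1024, 5)
// ---------------------------------------------------------------------------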
@@ -4306,300 +2047,6 @@ class AccountPatcher { return { repairDataResponse, stateTableDataMap, getAccountStats } } - /*** - * ######## ######## ####### ###### ######## ###### ###### ###### ## ## ### ######## ######## ######## ## ## ## ## ######## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ### ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #### #### ## ## - * ######## ######## ## ## ## ###### ###### ###### ###### ######### ## ## ######## ## ## ## ## ## ## ## ### ## ######## - * ## ## ## ## ## ## ## ## ## ## ## ## ######### ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ####### ###### ######## ###### ###### ###### ## ## ## ## ## ## ######## ######## ####### ## ## ## - */ - - /** - * processShardDump - * debug only code to create a shard report. - * @param stream - * @param lines - */ - processShardDump( - stream: Response, number>, - lines: Line[] - ): { allPassed: boolean; allPassed2: boolean } { - const dataByParition = new Map() - - const rangesCovered = [] - const nodesListsCovered = [] - const nodeLists = [] - let newestCycle = -1 - const partitionObjects = [] - for (const line of lines) { - const index = line.raw.indexOf('{"allNodeIds') - if (index >= 0) { - const partitionStr = line.raw.slice(index) - //this.generalLog(string) - let partitionObj: { cycle: number; owner: string } - try { - partitionObj = Utils.safeJsonParse(partitionStr) - } catch (error) { - this.mainLogger.error('error parsing partitionObj', error, partitionStr) - continue - } - - if (newestCycle > 0 && partitionObj.cycle != newestCycle) { - stream.write( - `wrong cycle for node: ${line.file.owner} reportCycle:${newestCycle} thisNode:${partitionObj.cycle} \n` - ) - continue - } - partitionObjects.push(partitionObj) - - if (partitionObj.cycle > newestCycle) { - newestCycle = partitionObj.cycle - } - partitionObj.owner = line.file.owner //line.raw.slice(0, index) - } - } - - for (const partitionObj of partitionObjects) { - // we only want data for nodes that were active in the latest cycle. - if (partitionObj.cycle === newestCycle) { - for (const partition of partitionObj.partitions) { - let results = dataByParition.get(partition.parititionID) - if (results == null) { - results = [] - dataByParition.set(partition.parititionID, results) - } - results.push({ - owner: partitionObj.owner, - accounts: partition.accounts, - ownerId: partitionObj.rangesCovered.id, - accounts2: partition.accounts2, - partitionHash2: partition.partitionHash2, - }) - } - rangesCovered.push(partitionObj.rangesCovered) - nodesListsCovered.push(partitionObj.nodesCovered) - nodeLists.push(partitionObj.allNodeIds) - } - } - - // need to only count stuff from the newestCycle. - - // ///////////////////////////////////////////////// - // compare partition data: old system with data manual queried from app - let allPassed = true - // let uniqueVotesByPartition = new Array(numNodes).fill(0) - for (const [key, value] of dataByParition) { - const results = value - const votes = {} - for (const entry of results) { - if (entry.accounts.length === 0) { - // new settings allow for not using accounts from sql - continue - } - entry.accounts.sort(function (a: { id: number }, b: { id: number }) { - return a.id === b.id ? 0 : a.id < b.id ? 
-1 : 1 - }) - const string = utils.stringifyReduce(entry.accounts) - let voteEntry = votes[string] // eslint-disable-line security/detect-object-injection - if (voteEntry == null) { - voteEntry = {} - voteEntry.voteCount = 0 - voteEntry.ownerIds = [] - votes[string] = voteEntry // eslint-disable-line security/detect-object-injection - } - voteEntry.voteCount++ - votes[string] = voteEntry // eslint-disable-line security/detect-object-injection - - voteEntry.ownerIds.push(entry.ownerId) - } - for (const key2 of Object.keys(votes)) { - const voteEntry = votes[key2] // eslint-disable-line security/detect-object-injection - let voters = '' - if (key2 !== '[]') { - voters = `---voters:${Utils.safeStringify(voteEntry.ownerIds)}` - } - - stream.write(`partition: ${key} votes: ${voteEntry.voteCount} values: ${key2} \t\t\t${voters}\n`) - // stream.write(` ---voters: ${JSON.stringify(voteEntry.ownerIds)}\n`) - } - const numUniqueVotes = Object.keys(votes).length - if (numUniqueVotes > 2 || (numUniqueVotes > 1 && votes['[]'] == null)) { - allPassed = false - stream.write(`partition: ${key} failed. Too many different version of data: ${numUniqueVotes} \n`) - } - } - stream.write(`partition tests all passed: ${allPassed}\n`) - // rangesCovered - - // ///////////////////////////////////////////////// - // compare partition data 2: new system using the state manager cache - let allPassed2 = true - // let uniqueVotesByPartition = new Array(numNodes).fill(0) - for (const [key, value] of dataByParition) { - const results = value - const votes = {} - for (const entry of results) { - // no account sort, we expect this to have a time sort! - // entry.accounts.sort(function (a, b) { return a.id === b.id ? 0 : a.id < b.id ? -1 : 1 }) - const fullString = utils.stringifyReduce(entry.accounts2) - let string = entry.partitionHash2 - if (string === undefined) { - string = '[]' - } - - let voteEntry = votes[string] // eslint-disable-line security/detect-object-injection - if (voteEntry == null) { - voteEntry = {} - voteEntry.voteCount = 0 - voteEntry.ownerIds = [] - voteEntry.fullString = fullString - votes[string] = voteEntry // eslint-disable-line security/detect-object-injection - } - voteEntry.voteCount++ - votes[string] = voteEntry // eslint-disable-line security/detect-object-injection - - voteEntry.ownerIds.push(entry.ownerId) - } - for (const key2 of Object.keys(votes)) { - const voteEntry = votes[key2] // eslint-disable-line security/detect-object-injection - let voters = '' - if (key2 !== '[]') { - voters = `---voters:${Utils.safeStringify(voteEntry.ownerIds)}` - } - - stream.write( - `partition: ${key} votes: ${voteEntry.voteCount} values: ${key2} \t\t\t${voters}\t -details:${voteEntry.fullString} \n` - ) - // stream.write(` ---voters: ${JSON.stringify(voteEntry.ownerIds)}\n`) - } - const numUniqueVotes = Object.keys(votes).length - if (numUniqueVotes > 2 || (numUniqueVotes > 1 && votes['[]'] == null)) { - allPassed2 = false - stream.write(`partition: ${key} failed. Too many different version of data: ${numUniqueVotes} \n`) - } - } - - stream.write(`partition tests all passed: ${allPassed2}\n`) - - rangesCovered.sort(function (a, b) { - return a.id === b.id ? 0 : a.id < b.id ? 
-1 : 1 - }) - - const isStored = function (i: number, rangeCovered: { stMin: number; stMax: number }): boolean { - const key = i - const minP = rangeCovered.stMin - const maxP = rangeCovered.stMax - if (minP === maxP) { - if (i !== minP) { - return false - } - } else if (maxP > minP) { - // are we outside the min to max range - if (key < minP || key > maxP) { - return false - } - } else { - // are we inside the min to max range (since the covered rage is inverted) - if (key > maxP && key < minP) { - return false - } - } - return true - } - const isConsensus = function (i: number, rangeCovered: { cMin: number; cMax: number }): boolean { - const key = i - const minP = rangeCovered.cMin - const maxP = rangeCovered.cMax - if (minP === maxP) { - if (i !== minP) { - return false - } - } else if (maxP > minP) { - // are we outside the min to max range - if (key < minP || key > maxP) { - return false - } - } else { - // are we inside the min to max range (since the covered rage is inverted) - if (key > maxP && key < minP) { - return false - } - } - return true - } - - for (const range of rangesCovered) { - let partitionGraph = '' - for (let i = 0; i < range.numP; i++) { - const isC = isConsensus(i, range) - const isSt = isStored(i, range) - - if (i === range.hP) { - partitionGraph += 'H' - } else if (isC && isSt) { - partitionGraph += 'C' - } else if (isC) { - partitionGraph += '!' - } else if (isSt) { - partitionGraph += 'e' - } else { - partitionGraph += '_' - } - } - - stream.write( - `node: ${range.id} ${range.ipPort}\tgraph: ${partitionGraph}\thome: ${range.hP} data:${Utils.safeStringify( - range - )}\n` - ) - } - stream.write(`\n\n`) - nodesListsCovered.sort(function (a, b) { - return a.id === b.id ? 0 : a.id < b.id ? -1 : 1 - }) - for (const nodesCovered of nodesListsCovered) { - let partitionGraph = '' - const consensusMap = {} - const storedMap = {} - for (const entry of nodesCovered.consensus) { - consensusMap[entry.idx] = { hp: entry.hp } - } - for (const entry of nodesCovered.stored) { - storedMap[entry.idx] = { hp: entry.hp } - } - - for (let i = 0; i < nodesCovered.numP; i++) { - const isC = consensusMap[i] != null // eslint-disable-line security/detect-object-injection - const isSt = storedMap[i] != null // eslint-disable-line security/detect-object-injection - if (i === nodesCovered.idx) { - partitionGraph += 'O' - } else if (isC && isSt) { - partitionGraph += 'C' - } else if (isC) { - partitionGraph += '!' 
- } else if (isSt) { - partitionGraph += 'e' - } else { - partitionGraph += '_' - } - } - - stream.write( - `node: ${nodesCovered.id} ${nodesCovered.ipPort}\tgraph: ${partitionGraph}\thome: ${ - nodesCovered.hP - } data:${Utils.safeStringify(nodesCovered)}\n` - ) - } - stream.write(`\n\n`) - for (const list of nodeLists) { - stream.write(`${Utils.safeStringify(list)} \n`) - } - - return { allPassed, allPassed2 } - } - calculateMinVotes(): number { let minVotes = Math.ceil(this.stateManager.currentCycleShardData.shardGlobals.nodesPerConsenusGroup * 0.51) const majorityOfActiveNodes = Math.ceil(this.stateManager.currentCycleShardData.nodes.length * 0.51) diff --git a/src/state-manager/Deprecated.ts b/src/state-manager/Deprecated.ts deleted file mode 100644 index b23cc5e9f..000000000 --- a/src/state-manager/Deprecated.ts +++ /dev/null @@ -1,7031 +0,0 @@ -import * as Shardus from '../shardus/shardus-types' -import Profiler from '../utils/profiler' -import { P2PModuleContext as P2P } from '../p2p/Context' -import Storage from '../storage' -import Crypto from '../crypto' -import Logger from '../logger' -import StateManager from '.' -import { Logger as log4jsLogger } from 'log4js' - -// const cHashSetStepSize = 4 -// const cHashSetTXStepSize = 2 -// const cHashSetDataStepSize = 2 - -class Deprecated { - app: Shardus.App - crypto: Crypto - config: Shardus.ServerConfiguration - profiler: Profiler - - logger: Logger - p2p: P2P - storage: Storage - stateManager: StateManager - - mainLogger: log4jsLogger - fatalLogger: log4jsLogger - shardLogger: log4jsLogger - statsLogger: log4jsLogger - statemanager_fatal: (key: string, log: string) => void - - // sentReceipts: Map - // sendArchiveData: boolean - // purgeArchiveData: boolean - - // /** tracks state for repairing partitions. index by cycle counter key to get the repair object, index by parition */ - // repairTrackingByCycleById: { [cycleKey: string]: { [id: string]: RepairTracker } } - // /** UpdateRepairData by cycle key */ - // repairUpdateDataByCycle: { [cycleKey: string]: UpdateRepairData[] } - - // applyAllPreparedRepairsRunning: boolean - - // repairStartedMap: Map - // repairCompletedMap: Map - // dataRepairStack: RepairTracker[] - - constructor( - stateManager: StateManager, - profiler: Profiler, - app: Shardus.App, - logger: Logger, - storage: Storage, - p2p: P2P, - crypto: Crypto, - config: Shardus.ServerConfiguration - ) { - this.crypto = crypto - this.app = app - this.logger = logger - this.config = config - this.profiler = profiler - this.p2p = p2p - this.storage = storage - this.stateManager = stateManager - - this.mainLogger = logger.getLogger('main') - this.fatalLogger = logger.getLogger('fatal') - this.shardLogger = logger.getLogger('shardDump') - this.statsLogger = logger.getLogger('statsDump') - this.statemanager_fatal = stateManager.statemanager_fatal - - // this.sentReceipts = new Map() - - // this.sendArchiveData = false - // this.purgeArchiveData = false - - // this.repairTrackingByCycleById = {} - // this.repairUpdateDataByCycle = {} - // this.applyAllPreparedRepairsRunning = false - - // this.repairStartedMap = new Map() - // this.repairCompletedMap = new Map() - // this.dataRepairStack = [] - } - - //NOT used but seem possibly usefull... - purgeTransactionData(): void { - const tsStart = 0 - const tsEnd = 0 - this.storage.clearAcceptedTX(tsStart, tsEnd) - } - - purgeStateTableData(): void { - // do this by timestamp maybe.. - // this happnes on a slower scale. 
- const tsEnd = 0 // todo get newest time to keep - this.storage.clearAccountStateTableOlderThan(tsEnd) - } - - /*** - * ## ## ### ## ## ######## ## ######## ######## ###### - * ## ## ## ## ### ## ## ## ## ## ## ## ## ## - * ## ## ## ## #### ## ## ## ## ## ## ## ## - * ######### ## ## ## ## ## ## ## ## ###### ######## ###### - * ## ## ######### ## #### ## ## ## ## ## ## ## - * ## ## ## ## ## ### ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ######## ######## ######## ## ## ###### - */ - - setupHandlers(): void { - // // /get_account_state_hash (Acc_start, Acc_end, Ts_start, Ts_end) - // // Acc_start - get data for accounts starting with this account id; inclusive - // // Acc_end - get data for accounts up to this account id; inclusive - // // Ts_start - get data newer than this timestamp - // // Ts_end - get data older than this timestamp - // // Returns a single hash of the data from the Account State Table determined by the input parameters; sort by Tx_ts then Tx_id before taking the hash - // // Updated names: accountStart , accountEnd, tsStart, tsEnd - // this.p2p.registerInternal( - // 'get_account_state_hash', - // async ( - // payload: AccountStateHashReq, - // respond: (arg0: AccountStateHashResp) => Promise, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // this.profiler.scopedProfileSectionStart('get_account_state_hash', false, msgSize) - // let responseSize = cUninitializedSize - // try { - // const result = {} as AccountStateHashResp - // if (this.softSync_checkInitialFlag && this.initalSyncFinished === false) { - // //not ready? - // result.ready = false - // result.stateHash = this.stateManager.currentCycleShardData.ourNode.id - // await respond(result) - // return - // } - // // yikes need to potentially hash only N records at a time and return an array of hashes - // const stateHash = await this.stateManager.transactionQueue.getAccountsStateHash( - // payload.accountStart, - // payload.accountEnd, - // payload.tsStart, - // payload.tsEnd - // ) - // result.stateHash = stateHash - // result.ready = true - // responseSize = await respond(result) - // } catch (e) { - // this.statemanager_fatal('get_account_state_hash', e) - // } finally { - // this.profiler.scopedProfileSectionEnd('get_account_state_hash', responseSize) - // } - // } - // ) - // // /get_account_state (Acc_start, Acc_end, Ts_start, Ts_end) - // // Acc_start - get data for accounts starting with this account id; inclusive - // // Acc_end - get data for accounts up to this account id; inclusive - // // Ts_start - get data newer than this timestamp - // // Ts_end - get data older than this timestamp - // // Returns data from the Account State Table determined by the input parameters; limits result to 1000 records (as configured) - // // Updated names: accountStart , accountEnd, tsStart, tsEnd - // this.p2p.registerInternal( - // 'get_account_state', - // async ( - // payload: GetAccountStateReq, - // respond: (arg0: { accountStates: Shardus.StateTableObject[] }) => Promise, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // if (this.config.stateManager == null) { - // throw new Error('this.config.stateManager == null') //TODO TSConversion would be nice to eliminate some of these config checks. 
- // } - // this.profiler.scopedProfileSectionStart('get_account_state', false, msgSize) - // const result = {} as { accountStates: Shardus.StateTableObject[] } - // // max records set artificially low for better test coverage - // // todo m11: make configs for how many records to query - // const accountStates = await this.storage.queryAccountStateTable( - // payload.accountStart, - // payload.accountEnd, - // payload.tsStart, - // payload.tsEnd, - // this.config.stateManager.stateTableBucketSize - // ) - // result.accountStates = accountStates - // const responseSize = await respond(result) - // this.profiler.scopedProfileSectionEnd('get_account_state', responseSize) - // } - // ) - // // /get_account_data (Acc_start, Acc_end) - // // Acc_start - get data for accounts starting with this account id; inclusive - // // Acc_end - get data for accounts up to this account id; inclusive - // // Returns data from the application Account Table; limits result to 300 records (as configured); - // // For applications with multiple “Account” tables the returned data is grouped by table name. - // // For example: [ {Acc_id, State_after, Acc_data}, { … }, ….. ] - // // Updated names: accountStart , accountEnd - // this.p2p.registerInternal('get_account_data', async (payload: GetAccountDataReq, respond: (arg0: { accountData: Shardus.WrappedData[] | null }) => any) => { - // throw new Error('get_account_data endpoint retired') - // // let result = {} as {accountData: Shardus.WrappedData[] | null}//TSConversion This is complicated !! check app for details. - // // let accountData = null - // // let ourLockID = -1 - // // try { - // // ourLockID = await this.fifoLock('accountModification') - // // accountData = await this.app.getAccountData(payload.accountStart, payload.accountEnd, payload.maxRecords) - // // } finally { - // // this.fifoUnlock('accountModification', ourLockID) - // // } - // // //PERF Disiable this in production or performance testing. - // // this.testAccountDataWrapped(accountData) - // // result.accountData = accountData - // // await respond(result) - // }) - // // After joining the network - // // Record Joined timestamp - // // Even a syncing node will receive accepted transactions - // // Starts receiving accepted transaction and saving them to Accepted Tx Table - // this.p2p.registerGossipHandler('acceptedTx', async (acceptedTX: AcceptedTx, sender: Shardus.Node, tracker: string) => { - // // docs mention putting this in a table but it seems so far that an in memory queue should be ok - // // should we filter, or instead rely on gossip in to only give us TXs that matter to us? - // this.p2p.sendGossipIn('acceptedTx', acceptedTX, tracker, sender) - // let noConsensus = false // this can only be true for a set command which will never come from an endpoint - // this.stateManager.transactionQueue.routeAndQueueAcceptedTransaction(acceptedTX, /*sendGossip*/ false, sender, /*globalModification*/ false, noConsensus) - // //Note await not needed so beware if you add code below this. 
- // }) - // // /get_accepted_transactions (Ts_start, Ts_end) - // // Ts_start - get data newer than this timestamp - // // Ts_end - get data older than this timestamp - // // Returns data from the Accepted Tx Table starting with Ts_start; limits result to 500 records (as configured) - // // Updated names: tsStart, tsEnd - // this.p2p.registerInternal('get_accepted_transactions', async (payload: AcceptedTransactionsReq, respond: (arg0: { transactions: Shardus.AcceptedTx[] }) => any) => { - // let result = {} as { transactions: Shardus.AcceptedTx[] } - // if (!payload.limit) { - // payload.limit = 10 - // } - // let transactions = await this.storage.queryAcceptedTransactions(payload.tsStart, payload.tsEnd, payload.limit) - // result.transactions = transactions - // await respond(result) - // }) - // this.p2p.registerInternal('get_account_data2', async (payload: GetAccountData2Req, respond: (arg0: { accountData: Shardus.WrappedData[] | null }) => any) => { - // let result = {} as { accountData: Shardus.WrappedData[] | null } //TSConversion This is complicated !! - // let accountData = null - // let ourLockID = -1 - // try { - // ourLockID = await this.fifoLock('accountModification') - // accountData = await this.app.getAccountDataByRange(payload.accountStart, payload.accountEnd, payload.tsStart, payload.tsEnd, payload.maxRecords) - // } finally { - // this.fifoUnlock('accountModification', ourLockID) - // } - // //PERF Disiable this in production or performance testing. - // this.testAccountDataWrapped(accountData) - // result.accountData = accountData - // await respond(result) - // }) - // // /get_transactions_by_list (Tx_ids) - // // Tx_ids - array of transaction ids - // // Returns data from the Transactions Table for just the given transaction ids - // this.p2p.registerInternal('get_transactions_by_list', async (payload: GetTransactionsByListReq, respond: (arg0: Shardus.AcceptedTx[]) => any) => { - // let result = [] as AcceptedTx[] - // try { - // result = await this.storage.queryAcceptedTransactionsByIds(payload.Tx_ids) - // } finally { - // } - // await respond(result) - // }) - // this.p2p.registerInternal('get_transactions_by_partition_index', async (payload: TransactionsByPartitionReq, respond: (arg0: TransactionsByPartitionResp) => any) => { - // // let result = {} - // let passFailList = [] - // let statesList = [] - // let acceptedTXs = null - // try { - // // let partitionId = payload.partitionId - // let cycle = payload.cycle - // let indicies = payload.tx_indicies - // let hash = payload.hash - // let partitionId = payload.partitionId - // let expectedResults = indicies.length - // let returnedResults = 0 - // let key = 'c' + cycle - // let partitionObjectsByHash = this.partitionObjects.recentPartitionObjectsByCycleByHash[key] - // if (!partitionObjectsByHash) { - // await respond({ success: false }) - // } - // let partitionObject = partitionObjectsByHash[hash] - // if (!partitionObject) { - // await respond({ success: false }) - // } - // let txIDList = [] - // for (let index of indicies) { - // let txid = partitionObject.Txids[index] - // txIDList.push(txid) - // let passFail = partitionObject.Status[index] - // passFailList.push(passFail) - // } - // for (let index of indicies) { - // let state = partitionObject.States[index] - // statesList.push(state) - // if (state != null) { - // returnedResults++ - // } - // } - // if (returnedResults < expectedResults) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(`get_transactions_by_partition_index 
failed! returnedResults < expectedResults send ${returnedResults} < ${expectedResults}`) - // } - // acceptedTXs = await this.storage.queryAcceptedTransactionsByIds(txIDList) - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(`get_transactions_by_partition_index failed! returnedResults < expectedResults send2 `) - // if (acceptedTXs != null && acceptedTXs.length < expectedResults) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(`get_transactions_by_partition_index results ${utils.stringifyReduce(acceptedTXs)} snippets ${utils.stringifyReduce(payload.debugSnippets)} `) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(`get_transactions_by_partition_index results2:${utils.stringifyReduce(acceptedTXs.map((x: Shardus.AcceptedTx) => x.id))} snippets:${utils.stringifyReduce(payload.debugSnippets)} txid:${utils.stringifyReduce(txIDList)} `) - // let acceptedTXsBefore = 0 - // if (acceptedTXs != null) { - // acceptedTXsBefore = acceptedTXs.length - // } - // // find an log missing results: - // // for(let txid of txIDList) - // let received: StringBoolObjectMap = {} - // for (let acceptedTX of acceptedTXs) { - // received[acceptedTX.id] = true - // } - // let missingTXs: string[] = [] - // let missingTXHash: StringBoolObjectMap = {} - // for (let txid of txIDList) { - // if (received[txid] !== true) { - // missingTXs.push(txid) - // missingTXHash[txid] = true - // } - // } - // let finds = -1 - // let txTally = this.partitionObjects.getTXList(cycle, partitionId) - // let found = [] - // if (txTally) { - // finds = 0 - // for (let tx of txTally.txs) { - // if (missingTXHash[tx.id] === true) { - // finds++ - // acceptedTXs.push(tx) - // found.push(tx.id) - // } - // } - // } - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(`get_transactions_by_partition_index failed! returnedResults < expectedResults send3 ${acceptedTXsBefore} < ${expectedResults} findsFixed: ${finds} missing: ${utils.stringifyReduce(missingTXs)} found: ${utils.stringifyReduce(found)} acceptedTXs.length updated: ${acceptedTXs.length}`) - // } else { - // } - // } catch (ex) { - // this.statemanager_fatal(`get_transactions_by_partition_index_ex`, 'get_transactions_by_partition_index failed: ' + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // } finally { - // } - // // TODO fix pass fail sorting.. it is probably all wrong and out of sync, but currently nothing fails. 
- // await respond({ success: true, acceptedTX: acceptedTXs, passFail: passFailList, statesList: statesList }) - // }) - // // /get_partition_txids (Partition_id, Cycle_number) - // // Partition_id - // // Cycle_number - // // Returns the partition object which contains the txids along with the status - // this.p2p.registerInternal('get_partition_txids', async (payload: GetPartitionTxidsReq, respond: (arg0: {}) => any) => { - // let result = {} - // try { - // let id = payload.Partition_id - // let key = 'c' + payload.Cycle_number - // let partitionObjects = this.partitionObjects.partitionObjectsByCycle[key] - // for (let obj of partitionObjects) { - // if (obj.Partition_id === id) { - // result = obj - // } - // } - // } finally { - // } - // await respond(result) - // }) - } - - /*** - * ## ## #### ###### ###### - * ### ### ## ## ## ## ## - * #### #### ## ## ## - * ## ### ## ## ###### ## - * ## ## ## ## ## - * ## ## ## ## ## ## ## - * ## ## #### ###### ###### - */ - - // /** - // * sendPartitionData - // * @param {PartitionReceipt} partitionReceipt - // * @param {PartitionObject} paritionObject - // */ - // sendPartitionData(partitionReceipt: PartitionReceipt, paritionObject: PartitionObject) { - // if (partitionReceipt.resultsList.length === 0) { - // return - // } - // // CombinedPartitionReceipt - - // let partitionReceiptCopy = JSON.parse(stringify(partitionReceipt.resultsList[0])) - - // /** @type {CombinedPartitionReceipt} */ - // let combinedReciept = { result: partitionReceiptCopy, signatures: partitionReceipt.resultsList.map((a) => a.sign) } - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(' sendPartitionData ' + utils.stringifyReduceLimit({ combinedReciept, paritionObject })) - - // // send it - // // this.p2p.archivers.sendPartitionData(combinedReciept, paritionObject) - // } - - // sendTransactionData(partitionNumber: number, cycleNumber: number, transactions: AcceptedTx[]) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(' sendTransactionData ' + utils.stringifyReduceLimit({ partitionNumber, cycleNumber, transactions })) - - // // send it - // // this.p2p.archivers.sendTransactionData(partitionNumber, cycleNumber, transactions) - // } - - // /** - // * trySendAndPurgeReciepts - // * @param {PartitionReceipt} partitionReceipt - // */ - // trySendAndPurgeReceiptsToArchives(partitionReceipt: PartitionReceipt) { - // if (partitionReceipt.resultsList.length === 0) { - // return - // } - // let cycleNumber = partitionReceipt.resultsList[0].Cycle_number - // let partitionId = partitionReceipt.resultsList[0].Partition_id - // let key = `c${cycleNumber}p${partitionId}` - // if (this.sentReceipts.has(key)) { - // return - // } - - // if (logFlags.verbose) this.mainLogger.debug(' trySendAndPurgeReceipts ' + key) - - // this.sentReceipts.set(key, true) - // try { - // if (this.sendArchiveData === true) { - // let paritionObject = this.getPartitionObject(cycleNumber, partitionId) // todo get object - // if (paritionObject == null) { - // this.statemanager_fatal(`trySendAndPurgeReceiptsToArchives`, ` trySendAndPurgeReceiptsToArchives paritionObject == null ${cycleNumber} ${partitionId}`) - // throw new Error(`trySendAndPurgeReceiptsToArchives paritionObject == null`) - // } - // this.sendPartitionData(partitionReceipt, paritionObject) - // } - // } finally { - // } - - // if (this.sendTransactionData) { - // // let txList = this.stateManager.partitionObjects.getTXList(cycleNumber, partitionId) - - // // this.sendTransactionData(partitionId, 
cycleNumber, txList.txs) - // } - - // if (this.purgeArchiveData === true) { - // // alreay sort of doing this in another spot. - // // check if all partitions for this cycle have been handled!! then clear data in that time range. - // // need to record time range. - // // or check for open repairs. older than what we want to clear out. - // } - // } - - // storeOurPartitionReceipt(cycleNumber: number, partitionReceipt: PartitionReceipt) { - // let key = 'c' + cycleNumber - - // if (!this.stateManager.ourPartitionReceiptsByCycleCounter) { - // this.stateManager.ourPartitionReceiptsByCycleCounter = {} - // } - // this.stateManager.ourPartitionReceiptsByCycleCounter[key] = partitionReceipt - // } - - // getPartitionReceipt(cycleNumber: number) { - // let key = 'c' + cycleNumber - - // if (!this.stateManager.ourPartitionReceiptsByCycleCounter) { - // return null - // } - // return this.stateManager.ourPartitionReceiptsByCycleCounter[key] - // } - - // /** - // * getPartitionObject - // * @param {number} cycleNumber - // * @param {number} partitionId - // * @returns {PartitionObject} - // */ - // getPartitionObject(cycleNumber: number, partitionId: number): PartitionObject | null { - // let key = 'c' + cycleNumber - // let partitionObjects = this.stateManager.partitionObjects.partitionObjectsByCycle[key] - // for (let obj of partitionObjects) { - // if (obj.Partition_id === partitionId) { - // return obj - // } - // } - // return null - // } - - /*** - * ## ## ### ###### ## ## ###### ######## ######## ###### - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## - * ######### ## ## ###### ######### ###### ###### ## ###### - * ## ## ######### ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ###### ## ## ###### ######## ## ###### - */ - - // /** - // * findMostCommonResponse - // * @param {number} cycleNumber - // * @param {number} partitionId - // * @param {string[]} ignoreList currently unused and broken todo resolve this. 
- // * @return {{topHash: string, topCount: number, topResult: PartitionResult}} - // */ - // findMostCommonResponse(cycleNumber: number, partitionId: number, ignoreList: string[]): { topHash: string | null; topCount: number; topResult: PartitionResult | null } { - // let key = 'c' + cycleNumber - // let responsesById = this.stateManager.partitionObjects.allPartitionResponsesByCycleByPartition[key] - // let key2 = 'p' + partitionId - // let responses = responsesById[key2] - - // let hashCounting: StringNumberObjectMap = {} - // let topHash = null - // let topCount = 0 - // let topResult = null - // if (responses.length > 0) { - // for (let partitionResult of responses) { - // let hash = partitionResult.Partition_hash - // let count = hashCounting[hash] || 0 - // count++ - // hashCounting[hash] = count - // if (count > topCount) { - // topCount = count - // topHash = hash - // topResult = partitionResult - // } - // } - // } - // // reaponsesById: ${utils.stringifyReduce(responsesById)} - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair findMostCommonResponse: retVal: ${utils.stringifyReduce({ topHash, topCount, topResult })} responses: ${utils.stringifyReduce(responses)} `) - // return { topHash, topCount, topResult } - // } - - // // vote rate set to 0.5 / 0.8 => 0.625 - // /** - // * solveHashSets - // * @param {GenericHashSetEntry[]} hashSetList - // * @param {number} lookAhead - // * @param {number} voteRate - // * @param {string[]} prevOutput - // * @returns {string[]} - // */ - // static solveHashSets(hashSetList: GenericHashSetEntry[], lookAhead: number = 10, voteRate: number = 0.625, prevOutput: string[] | null = null): string[] { - // let output = [] - // let outputVotes = [] - // let solving = true - // let index = 0 - // let lastOutputCount = 0 // output list length last time we went through the loop - // let stepSize = cHashSetStepSize - - // let totalVotePower = 0 - // for (let hashListEntry of hashSetList) { - // totalVotePower += hashListEntry.votePower - // } - // let votesRequired = voteRate * Math.ceil(totalVotePower) - - // let maxElements = 0 - // for (let hashListEntry of hashSetList) { - // maxElements = Math.max(maxElements, hashListEntry.hashSet.length / stepSize) - // } - - // while (solving) { - // let votes: StringCountEntryObjectMap = {} - // let topVote: Vote = { v: '', count: 0, vote: undefined, ec: undefined } - // let winnerFound = false - // let totalVotes = 0 - // // Loop through each entry list - // for (let hashListIndex = 0; hashListIndex < hashSetList.length; hashListIndex++) { - // // if we are already past the end of this entry list then skip - // let hashListEntry = hashSetList[hashListIndex] - // if ((index + hashListEntry.indexOffset + 1) * stepSize > hashListEntry.hashSet.length) { - // continue - // } - // // don't remember what this bail condition was. 
- // let sliceStart = (index + hashListEntry.indexOffset) * stepSize - // let v = hashListEntry.hashSet.slice(sliceStart, sliceStart + stepSize) - // if (v === '') { - // continue - // } - // // place votes for this value - // let countEntry: CountEntry = votes[v] || { count: 0, ec: 0, voters: [] } - // totalVotes += hashListEntry.votePower - // countEntry.count += hashListEntry.votePower - // countEntry.voters.push(hashListIndex) - // votes[v] = countEntry - // if (countEntry.count > topVote.count) { - // topVote.count = countEntry.count - // topVote.v = v - // topVote.vote = countEntry - // } - // hashListEntry.lastValue = v - // } - - // // if totalVotes < votesRequired then we are past hope of approving any more messages... I think. I guess there are some cases where we could look back and approve one more - // if (topVote.count === 0 || index > maxElements || totalVotes < votesRequired) { - // solving = false - // break - // } - // // can we find a winner in a simple way where there was a winner based on the next item to look at in all the arrays. - // if (topVote.count >= votesRequired) { - // winnerFound = true - // output.push(topVote.v) - // outputVotes.push(topVote) - // // corrections for chains that do not match our top vote. - // for (let k = 0; k < hashSetList.length; k++) { - // let hashListEntryOther = hashSetList[k] - // if (hashListEntryOther.lastValue === topVote.v) { - // hashListEntryOther.errorStack = [] - // } - // } - // } - - // // Leaving this here, because it is a good spot to put a breakpoint when testing a data set where stuf went wrong (hashset.js) - // // if (index === 123) { - // // let foo = 5 - // // foo++ - // // } - - // // for (let hashListEntry of hashSetList) { - // for (let hashListIndex = 0; hashListIndex < hashSetList.length; hashListIndex++) { - // let hashListEntry = hashSetList[hashListIndex] - // // for nodes that did not match the top vote .. or all nodes if no winner yet. - // if (!winnerFound || hashListEntry.lastValue !== topVote.v) { - // // consider removing v.. since if we dont have a winner yet then top vote will get updated in this loop - // hashListEntry.corrections.push({ i: index, tv: topVote, v: topVote.v, t: 'insert', bv: hashListEntry.lastValue, if: lastOutputCount }) - // hashListEntry.errorStack.push({ i: index, tv: topVote, v: topVote.v }) - // hashListEntry.indexOffset -= 1 - - // if (hashListEntry.waitForIndex > 0 && index < hashListEntry.waitForIndex) { - // continue - // } - - // if (hashListEntry.waitForIndex > 0 && hashListEntry.waitForIndex === index) { - // hashListEntry.waitForIndex = -1 - // hashListEntry.waitedForThis = true - // } - - // let alreadyVoted: StringBoolObjectMap = {} // has the given node already EC voted for this key? - // // +1 look ahead to see if we can get back on track - // // lookAhead of 0 seems to be more stable - // // let lookAhead = 10 // hashListEntry.errorStack.length - // for (let i = 0; i < hashListEntry.errorStack.length + lookAhead; i++) { - // // using +2 since we just subtracted one from the index offset. 
anothe r +1 since we want to look ahead of where we just looked - // let thisIndex = index + hashListEntry.indexOffset + i + 2 - // let sliceStart = thisIndex * stepSize - // if (sliceStart + 1 > hashListEntry.hashSet.length) { - // continue - // } - // let v = hashListEntry.hashSet.slice(sliceStart, sliceStart + stepSize) - // if (alreadyVoted[v]) { - // continue - // } - - // // a hint to stop us from looking ahead too far - // // if (prevOutput && prevOutput[index + i + 2] === v) { - // // break - // // } - - // // scan ahead for other connections - // if (prevOutput && !hashListEntry.waitedForThis) { - // let foundMatch = false - // let searchAhead = 5 // Math.max(10, lookAhead - i) - // for (let k = 1; k < searchAhead; k++) { - // let idx = index + k // + 2 + hashListEntry.indexOffset - // if (prevOutput.length <= idx) { - // break - // } - // if (prevOutput && prevOutput[idx] === v) { - // foundMatch = true - // hashListEntry.waitForIndex = index + k - // hashListEntry.futureIndex = index + hashListEntry.indexOffset + i + 2 - // hashListEntry.futureValue = v - // } - // } - // if (foundMatch) { - // break - // } - // } - - // alreadyVoted[v] = true - // let countEntry: CountEntry = votes[v] || { count: 0, ec: 0, voters: [] } // TSConversion added a missing voters[] object here. looks good to my code inspection but need to validate it with tests! - - // // only vote 10 spots ahead - // if (i < 10) { - // countEntry.ec += hashListEntry.votePower - // } - - // // check for possible winnner due to re arranging things - // // a nuance here that we require there to be some official votes before in this row before we consider a tx.. will need to analyze this choice - // if (!winnerFound && countEntry.count > 0 && countEntry.ec + countEntry.count >= votesRequired) { - // topVote.ec = countEntry.ec - // topVote.v = v - // topVote.vote = countEntry - // winnerFound = true - // output.push(topVote.v) - // outputVotes.push(topVote) - // // todo roll back corrctions where nodes were already voting for the winner. 
- // for (let k = 0; k < hashListIndex; k++) { - // let hashListEntryOther = hashSetList[k] - // if (hashListEntryOther.lastValue === topVote.v) { - // hashListEntryOther.errorStack.pop() - // hashListEntryOther.corrections.pop() - // hashListEntryOther.indexOffset++ - // } - // } - // } - - // if (winnerFound) { - // if (v === topVote.v) { - // if (hashListEntry.waitedForThis) { - // hashListEntry.waitedForThis = false - // } - // // delete stuff off stack and bail - // // +1 because we at least want to delete 1 entry if index i=0 of this loop gets us here - - // /** @type {HashSetEntryCorrection[]} */ - // let tempCorrections = [] - // // for (let j = 0; j < i + 1; j++) { - // // let correction = null - // // //if (i < hashListEntry.errorStack.length) - // // { - // // hashListEntry.errorStack.pop() - // // correction = hashListEntry.corrections.pop() - // // } - // // tempCorrections.push({ i: index - j, t: 'extra', c: correction }) - // // } - // let index2 = index + hashListEntry.indexOffset + i + 2 - // let lastIdx = -1 - - // for (let j = 0; j < i + 1; j++) { - // /** @type {HashSetEntryCorrection} */ - // let correction = null - // if (hashListEntry.errorStack.length > 0) { - // hashListEntry.errorStack.pop() - // correction = hashListEntry.corrections.pop() - // } - // let extraIdx = j + index2 - (i + 1) - // if (correction) { - // extraIdx = correction.i - 1 - // lastIdx = extraIdx - // } else if (lastIdx > 0) { - // extraIdx = lastIdx - // } - // // correction to fix problem where we were over deleting stuff. - // // a bit more retroactive than I like. problem happens in certain cases when there are two winners in a row that are not first pass winners - // // see 16z for example where this breaks.. - // // if (hashListEntry.corrections.length > 0) { - // // let nextCorrection = hashListEntry.corrections[hashListEntry.corrections.length - 1] - // // if (nextCorrection && correction && nextCorrection.bv === correction.bv) { - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug( ` solveHashSets overdelete fix: i:${i} j:${j} index:${index} bv:${nextCorrection.bv}}`) - // // continue - // // } - // // } - - // // hashListEntry.indexOffset++ - // /** @type {HashSetEntryCorrection} */ - - // // @ts-ignore solveHashSets is unused at the moment not going to bother with ts fixup - // let tempCorrection: HashSetEntryCorrection = { i: extraIdx, t: 'extra', c: correction, hi: index2 - (j + 1), tv: null, v: null, bv: null, if: -1 } // added tv: null, v: null, bv: null, if: -1 - // tempCorrections.push(tempCorrection) - // } - - // hashListEntry.corrections = hashListEntry.corrections.concat(tempCorrections) - // // +2 so we can go from being put one behind and go to 1 + i ahead. - // hashListEntry.indexOffset += i + 2 - - // // hashListEntry.indexOffset += (1) - - // hashListEntry.errorStack = [] // clear the error stack - // break - // } else { - // // backfil checking - // // let outputIndex = output.length - 1 - // // let tempV = v - // // let stepsBack = 1 - // // while (output.length > 0 && outputIndex > 0 && output[outputIndex] === tempV) { - // // // work backwards through continuous errors and redact them as long as they match up - // // outputIndex-- - // // stepsBack++ - // // } - // } - // } - // } - - // if (hashListEntry.waitedForThis) { - // hashListEntry.waitedForThis = false - // } - // } - // } - // index++ - // lastOutputCount = output.length - // } - - // // trailing extras cleanup. 
- // for (let hashListIndex = 0; hashListIndex < hashSetList.length; hashListIndex++) { - // let hashListEntry = hashSetList[hashListIndex] - - // let extraIdx = index - // while ((extraIdx + hashListEntry.indexOffset) * stepSize < hashListEntry.hashSet.length) { - // let hi = extraIdx + hashListEntry.indexOffset // index2 - (j + 1) - // // @ts-ignore solveHashSets is unused at the moment not going to bother with ts fixup - // hashListEntry.corrections.push({ i: extraIdx, t: 'extra', c: null, hi: hi, tv: null, v: null, bv: null, if: -1 }) // added , tv: null, v: null, bv: null, if: -1 - // extraIdx++ - // } - // } - - // return output // { output, outputVotes } - // } - - // // figures out i A is Greater than B - // // possibly need an alternate version of this solver - // // needs to account for vote power! - // static compareVoteObjects(voteA: ExtendedVote, voteB: ExtendedVote, strict: boolean) { - // // { winIdx: null, val: v, count: 0, ec: 0, lowestIndex: index, voters: [], voteTally: Array(hashSetList.length) } - // // { i: index } - - // let agtb = 0 - // let bgta = 0 - - // for (let i = 0; i < voteA.voteTally.length; i++) { - // let vtA = voteA.voteTally[i] - // let vtB = voteB.voteTally[i] - // if (vtA != null && vtB != null) { - // if (vtA.i > vtB.i) { - // agtb += vtA.p // vote power. note A and B are the same node so power will be equal. - // } - // if (vtB.i > vtA.i) { - // bgta += vtB.p // vote power. - // } - // } - // } - // // what to do with strict. - // if (strict && agtb > 0) { - // return 1 - // } - - // //return agtb - bgta - - // return utils.sortAsc(agtb, bgta) - - // // what to return? - // } - - // // static compareVoteObjects2 (voteA, voteB, strict) { - // // // return voteB.votesseen - voteA.votesseen - // // return voteA.votesseen - voteB.votesseen - // // } - - // // when sorting / computing need to figure out if pinning will short cirquit another vote. - // // at the moment this seems - - // // vote rate set to 0.5 / 0.8 => 0.625 - // /** - // * solveHashSets - // * @param {GenericHashSetEntry[]} hashSetList - // * @param {number} lookAhead - // * @param {number} voteRate - // * - // * @returns {string[]} - // */ - // static solveHashSets2(hashSetList: GenericHashSetEntry[], lookAhead: number = 10, voteRate: number = 0.625): string[] { - // let output: string[] = [] - // // let outputVotes = [] - // let solving = true - // let index = 0 - // let stepSize = cHashSetStepSize - - // let totalVotePower = 0 - // for (let hashListEntry of hashSetList) { - // totalVotePower += hashListEntry.votePower - // // init the pinIdx - // hashListEntry.pinIdx = -1 - // hashListEntry.pinObj = null - // } - // let votesRequired = voteRate * Math.ceil(totalVotePower) - - // let maxElements = 0 - // for (let hashListEntry of hashSetList) { - // maxElements = Math.max(maxElements, hashListEntry.hashSet.length / stepSize) - // } - - // // todo backtrack each vote. list of what vote cast at each step. - // // solve this for only one object... or solve for all and compare solvers? 
- - // // map of array of vote entries - // let votes = {} as { [x: string]: ExtendedVote[] } - // let votesseen = 0 - // while (solving) { - // // Loop through each entry list - // solving = false - // for (let hashListIndex = 0; hashListIndex < hashSetList.length; hashListIndex++) { - // // if we are already past the end of this entry list then skip - // let hashListEntry = hashSetList[hashListIndex] - // if ((index + 1) * stepSize > hashListEntry.hashSet.length) { - // continue - // } - // // don't remember what this bail condition was. - // let sliceStart = index * stepSize - // let v = hashListEntry.hashSet.slice(sliceStart, sliceStart + stepSize) - // if (v === '') { - // continue - // } - // solving = true // keep it going - // let votesArray: ExtendedVote[] = votes[v] - // if (votesArray == null) { - // votesseen++ - // //TSConversion this was potetially a major bug, v was missing from this structure before! - // // @ts-ignore TSConversion solveHashSets2 is unused. but need to hold off o fixing up these potential nulls - // let votObject: ExtendedVote = { winIdx: null, val: v, v, count: 0, ec: 0, lowestIndex: index, voters: [], voteTally: Array(hashSetList.length), votesseen } as ExtendedVote - // votesArray = [votObject] - // votes[v] = votesArray - - // // hashListEntry.ownVotes.push(votObject) - // } - - // // get lowest value in list that we have not voted on and is not pinned by our best vote. - // let currentVoteObject: ExtendedVote | null = null - // for (let voteIndex = votesArray.length - 1; voteIndex >= 0; voteIndex--) { - // let voteObject = votesArray[voteIndex] - - // let ourVoteTally = voteObject.voteTally[hashListIndex] - // if (ourVoteTally != null) { - // // we voted - // break - // } - - // // how to check pinIdx? do we have to analys neighbor pinIdx? - // // use pinObj to see if the last pinObj A is greater than this obj B. - // if (hashListEntry.pinObj != null && hashListEntry.pinObj !== voteObject) { - // // if (hashListEntry.pinObj.val === voteObject.val) - // { - // let compare = Depricated.compareVoteObjects(hashListEntry.pinObj, voteObject, false) - // if (compare > 0) { - // continue // or break; - // } - // } - // } - // currentVoteObject = voteObject - // } - - // if (currentVoteObject == null) { - // // create new vote object - // votesseen++ - // //TSConversion this was potetially a major bug, v was missing from this structure before! - // // @ts-ignore TSConversion solveHashSets2 is unused. 
but need to hold off o fixing up these potential nulls - // currentVoteObject = { winIdx: null, val: v, v, count: 0, ec: 0, lowestIndex: index, voters: [], voteTally: Array(hashSetList.length), votesseen } as ExtendedVote - // votesArray.push(currentVoteObject) - // // hashListEntry.ownVotes.push(currentVoteObject) - // } - // if (currentVoteObject.voters == null) { - // throw new Error('solveHashSets2 currentVoteObject.voters == null') - // } - // if (hashListEntry == null || hashListEntry.ownVotes == null) { - // throw new Error(`solveHashSets2 hashListEntry == null ${hashListEntry == null}`) - // } - - // currentVoteObject.voters.push(hashListIndex) - // currentVoteObject.voteTally[hashListIndex] = { i: index, p: hashListEntry.votePower } // could this be a simple index - // currentVoteObject.count += hashListEntry.votePower - // hashListEntry.ownVotes.push(currentVoteObject) - - // if (currentVoteObject.winIdx !== null) { - // // this already won before but we should still update our own pinIdx - - // hashListEntry.pinIdx = index - // hashListEntry.pinObj = currentVoteObject - // } - // if (currentVoteObject.count >= votesRequired) { - // for (let i = 0; i < hashSetList.length; i++) { - // let tallyObject = currentVoteObject.voteTally[i] - // if (tallyObject != null) { - // let tallyHashListEntry = hashSetList[i] - // tallyHashListEntry.pinIdx = tallyObject.i - // tallyHashListEntry.pinObj = currentVoteObject - // } - // } - // currentVoteObject.winIdx = index - // } - // } - - // index++ - // } - - // // need backtracking ref for how each list tracks the votses - - // // Collect a list of all vodes - // let allVotes: ExtendedVote[] = [] - // for (const votesArray of Object.values(votes)) { - // for (let voteObj of votesArray) { - // allVotes.push(voteObj) - // } - // } - // // apply a partial order sort, n - // // allVotes.sort(function (a, b) { return Depricated.compareVoteObjects(a, b, false) }) - - // // generate solutions! - - // // count only votes that have won! - // // when / how is it safe to detect a win? - - // let allWinningVotes: ExtendedVote[] = [] - // for (let voteObj of allVotes) { - // // IF was a a winning vote? - // if (voteObj.winIdx !== null) { - // allWinningVotes.push(voteObj) - // } - // } - // allWinningVotes.sort(function (a, b) { - // return Depricated.compareVoteObjects(a, b, false) - // }) - // let finalIdx = 0 - // for (let voteObj of allWinningVotes) { - // // IF was a a winning vote? - // if (voteObj.winIdx !== null) { - // // allWinningVotes.push(voteObj) - // output.push(voteObj.val) - // voteObj.finalIdx = finalIdx - // finalIdx++ - // } - // } - // // to sort the values we could look at the order things were finalized.. - // // but you could have a case where an earlier message is legitimately finialized later on. 
- - // // let aTest = votes['55403088d5636488d3ff17d7d90c052e'][0] - // // let bTest = votes['779980ea84b8a5eac2dc3d07013377e5'][0] - // // if (logFlags.console) console.log(Depricated.compareVoteObjects(aTest, bTest, false)) - // // if (logFlags.console) console.log(Depricated.compareVoteObjects(bTest, aTest, false)) - - // // correction solver: - // for (let hashListIndex = 0; hashListIndex < hashSetList.length; hashListIndex++) { - // // if we are already past the end of this entry list then skip - // // let hashListIndex = 2 - - // let hashListEntry = hashSetList[hashListIndex] - // hashListEntry.corrections = [] // clear this - // // hashListEntry.instructions = [] - // // /* prettier-ignore */ if (logFlags.console) console.log(`solution for set ${hashListIndex} locallen:${hashListEntry.hashSet.length / stepSize} `) - // let winningVoteIndex = 0 - // for (let voteObj of allWinningVotes) { - // if (voteObj.voteTally[hashListIndex] == null) { - // // if (logFlags.console) console.log(`missing @${voteObj.finalIdx} v:${voteObj.val}`) - // // bv: hashListEntry.lastValue, if: lastOutputCount are old. - // // @ts-ignore TSConversion solveHashSets2 is unused. but need to hold off o fixing up these potential nulls - // hashListEntry.corrections.push({ i: winningVoteIndex, tv: voteObj, v: voteObj.val, t: 'insert', bv: null, if: -1 }) - // } - // // what if we have it but it is in the wrong spot!! - // winningVoteIndex++ - // } - // if (hashListEntry == null || hashListEntry.ownVotes == null) { - // throw new Error(`solveHashSets2 hashListEntry == null 2 ${hashListEntry == null}`) - // } - // for (let voteObj of hashListEntry.ownVotes) { - // let localIdx = voteObj.voteTally[hashListIndex].i - // if (voteObj.winIdx == null) { - // // if (logFlags.console) console.log(`extra @${stringify(voteObj.voteTally[hashListIndex])} v:${voteObj.val}`) - // // @ts-ignore TSConversion solveHashSets2 is unused. but need to hold off o fixing up these potential nulls - // hashListEntry.corrections.push({ i: localIdx, t: 'extra', c: null, hi: localIdx, tv: null, v: null, bv: null, if: -1 }) - // } - // // localIdx++ - // } - - // // not so sure about this sort local vs. global index space. - // hashListEntry.corrections.sort(utils.sort_i_Asc) // (a, b) => a.i - b.i) - // winningVoteIndex = 0 - - // // hashListEntry.allWinningVotes = allWinningVotes - - // // build index map now! - // hashListEntry.indexMap = [] - // hashListEntry.extraMap = [] - - // for (let voteObj of allWinningVotes) { - // if (voteObj.voteTally[hashListIndex] == null) { - // hashListEntry.indexMap.push(-1) - // } else { - // hashListEntry.indexMap.push(voteObj.voteTally[hashListIndex].i) - // } - // } - // for (let voteObj of hashListEntry.ownVotes) { - // let localIdx = voteObj.voteTally[hashListIndex].i - // if (voteObj.winIdx == null) { - // hashListEntry.extraMap.push(localIdx) - // } - // } - // } - - // // generate corrections for main entry. 
- // // hashListEntry.corrections.push({ i: index, tv: topVote, v: topVote.v, t: 'insert', bv: hashListEntry.lastValue, if: lastOutputCount }) - // // hashListEntry.errorStack.push({ i: index, tv: topVote, v: topVote.v }) - // // hashListEntry.indexOffset -= 1 - - // // trailing extras: - // // while ((extraIdx + hashListEntry.indexOffset) * stepSize < hashListEntry.hashSet.length) { - // // let hi = extraIdx + hashListEntry.indexOffset // index2 - (j + 1) - // // hashListEntry.corrections.push({ i: extraIdx, t: 'extra', c: null, hi: hi, tv: null, v: null, bv: null, if: -1 }) // added , tv: null, v: null, bv: null, if: -1 - // // extraIdx++ - // // } - - // return output // { output, outputVotes } - // } - - // /** - // * expandIndexMapping - // * efficient transformation to create a lookup to go from answer space index to the local index space of a hashList entry - // * also creates a list of local indicies of elements to remove - // * @param {GenericHashSetEntry} hashListEntry - // * @param {string[]} output This is the output that we got from the general solver - // */ - // static expandIndexMapping(hashListEntry: GenericHashSetEntry, output: string[]) { - // // hashListEntry.corrections.sort(function (a, b) { return a.i === b.i ? 0 : a.i < b.i ? -1 : 1 }) - // // // index map is our index to the solution output - // // hashListEntry.indexMap = [] - // // // extra map is the index in our list that is an extra - // // hashListEntry.extraMap = [] - // // let readPtr = 0 - // // let writePtr = 0 - // // let correctionIndex = 0 - // // let currentCorrection = null - // // let extraBits = 0 - // // // This will walk the input and output indicies st that same time - // // while (writePtr < output.length) { - // // // Get the current correction. We walk this with the correctionIndex - // // if (correctionIndex < hashListEntry.corrections.length && hashListEntry.corrections[correctionIndex] != null && hashListEntry.corrections[correctionIndex].t === 'insert' && hashListEntry.corrections[correctionIndex].i <= writePtr) { - // // currentCorrection = hashListEntry.corrections[correctionIndex] - // // correctionIndex++ - // // } else if (correctionIndex < hashListEntry.corrections.length && hashListEntry.corrections[correctionIndex] != null && hashListEntry.corrections[correctionIndex].t === 'extra' && hashListEntry.corrections[correctionIndex].hi <= readPtr) { - // // currentCorrection = hashListEntry.corrections[correctionIndex] - // // correctionIndex++ - // // } else { - // // currentCorrection = null - // // } - // // // if (extraBits > 0) { - // // // readPtr += extraBits - // // // extraBits = 0 - // // // } - // // // increment pointers based on if there is a correction to write and what type of correction it is - // // if (!currentCorrection) { - // // // no correction to consider so we just write to the index map and advance the read and write pointer - // // hashListEntry.indexMap.push(readPtr) - // // writePtr++ - // // readPtr++ - // // } else if (currentCorrection.t === 'insert') { - // // // insert means the fix for this slot is to insert an item, since we dont have it this will be -1 - // // hashListEntry.indexMap.push(-1) - // // writePtr++ - // // } else if (currentCorrection.t === 'extra') { - // // // hashListEntry.extraMap.push({ i: currentCorrection.i, hi: currentCorrection.hi }) - // // hashListEntry.extraMap.push(currentCorrection.hi) - // // extraBits++ - // // readPtr++ - // // // if (currentCorrection.c === null) { - // // // writePtr++ - // // // } - // // continue - // 
// } - // // } - // // // final corrections: - // // while (correctionIndex < hashListEntry.corrections.length) { - // // currentCorrection = hashListEntry.corrections[correctionIndex] - // // correctionIndex++ - // // if (currentCorrection.t === 'extra') { - // // // hashListEntry.extraMap.push({ i: currentCorrection.i, hi: currentCorrection.hi }) - // // hashListEntry.extraMap.push(currentCorrection.hi) - // // // extraBits++ - // // continue - // // } - // // } - // } - - // /** - // * solveHashSetsPrep - // * todo cleanup.. just sign the partition object asap so we dont have to check if there is a valid sign object throughout the code (but would need to consider perf impact of this) - // * @param {number} cycleNumber - // * @param {number} partitionId - // * @param {string} ourNodeKey - // * @return {GenericHashSetEntry[]} - // */ - // solveHashSetsPrep(cycleNumber: number, partitionId: number, ourNodeKey: string): HashSetEntryPartitions[] { - // let key = 'c' + cycleNumber - // let responsesById = this.stateManager.partitionObjects.allPartitionResponsesByCycleByPartition[key] - // let key2 = 'p' + partitionId - // let responses = responsesById[key2] - - // let hashSets = {} as { [hash: string]: HashSetEntryPartitions } - // let hashSetList: HashSetEntryPartitions[] = [] - // // group identical sets together - // let hashCounting: StringNumberObjectMap = {} - // for (let partitionResult of responses) { - // let hash = partitionResult.Partition_hash - // let count = hashCounting[hash] || 0 - // if (count === 0) { - // let owner: string | null = null - // if (partitionResult.sign) { - // owner = partitionResult.sign.owner - // } else { - // owner = ourNodeKey - // } - // //TSConversion had to assert that owner is not null with owner! seems ok - // let hashSet: HashSetEntryPartitions = { hash: hash, votePower: 0, hashSet: partitionResult.hashSet, lastValue: '', errorStack: [], corrections: [], indexOffset: 0, owners: [owner!], ourRow: false, waitForIndex: -1, ownVotes: [] } - // hashSets[hash] = hashSet - // hashSetList.push(hashSets[hash]) - // // partitionResult.hashSetList = hashSet //Seems like this was only ever used for debugging, going to ax it to be safe! - // } else { - // if (partitionResult.sign) { - // hashSets[hash].owners.push(partitionResult.sign.owner) - // } - // } - // if (partitionResult.sign == null || partitionResult.sign.owner === ourNodeKey) { - // hashSets[hash].ourRow = true - // // hashSets[hash].owners.push(ourNodeKey) - // } - - // count++ - // hashCounting[hash] = count - // hashSets[hash].votePower = count - // } - // // NOTE: the fields owners and ourRow are user data for shardus and not known or used by the solving algorithm - - // return hashSetList - // } - - // /** - // * testHashsetSolution - // * @param {GenericHashSetEntry} ourHashSet - // * @param {GenericHashSetEntry} solutionHashSet - // * @returns {boolean} - // */ - // static testHashsetSolution(ourHashSet: GenericHashSetEntry, solutionHashSet: GenericHashSetEntry, log: boolean = false): boolean { - // // let payload = { partitionId: partitionId, cycle: cycleNumber, tx_indicies: requestsByHost[i].hostIndex, hash: requestsByHost[i].hash } - // // repairTracker.solutionDeltas.push({ i: requestsByHost[i].requests[j], tx: acceptedTX, pf: result.passFail[j] }) - - // // let txSourceList = txList - // // if (txList.newTxList) { - // // txSourceList = txList.newTxList - // // } - - // // solutionDeltas.sort(function (a, b) {BAD SORT return a.i - b.i }) // why did b - a help us once?? 
- - // // let debugSol = [] - // // for (let solution of repairTracker.solutionDeltas) { - // // debugSol.push({ i: solution.i, tx: solution.tx.id.slice(0, 4) }) // TXSTATE_TODO - // // } - - // let stepSize = cHashSetStepSize - // let makeTXArray = function (hashSet: GenericHashSetEntry): string[] { - // let txArray: string[] = [] - // for (let i = 0; i < hashSet.hashSet.length / stepSize; i++) { - // let offset = i * stepSize - // let v = hashSet.hashSet.slice(offset, offset + stepSize) - // txArray.push(v) - // // need to slice out state??? - // } - // return txArray - // } - - // let txSourceList = { hashes: makeTXArray(ourHashSet) } - // let solutionTxList = { hashes: makeTXArray(solutionHashSet) } - // let newTxList = { thashes: [], hashes: [], states: [] } as { thashes: string[]; hashes: string[]; states: string[] } - - // let solutionList: HashSetEntryCorrection[] = [] - // for (let correction of ourHashSet.corrections) { - // if (correction.t === 'insert') { - // solutionList.push(correction) - // } - // } - - // // hack remove extraneous extras..///////////// - // // let extraMap2 = [] - // // for (let i = 0; i < ourHashSet.extraMap.length; i++) { - // // let extraIndex = ourHashSet.extraMap[i] - // // let extraNeeded = false - // // for (let correction of ourHashSet.corrections) { - // // if (correction.i === extraIndex) { - // // extraNeeded = true - // // break - // // } - // // } - // // if (extraNeeded) { - // // continue - // // } - // // extraMap2.push(extraIndex) - // // } - // // ourHashSet.extraMap = extraMap2 - // // /////////////////////////////////////// - - // if (ourHashSet.extraMap == null) { - // if (log) if (logFlags.console) console.log(`testHashsetSolution: ourHashSet.extraMap missing`) - // return false - // } - // if (ourHashSet.indexMap == null) { - // if (log) if (logFlags.console) console.log(`testHashsetSolution: ourHashSet.indexMap missing`) - // return false - // } - // ourHashSet.extraMap.sort(utils.sortAsc) // function (a, b) { return a - b }) - // solutionList.sort(utils.sort_i_Asc) // function (a, b) { return a.i - b.i }) - - // let extraIndex = 0 - // for (let i = 0; i < txSourceList.hashes.length; i++) { - // let extra = -1 - // if (extraIndex < ourHashSet.extraMap.length) { - // extra = ourHashSet.extraMap[extraIndex] - // } - // if (extra === i) { - // extraIndex++ - // continue - // } - // if (extra == null) { - // if (log) /* prettier-ignore */ if (logFlags.console) console.log(`testHashsetSolution error extra == null at i: ${i} extraIndex: ${extraIndex}`) - // break - // } - // if (txSourceList.hashes[i] == null) { - // if (log) if (logFlags.console) console.log(`testHashsetSolution error null at i: ${i} extraIndex: ${extraIndex}`) - // break - // } - - // newTxList.thashes.push(txSourceList.hashes[i]) - // // newTxList.tpassed.push(txSourceList.passed[i]) - // // newTxList.ttxs.push(txSourceList.txs[i]) - // } - - // let hashSet = '' - // // for (let hash of newTxList.thashes) { - // // hashSet += hash.slice(0, stepSize) - - // // // todo add in the account state stuff.. 
- // // } - // hashSet = Depricated.createHashSetString(newTxList.thashes, newTxList.states) // TXSTATE_TODO - - // if (log) /* prettier-ignore */ if (logFlags.console) console.log(`extras removed: len: ${ourHashSet.indexMap.length} extraIndex: ${extraIndex} ourPreHashSet: ${hashSet}`) - - // // Txids: txSourceData.hashes, // txid1, txid2, …], - ordered from oldest to recent - // // Status: txSourceData.passed, // [1,0, …], - ordered corresponding to Txids; 1 for applied; 0 for failed - // // build our data while skipping extras. - - // // insert corrections in order for each -1 in our local list (or write from our temp lists above) - // let ourCounter = 0 - // let solutionIndex = 0 - // for (let i = 0; i < ourHashSet.indexMap.length; i++) { - // let currentIndex = ourHashSet.indexMap[i] - // if (currentIndex >= 0) { - // // pull from our list? but we have already removed stuff? - // newTxList.hashes[i] = txSourceList.hashes[currentIndex] // newTxList.thashes[ourCounter] - // // newTxList.passed[i] = newTxList.tpassed[ourCounter] - // // newTxList.txs[i] = newTxList.ttxs[ourCounter] - - // if (newTxList.hashes[i] == null) { - // if (log) /* prettier-ignore */ if (logFlags.console) console.log(`testHashsetSolution error null at i: ${i} solutionIndex: ${solutionIndex} ourCounter: ${ourCounter}`) - // return false - // } - // ourCounter++ - // } else { - // // repairTracker.solutionDeltas.push({ i: requestsByHost[i].requests[j], tx: acceptedTX, pf: result.passFail[j] }) - // // let solutionDelta = repairTracker.solutionDeltas[solutionIndex] - - // let correction = solutionList[solutionIndex] - - // if (correction == null) { - // continue - // } - // // if (!solutionDelta) { - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error( `_mergeRepairDataIntoLocalState2 a error solutionDelta=null solutionIndex: ${solutionIndex} i:${i} of ${ourHashSet.indexMap.length} deltas: ${utils.stringifyReduce(repairTracker.solutionDeltas)}`) - // // } - // // insert the next one - // newTxList.hashes[i] = solutionTxList.hashes[correction.i] // solutionDelta.tx.id - - // // newTxList.states[i] = solutionTxList.states[correction.i] // TXSTATE_TODO - - // if (newTxList.hashes[i] == null) { - // if (log) /* prettier-ignore */ if (logFlags.console) console.log(`testHashsetSolution error null at i: ${i} solutionIndex: ${solutionIndex} ourCounter: ${ourCounter}`) - // } - // // newTxList.passed[i] = solutionDelta.pf - // // newTxList.txs[i] = solutionDelta.tx - // solutionIndex++ - // // if (newTxList.hashes[i] == null) { - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error( `_mergeRepairDataIntoLocalState2 b error null at i: ${i} solutionIndex: ${solutionIndex} ourCounter: ${ourCounter}`) - // // } - // } - // } - - // hashSet = '' - // // for (let hash of newTxList.hashes) { - // // if (!hash) { - // // hashSet += 'xx' - // // continue - // // } - // // hashSet += hash.slice(0, stepSize) - // // } - // hashSet = Depricated.createHashSetString(newTxList.hashes, null) // TXSTATE_TODO newTxList.states - - // if (solutionHashSet.hashSet !== hashSet) { - // return false - // } - - // if (log) if (logFlags.console) console.log(`solved set len: ${hashSet.length / stepSize} : ${hashSet}`) - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug( `_mergeRepairDataIntoLocalState2 c len: ${ourHashSet.indexMap.length} solutionIndex: ${solutionIndex} ourCounter: ${ourCounter} ourHashSet: ${hashSet}`) - - // return true - // } - - // /** - // * createHashSetString - // * 
@param {*} txHashes // todo find correct values - // * @param {*} dataHashes - // * @returns {*} //todo correct type - // */ - // static createHashSetString(txHashes: string[], dataHashes: string[] | null) { - // let hashSet = '' - - // if (dataHashes == null) { - // for (let i = 0; i < txHashes.length; i++) { - // let txHash = txHashes[i] - - // if (!txHash) { - // txHash = 'xx' - // } - - // hashSet += txHash.slice(0, cHashSetTXStepSize + cHashSetDataStepSize) - // } - // return hashSet - // } else { - // for (let i = 0; i < txHashes.length; i++) { - // let txHash = txHashes[i] - // let dataHash = dataHashes[i] - // if (!txHash) { - // txHash = 'xx' - // } - // if (!dataHash) { - // dataHash = 'xx' - // } - // dataHash = 'xx' // temp hack stop tracking data hashes for now. - // hashSet += txHash.slice(0, cHashSetTXStepSize) - // hashSet += dataHash.slice(0, cHashSetDataStepSize) - // } - // } - - // return hashSet - // } - - // /** - // * getTXListByKey - // * just an alternative to getTXList where the calling code has alredy formed the cycle key - // * @param {string} key the cycle based key c## - // * @param {number} partitionId - // * @returns {TxTallyList} - // */ - // getTXListByKey(key: string, partitionId: number): TxTallyList { - // // let txList = this.txByCycle[key] - // // if (!txList) { - // // txList = { hashes: [], passed: [], txs: [], processed: false, states: [] } // ,txById: {} states may be an array of arraywith account after states - // // this.txByCycle[key] = txList - // // } - - // let txListByPartition = this.stateManager.partitionObjects.txByCycleByPartition[key] - // let pkey = 'p' + partitionId - // // now search for the correct partition - // if (!txListByPartition) { - // txListByPartition = {} - // this.stateManager.partitionObjects.txByCycleByPartition[key] = txListByPartition - // } - // let txList = txListByPartition[pkey] - // if (!txList) { - // txList = { hashes: [], passed: [], txs: [], processed: false, states: [] } // , txById: {} - // txListByPartition[pkey] = txList - // } - // return txList - // } - - /*** - * ####### ## ######## ######## ######## ######## ### #### ######## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ######## ###### ######## ## ## ## ######## - * ## ## ## ## ## ## ## ## ## ######### ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ####### ######## ######## ## ## ######## ## ## ## #### ## ## - */ - - // /** - // * _getRepairTrackerForCycle - // * @param {number} counter - // * @param {number} partition - // * @returns {RepairTracker} - // */ - // _getRepairTrackerForCycle(counter: number, partition: number) { - // let key = 'c' + counter - // let key2 = 'p' + partition - // let repairsByPartition = this.repairTrackingByCycleById[key] - // if (!repairsByPartition) { - // repairsByPartition = {} - // this.repairTrackingByCycleById[key] = repairsByPartition - // } - // let repairTracker = repairsByPartition[key2] - // if (!repairTracker) { - // // triedHashes: Hashes for partition objects that we have tried to reconcile with already - // // removedTXIds: a list of TXIds that we have removed - // // repairedTXs: a list of TXIds that we have added in - // // newPendingTXs: a list of TXs we fetched that are ready to process - // // newFailedTXs: a list of TXs that we fetched, they had failed so we save them but do not apply them - // // extraTXIds: a list of TXIds that our partition has that the leading partition does not. 
This is what we need to remove - // // missingTXIds: a list of TXIds that our partition has that the leading partition has that we don't. We will need to add these in using the list newPendingTXs - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(`_getRepairTrackerForCycle: creating for cycle:${counter} partition:${partition}`) - // repairTracker = { - // triedHashes: [], - // numNodes: this.stateManager.lastActiveNodeCount, // num nodes that we send partition results to - // counter: counter, - // partitionId: partition, - // key: key, - // key2: key2, - // removedTXIds: [], - // repairedTXs: [], - // newPendingTXs: [], - // newFailedTXs: [], - // extraTXIds: [], - // // extraTXs: [], - // missingTXIds: [], - // repairing: false, - // repairsNeeded: false, - // busy: false, - // txRepairComplete: false, - // txRepairReady: false, - // evaluationStarted: false, - // evaluationComplete: false, - // awaitWinningHash: false, - // repairsFullyComplete: false, - // } - // repairsByPartition[key2] = repairTracker - - // // this.dataRepairStack.push(repairTracker) - // // this.dataRepairsStarted++ - - // // let combinedKey = key + key2 - // // if (this.repairStartedMap.has(combinedKey)) { - // // if (logFlags.verbose) this.mainLogger.error(`Already started repair on ${combinedKey}`) - // // } else { - // // this.repairStartedMap.set(combinedKey, true) - // // } - // } - // return repairTracker - // } - - // /** - // * repairTrackerMarkFinished - // * @param {RepairTracker} repairTracker - // * @param {string} debugTag - // */ - // repairTrackerMarkFinished(repairTracker: RepairTracker, debugTag: string) { - // repairTracker.repairsFullyComplete = true - - // let combinedKey = repairTracker.key + repairTracker.key2 - // if (this.repairStartedMap.has(combinedKey)) { - // if (this.repairCompletedMap.has(combinedKey)) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`repairStats: finished repair ${combinedKey} -alreadyFlagged tag:${debugTag}`) - // } else { - // this.stateManager.dataRepairsCompleted++ - // this.repairCompletedMap.set(combinedKey, true) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`repairStats: finished repair ${combinedKey} tag:${debugTag}`) - // } - // } else { - // // should be a trace? 
- // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`repairStats: Calling complete on a key we dont have ${combinedKey} tag:${debugTag}`) - // } - - // for (let i = this.dataRepairStack.length - 1; i >= 0; i--) { - // let repairTracker1 = this.dataRepairStack[i] - // if (repairTracker1 === repairTracker) { - // this.dataRepairStack.splice(i, 1) - // } - // } - - // if (this.dataRepairStack.length === 0) { - // if (this.stateManager.stateIsGood === false) { - // if (logFlags.verbose) this.mainLogger.error(`No active data repair going on tag:${debugTag}`) - // } - // this.stateManager.stateIsGood = true - // this.stateManager.stateIsGood_activeRepairs = true - // this.stateManager.stateIsGood_txHashsetOld = true - // } - // } - - // /** - // * repairTrackerClearForNextRepair - // * @param {RepairTracker} repairTracker - // */ - // repairTrackerClearForNextRepair(repairTracker: RepairTracker) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` repairTrackerClearForNextRepair cycleNumber: ${repairTracker.counter} parition: ${repairTracker.partitionId} `) - // repairTracker.removedTXIds = [] - // repairTracker.repairedTXs = [] - // repairTracker.newPendingTXs = [] - // repairTracker.newFailedTXs = [] - // repairTracker.extraTXIds = [] - // repairTracker.missingTXIds = [] - // } - - // /** - // * mergeAndApplyTXRepairs - // * @param {number} cycleNumber - // * @param {number} specificParition the old version of this would repair all partitions but we had to wait. this works on just one partition - // */ - // async mergeAndApplyTXRepairs(cycleNumber: number, specificParition: number) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs cycleNumber ${cycleNumber} partition: ${specificParition}`) - // // walk through all txs for this cycle. - // // get or create entries for accounts. - // // track when they have missing txs or wrong txs - - // let lastCycleShardValues = this.stateManager.shardValuesByCycle.get(cycleNumber) - // if (lastCycleShardValues == null) { - // throw new Error('mergeAndApplyTXRepairs lastCycleShardValues == null') - // } - // if (lastCycleShardValues.ourConsensusPartitions == null) { - // throw new Error('mergeAndApplyTXRepairs lastCycleShardValues.ourConsensusPartitions') - // } - - // for (let partitionID of lastCycleShardValues.ourConsensusPartitions) { - // // this is an attempt to just repair one parition. - // if (partitionID !== specificParition) { - // continue - // } - - // let allTXsToApply: StringNumberObjectMap = {} - // let allExtraTXids: StringNumberObjectMap = {} - // let allAccountsToResetById: StringNumberObjectMap = {} - // let txIDToAcc: TxIDToSourceTargetObjectMap = {} - // let allNewTXsById: TxObjectById = {} - // // get all txs and sort them - // let repairsByPartition = this.repairTrackingByCycleById['c' + cycleNumber] - // // let partitionKeys = Object.keys(repairsByPartition) - // // for (let key of partitionKeys) { - // let key = 'p' + partitionID - // let repairEntry = repairsByPartition[key] - // for (let tx of repairEntry.newPendingTXs) { - // if (utils.isString(tx.data)) { - // // @ts-ignore sometimes we have a data field that gets stuck as a string. would be smarter to fix this upstream. 
- // tx.data = JSON.parse(tx.data) - // } - // let keysResponse = this.app.getKeyFromTransaction(tx.data) - - // if (!keysResponse) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs problem with keysResp ${utils.stringifyReduce(keysResponse)} tx: ${utils.stringifyReduce(tx)}`) - // } - - // let { sourceKeys, targetKeys } = keysResponse - - // for (let accountID of sourceKeys) { - // allAccountsToResetById[accountID] = 1 - // } - // for (let accountID of targetKeys) { - // allAccountsToResetById[accountID] = 1 - // } - // allNewTXsById[tx.id] = tx - // txIDToAcc[tx.id] = { sourceKeys, targetKeys } - // } - // for (let tx of repairEntry.missingTXIds) { - // allTXsToApply[tx] = 1 - // } - // for (let tx of repairEntry.extraTXIds) { - // allExtraTXids[tx] = 1 - // // TODO Repair. ugh have to query our data and figure out which accounts need to be reset. - // } - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs: extra: ${utils.stringifyReduce(allExtraTXids)} txIDToAcc: ${utils.stringifyReduce(txIDToAcc)}`) - - // // todo repair: hmmm also reset accounts have a tx we need to remove. - // // } - - // let txList = this.stateManager.partitionObjects.getTXList(cycleNumber, partitionID) // done todo sharding: pass partition ID - - // let txIDToAccCount = 0 - // let txIDResetExtraCount = 0 - // // build a list with our existing txs, but dont include the bad ones - // if (txList) { - // for (let i = 0; i < txList.txs.length; i++) { - // let tx = txList.txs[i] - // if (allExtraTXids[tx.id]) { - // // this was a bad tx dont include it. we have to look up the account associated with this tx and make sure they get reset - // let keysResponse = this.app.getKeyFromTransaction(tx.data) - // if (!keysResponse) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs problem with keysResp2 ${utils.stringifyReduce(keysResponse)} tx: ${utils.stringifyReduce(tx)}`) - // } - // let { sourceKeys, targetKeys } = keysResponse - // for (let accountID of sourceKeys) { - // allAccountsToResetById[accountID] = 1 - // txIDResetExtraCount++ - // } - // for (let accountID of targetKeys) { - // allAccountsToResetById[accountID] = 1 - // txIDResetExtraCount++ - // } - // } else { - // // a good tx that we had earlier - // let keysResponse = this.app.getKeyFromTransaction(tx.data) - // let { sourceKeys, targetKeys } = keysResponse - // allNewTXsById[tx.id] = tx - // txIDToAcc[tx.id] = { sourceKeys, targetKeys } - // txIDToAccCount++ - // // we will only play back the txs on accounts that point to allAccountsToResetById - // } - // } - // } else { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs txList not found for: cycle: ${cycleNumber} in ${utils.stringifyReduce(this.stateManager.partitionObjects.txByCycleByPartition)}`) - // } - - // // build and sort a list of TXs that we need to apply - - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs txIDResetExtraCount: ${txIDResetExtraCount} allAccountsToResetById ${utils.stringifyReduce(allAccountsToResetById)}`) - // // reset accounts - // let accountKeys = Object.keys(allAccountsToResetById) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs revert accountKeys 
${utils.stringifyReduce(accountKeys)}`) - - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs FIFO lock outer: ${cycleNumber} ${utils.stringifyReduce(accountKeys)}`) - // let ourAccountLocks = await this.stateManager.bulkFifoLockAccounts(accountKeys) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs FIFO lock inner: ${cycleNumber} ${utils.stringifyReduce(accountKeys)}`) - - // // let replacmentAccounts = //returned by the below function for debug - // await this._revertAccounts(accountKeys, cycleNumber) - - // // todo sharding - done extracted tx list calcs to run just for this partition inside of here. how does this relate to having a shard for every?? - // // convert allNewTXsById map to newTXList list - // let newTXList = [] - // let txKeys = Object.keys(allNewTXsById) - // for (let txKey of txKeys) { - // let tx = allNewTXsById[txKey] - // newTXList.push(tx) - // } - - // // sort the list by ascending timestamp - // newTXList.sort(utils.sortTimestampAsc) // (function (a, b) { return a.timestamp - b.timestamp }) - - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs newTXList ${utils.stringifyReduce(newTXList)}`) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs newTXList.length: ${newTXList.length} txKeys.length: ${txKeys.length} txIDToAccCount: ${txIDToAccCount}`) - - // let applyCount = 0 - // let applyFailCount = 0 - // let hasEffect = false - - // let accountValuesByKey: AccountValuesByKey = {} - // // let wrappedAccountResults = this.app.getAccountDataByList(accountKeys) - // // for (let wrappedData of wrappedAccountResults) { - // // wrappedData.isPartial = false - // // accountValuesByKey[wrappedData.accountId] = wrappedData - // // } - // // let wrappedAccountResults=[] - // // for(let key of accountKeys){ - // // this.app.get - // // } - - // // todo sharding - done (solved by brining newTX clacs inside of this loop) does newTXList need to be filtered? we are looping over every partition. could this cause us to duplicate effort? YES allNewTXsById is handled above/outside of this loop - // for (let tx of newTXList) { - // let keysFilter = txIDToAcc[tx.id] - // // need a transform to map all txs that would matter. - // try { - // if (keysFilter) { - // let acountsFilter: AccountFilter = {} // this is a filter of accounts that we want to write to - // // find which accounts need txs applied. 
- // hasEffect = false - // for (let accountID of keysFilter.sourceKeys) { - // if (allAccountsToResetById[accountID]) { - // acountsFilter[accountID] = 1 - // hasEffect = true - // } - // } - // for (let accountID of keysFilter.targetKeys) { - // if (allAccountsToResetById[accountID]) { - // acountsFilter[accountID] = 1 - // hasEffect = true - // } - // } - // if (!hasEffect) { - // // no need to apply this tx because it would do nothing - // continue - // } - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs apply tx ${utils.makeShortHash(tx.id)} ${tx.timestamp} data: ${utils.stringifyReduce(tx)} with filter: ${utils.stringifyReduce(acountsFilter)}`) - // let hasStateTableData = false // may or may not have it but not tracking yet - - // // TSConversion old way used to do this but seem incorrect to have receipt under data! - // // HACK!! receipts sent across the net to us may need to get re parsed - // // if (utils.isString(tx.data.receipt)) { - // // tx.data.receipt = JSON.parse(tx.data.receipt) - // // } - - // if (utils.isString(tx.receipt)) { - // //@ts-ignore - // tx.receipt = JSON.parse(tx.receipt) - // } - - // // todo needs wrapped states! and/or localCachedData - - // // Need to build up this data. - // let keysResponse = this.app.getKeyFromTransaction(tx.data) - // let wrappedStates: WrappedResponses = {} - // let localCachedData: LocalCachedData = {} - // for (let key of keysResponse.allKeys) { - // // build wrapped states - // // let wrappedState = await this.app.getRelevantData(key, tx.data) - - // let wrappedState: Shardus.WrappedResponse = accountValuesByKey[key] // need to init ths data. allAccountsToResetById[key] - // if (wrappedState == null) { - // // Theoretically could get this data from when we revert the data above.. - // wrappedState = await this.app.getRelevantData(key, tx.data) - // accountValuesByKey[key] = wrappedState - // } else { - // wrappedState.accountCreated = false // kinda crazy assumption - // } - // wrappedStates[key] = wrappedState - // localCachedData[key] = wrappedState.localCache - // // delete wrappedState.localCache - // } - - // let success = await this.testAccountTime(tx.data, wrappedStates) - - // if (!success) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(' testAccountTime failed. calling apoptosis. mergeAndApplyTXRepairs' + utils.stringifyReduce(tx)) - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('testAccountTime_failed', `${tx.id}`, ` testAccountTime failed. calling apoptosis. mergeAndApplyTXRepairs`) - - // this.statemanager_fatal(`testAccountTime_failed`, ' testAccountTime failed. calling apoptosis. mergeAndApplyTXRepairs' + utils.stringifyReduce(tx)) - - // // return - // this.p2p.initApoptosis() // todo turn this back on - // // // return { success: false, reason: 'testAccountTime failed' } - // break - // } - - // let applied = await this.tryApplyTransaction(tx, hasStateTableData, true, acountsFilter, wrappedStates, localCachedData) // TODO app interface changes.. how to get and pass the state wrapped account state in, (maybe simple function right above this - // // accountValuesByKey = {} // clear this. 
it forces more db work but avoids issue with some stale flags - // if (!applied) { - // applyFailCount++ - // if (logFlags.verbose) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs apply failed`) - // } else { - // applyCount++ - // } - // } else { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs no for ${tx.id} in ${utils.stringifyReduce(txIDToAcc)}`) - // } - // } catch (ex) { - // this.mainLogger.debug('_repair: startRepairProcess mergeAndApplyTXRepairs apply: ' + ` ${utils.stringifyReduce({ tx, keysFilter })} ` + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // this.statemanager_fatal(`mergeAndApplyTXRepairs_ex`, '_repair: startRepairProcess mergeAndApplyTXRepairs apply: ' + ` ${utils.stringifyReduce({ tx, keysFilter })} ` + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // } - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs applyCount ${applyCount} applyFailCount: ${applyFailCount}`) - // } - - // // unlock the accounts we locked... todo maybe put this in a finally statement? - // this.stateManager.bulkFifoUnlockAccounts(accountKeys, ourAccountLocks) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair mergeAndApplyTXRepairs FIFO unlock: ${cycleNumber} ${utils.stringifyReduce(accountKeys)}`) - // } - // } - - // /** - // * updateTrackingAndPrepareChanges - // * @param {number} cycleNumber - // * @param {number} specificParition the old version of this would repair all partitions but we had to wait. this works on just one partition - // */ - // async updateTrackingAndPrepareRepairs(cycleNumber: number, specificParition: number) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair updateTrackingAndPrepareRepairs cycleNumber ${cycleNumber} partition: ${specificParition}`) - // // walk through all txs for this cycle. - // // get or create entries for accounts. - // // track when they have missing txs or wrong txs - // let debugKey = `c${cycleNumber}p${specificParition}` - // let lastCycleShardValues = this.stateManager.shardValuesByCycle.get(cycleNumber) - // let paritionsServiced = 0 - // try { - // // this was locking us to consensus only partitions. really just preap anything that is called on this fuciton since other logic may be doing work - // // on stored partitions. - - // // for (let partitionID of lastCycleShardValues.ourConsensusPartitions) { - // // // this is an attempt to just repair one parition. - // // if (partitionID !== specificParition) { - // // continue - // // } - // let partitionID = specificParition - // paritionsServiced++ - // let allTXsToApply: StringNumberObjectMap = {} - // let allExtraTXids: StringNumberObjectMap = {} - // /** @type {Object.} */ - // let allAccountsToResetById: StringNumberObjectMap = {} - // /** @type {Object.} */ - // let txIDToAcc: TxIDToSourceTargetObjectMap = {} - // let allNewTXsById: TxObjectById = {} - // // get all txs and sort them - // let repairsByPartition = this.repairTrackingByCycleById['c' + cycleNumber] - // // let partitionKeys = Object.keys(repairsByPartition) - // // for (let key of partitionKeys) { - // let key = 'p' + partitionID - // let repairEntry = repairsByPartition[key] - // for (let tx of repairEntry.newPendingTXs) { - // if (utils.isString(tx.data)) { - // // @ts-ignore sometimes we have a data field that gets stuck as a string. would be smarter to fix this upstream. 
- // tx.data = JSON.parse(tx.data) - // } - // let keysResponse = this.app.getKeyFromTransaction(tx.data) - - // if (!keysResponse) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair updateTrackingAndPrepareRepairs problem with keysResp ${utils.stringifyReduce(keysResponse)} tx: ${utils.stringifyReduce(tx)}`) - // } - - // let { sourceKeys, targetKeys } = keysResponse - - // for (let accountID of sourceKeys) { - // allAccountsToResetById[accountID] = 1 - // } - // for (let accountID of targetKeys) { - // allAccountsToResetById[accountID] = 1 - // } - // allNewTXsById[tx.id] = tx - // txIDToAcc[tx.id] = { sourceKeys, targetKeys } - // } - // for (let tx of repairEntry.missingTXIds) { - // allTXsToApply[tx] = 1 - // } - // for (let tx of repairEntry.extraTXIds) { - // allExtraTXids[tx] = 1 - // // TODO Repair. ugh have to query our data and figure out which accounts need to be reset. - // } - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair updateTrackingAndPrepareRepairs: extra: ${utils.stringifyReduce(allExtraTXids)} txIDToAcc: ${utils.stringifyReduce(txIDToAcc)}`) - - // // todo repair: hmmm also reset accounts have a tx we need to remove. - // // } - - // let txList = this.stateManager.partitionObjects.getTXList(cycleNumber, partitionID) // done todo sharding: pass partition ID - - // let txIDToAccCount = 0 - // let txIDResetExtraCount = 0 - // // build a list with our existing txs, but dont include the bad ones - // if (txList) { - // for (let i = 0; i < txList.txs.length; i++) { - // let tx = txList.txs[i] - // if (allExtraTXids[tx.id]) { - // // this was a bad tx dont include it. we have to look up the account associated with this tx and make sure they get reset - // let keysResponse = this.app.getKeyFromTransaction(tx.data) - // if (!keysResponse) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair updateTrackingAndPrepareRepairs problem with keysResp2 ${utils.stringifyReduce(keysResponse)} tx: ${utils.stringifyReduce(tx)}`) - // } - // let { sourceKeys, targetKeys } = keysResponse - // for (let accountID of sourceKeys) { - // allAccountsToResetById[accountID] = 1 - // txIDResetExtraCount++ - // } - // for (let accountID of targetKeys) { - // allAccountsToResetById[accountID] = 1 - // txIDResetExtraCount++ - // } - // } else { - // // a good tx that we had earlier - // let keysResponse = this.app.getKeyFromTransaction(tx.data) - // let { sourceKeys, targetKeys } = keysResponse - // allNewTXsById[tx.id] = tx - // txIDToAcc[tx.id] = { sourceKeys, targetKeys } - // txIDToAccCount++ - // // we will only play back the txs on accounts that point to allAccountsToResetById - // } - // } - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair updateTrackingAndPrepareRepairs txIDResetExtraCount:${txIDResetExtraCount} txIDToAccCount: ${txIDToAccCount}`) - // } else { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair updateTrackingAndPrepareRepairs txList not found for: cycle: ${cycleNumber} in ${utils.stringifyReduce(this.stateManager.partitionObjects.txByCycleByPartition)}`) - // } - - // // build and sort a list of TXs that we need to apply - - // // OLD reset account code was here. - - // // todo sharding - done extracted tx list calcs to run just for this partition inside of here. how does this relate to having a shard for every?? 
- // // convert allNewTXsById map to newTXList list - // let newTXList = [] - // let txKeys = Object.keys(allNewTXsById) - // for (let txKey of txKeys) { - // let tx = allNewTXsById[txKey] - // newTXList.push(tx) - // } - - // // sort the list by ascending timestamp - // newTXList.sort(utils.sortTimestampAsc) // function (a, b) { return a.timestamp - b.timestamp }) - - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair updateTrackingAndPrepareRepairs newTXList ${utils.stringifyReduce(newTXList)}`) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair updateTrackingAndPrepareRepairs newTXList.length: ${newTXList.length} txKeys.length: ${txKeys.length} txIDToAccCount: ${txIDToAccCount}`) - - // // Save the results of this computation for later - // /** @type {UpdateRepairData} */ - // let updateData: UpdateRepairData = { newTXList, allAccountsToResetById, partitionId: specificParition, txIDToAcc } - // let ckey = 'c' + cycleNumber - // if (this.repairUpdateDataByCycle[ckey] == null) { - // this.repairUpdateDataByCycle[ckey] = [] - // } - // this.repairUpdateDataByCycle[ckey].push(updateData) - - // // how will the partition object get updated though?? - // // } - - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair updateTrackingAndPrepareRepairs finished`) - // if (paritionsServiced === 0) { - // this.statemanager_fatal(`_updateTrackingAndPrepareRepairs_fail`, `_updateTrackingAndPrepareRepairs failed. not partitions serviced: ${debugKey} our consensus:${utils.stringifyReduce(lastCycleShardValues?.ourConsensusPartitions)} `) - // } - // } catch (ex) { - // this.mainLogger.debug('__updateTrackingAndPrepareRepairs: exception ' + ` ${debugKey} ` + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // this.statemanager_fatal(`_updateTrackingAndPrepareRepairs_ex`, '__updateTrackingAndPrepareRepairs: exception ' + ` ${debugKey} ` + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // } - // } - - // /** - // * updateTrackingAndPrepareChanges - // * @param {number} cycleNumber - // */ - // async applyAllPreparedRepairs(cycleNumber: number) { - // if (this.applyAllPreparedRepairsRunning === true) { - // return - // } - // this.applyAllPreparedRepairsRunning = true - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair applyAllPreparedRepairs cycleNumber ${cycleNumber}`) - - // this.mainLogger.debug(`applyAllPreparedRepairs c:${cycleNumber}`) - - // let ckey = 'c' + cycleNumber - // let repairDataList = this.repairUpdateDataByCycle[ckey] - - // let txIDToAcc: TxIDToKeyObjectMap = {} - // let allAccountsToResetById: AccountBoolObjectMap = {} - // let newTXList: AcceptedTx[] = [] - // for (let repairData of repairDataList) { - // newTXList = newTXList.concat(repairData.newTXList) - // allAccountsToResetById = Object.assign(allAccountsToResetById, repairData.allAccountsToResetById) - // txIDToAcc = Object.assign(txIDToAcc, repairData.txIDToAcc) - // this.mainLogger.debug(`applyAllPreparedRepairs c${cycleNumber}p${repairData.partitionId} reset:${Object.keys(repairData.allAccountsToResetById).length} txIDToAcc:${Object.keys(repairData.txIDToAcc).length} keys: ${utils.stringifyReduce(Object.keys(repairData.allAccountsToResetById))} `) - // } - // this.mainLogger.debug(`applyAllPreparedRepairs total reset:${Object.keys(allAccountsToResetById).length} 
txIDToAcc:${Object.keys(txIDToAcc).length}`) - - // newTXList.sort(utils.sortTimestampAsc) // function (a, b) { return a.timestamp - b.timestamp }) - - // // build and sort a list of TXs that we need to apply - - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair applyAllPreparedRepairs allAccountsToResetById ${utils.stringifyReduce(allAccountsToResetById)}`) - // // reset accounts - // let accountKeys = Object.keys(allAccountsToResetById) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair applyAllPreparedRepairs revert accountKeys ${utils.stringifyReduce(accountKeys)}`) - - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair applyAllPreparedRepairs FIFO lock outer: ${cycleNumber} ${utils.stringifyReduce(accountKeys)}`) - // let ourAccountLocks = await this.stateManager.bulkFifoLockAccounts(accountKeys) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair applyAllPreparedRepairs FIFO lock inner: ${cycleNumber} ${utils.stringifyReduce(accountKeys)}`) - - // // let replacmentAccounts = //returned by the below function for debug - // await this._revertAccounts(accountKeys, cycleNumber) - - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair applyAllPreparedRepairs newTXList ${utils.stringifyReduce(newTXList)}`) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair applyAllPreparedRepairs newTXList.length: ${newTXList.length}`) - - // let applyCount = 0 - // let applyFailCount = 0 - // let hasEffect = false - // let hasNonGlobalEffect = false - - // // TSConversion WrappedStates issue - // let accountValuesByKey: WrappedResponses = {} - - // let seenTXs: StringBoolObjectMap = {} - // for (let tx of newTXList) { - // if (seenTXs[tx.id] === true) { - // this.mainLogger.debug(`applyAllPreparedRepairs skipped double: ${utils.makeShortHash(tx.id)} ${tx.timestamp} `) - // continue - // } - // seenTXs[tx.id] = true - - // let keysFilter = txIDToAcc[tx.id] - // // need a transform to map all txs that would matter. - // try { - // if (keysFilter) { - // let acountsFilter: AccountFilter = {} // this is a filter of accounts that we want to write to - // // find which accounts need txs applied. - // hasEffect = false - // hasNonGlobalEffect = false - // for (let accountID of keysFilter.sourceKeys) { - // if (allAccountsToResetById[accountID]) { - // acountsFilter[accountID] = 1 - // hasEffect = true - // if (this.stateManager.accountGlobals.isGlobalAccount(accountID) === false) { - // hasNonGlobalEffect = true - // } - // } - // } - // for (let accountID of keysFilter.targetKeys) { - // if (allAccountsToResetById[accountID]) { - // acountsFilter[accountID] = 1 - // hasEffect = true - // if (this.stateManager.accountGlobals.isGlobalAccount(accountID) === false) { - // hasNonGlobalEffect = true - // } - // } - // } - // if (!hasEffect) { - // // no need to apply this tx because it would do nothing - // continue - // } - // if (!hasNonGlobalEffect) { - // //if only a global account involved then dont reset! 
- // continue - // } - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair applyAllPreparedRepairs apply tx ${utils.makeShortHash(tx.id)} ${tx.timestamp} data: ${utils.stringifyReduce(tx)} with filter: ${utils.stringifyReduce(acountsFilter)}`) - // let hasStateTableData = false // may or may not have it but not tracking yet - - // // TSConversion old way used to do this but seem incorrect to have receipt under data! - // // // HACK!! receipts sent across the net to us may need to get re parsed - // // if (utils.isString(tx.data.receipt)) { - // // tx.data.receipt = JSON.parse(tx.data.receipt) - // // } - // if (utils.isString(tx.receipt)) { - // //@ts-ignore - // tx.receipt = JSON.parse(tx.receipt) - // } - - // // todo needs wrapped states! and/or localCachedData - - // // Need to build up this data. - // let keysResponse = this.app.getKeyFromTransaction(tx.data) - // let wrappedStates: WrappedResponses = {} - // let localCachedData: LocalCachedData = {} - // for (let key of keysResponse.allKeys) { - // // build wrapped states - // // let wrappedState = await this.app.getRelevantData(key, tx.data) - - // let wrappedState: Shardus.WrappedResponse = accountValuesByKey[key] // need to init ths data. allAccountsToResetById[key] - // if (wrappedState == null) { - // // Theoretically could get this data from when we revert the data above.. - // wrappedState = await this.app.getRelevantData(key, tx.data) - // // what to do in failure case. - // accountValuesByKey[key] = wrappedState - // } else { - // wrappedState.accountCreated = false // kinda crazy assumption - // } - // wrappedStates[key] = wrappedState - // localCachedData[key] = wrappedState.localCache - // // delete wrappedState.localCache - // } - - // let success = await this.testAccountTime(tx.data, wrappedStates) - - // if (!success) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(' applyAllPreparedRepairs testAccountTime failed. calling apoptosis. applyAllPreparedRepairs' + utils.stringifyReduce(tx)) - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('testAccountTime_failed', `${tx.id}`, ` applyAllPreparedRepairs testAccountTime failed. calling apoptosis. applyAllPreparedRepairs`) - // this.statemanager_fatal(`applyAllPreparedRepairs_fail`, ' testAccountTime failed. calling apoptosis. applyAllPreparedRepairs' + utils.stringifyReduce(tx)) - - // // return - // this.p2p.initApoptosis() // todo turn this back on - // // // return { success: false, reason: 'testAccountTime failed' } - // break - // } - - // // TODO: globalaccounts this is where we go through the account state and just in time grab global accounts from the cache we made in the revert section from backup copies. - // // TODO Perf probably could prepare of this inforamation above more efficiently but for now this is most simple and self contained. - - // //TODO verify that we will even have wrapped states at this point in the repair without doing some extra steps. - // let wrappedStateKeys = Object.keys(wrappedStates) - // for (let wrappedStateKey of wrappedStateKeys) { - // let wrappedState = wrappedStates[wrappedStateKey] - - // // if(wrappedState == null) { - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error( ` _repair applyAllPreparedRepairs wrappedState == null ${utils.stringifyReduce(wrappedStateKey)} ${tx.timestamp}`) - // // //could continue but want to see if there is more we can log. - // // } - // //is it global. 
- // if (this.stateManager.accountGlobals.isGlobalAccount(wrappedStateKey)) { - // // wrappedState.accountId)){ - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('globalAccountMap', `applyAllPreparedRepairs - has`, ` ${wrappedState.accountId} ${wrappedStateKey}`) - // if (wrappedState != null) { - // let globalValueSnapshot = this.stateManager.accountGlobals.getGlobalAccountValueAtTime(wrappedState.accountId, tx.timestamp) - - // if (globalValueSnapshot == null) { - // //todo some error? - // let globalAccountBackupList = this.stateManager.accountGlobals.getGlobalAccountBackupList(wrappedStateKey) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(` _repair applyAllPreparedRepairs has global key but no snapshot at time ${tx.timestamp} entries:${globalAccountBackupList.length} ${utils.stringifyReduce(globalAccountBackupList.map((a) => `${a.timestamp} ${utils.makeShortHash(a.accountId)} `))} `) - // continue - // } - // // build a new wrapped response to insert - // let newWrappedResponse: Shardus.WrappedResponse = { accountCreated: wrappedState.accountCreated, isPartial: false, accountId: wrappedState.accountId, timestamp: wrappedState.timestamp, stateId: globalValueSnapshot.hash, data: globalValueSnapshot.data } - // //set this new value into our wrapped states. - // wrappedStates[wrappedStateKey] = newWrappedResponse // update!! - // // insert thes data into the wrapped states. - // // yikes probably cant do local cached data at this point. - // if (logFlags.verbose) { - // let globalAccountBackupList = this.stateManager.accountGlobals.getGlobalAccountBackupList(wrappedStateKey) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(` _repair applyAllPreparedRepairs has global key details ${tx.timestamp} entries:${globalAccountBackupList.length} ${utils.stringifyReduce(globalAccountBackupList.map((a) => `${a.timestamp} ${utils.makeShortHash(a.accountId)} `))} `) - // } - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair applyAllPreparedRepairs got global account to repair from: ${utils.stringifyReduce(newWrappedResponse)}`) - // } - // } else { - // if (wrappedState == null) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(` _repair applyAllPreparedRepairs is not a global account but wrapped state == null ${utils.stringifyReduce(wrappedStateKey)} ${tx.timestamp}`) - // } - // } - // } - - // let applied = await this.tryApplyTransaction(tx, hasStateTableData, /** repairing */ true, acountsFilter, wrappedStates, localCachedData) // TODO app interface changes.. how to get and pass the state wrapped account state in, (maybe simple function right above this - // // accountValuesByKey = {} // clear this. 
it forces more db work but avoids issue with some stale flags - // if (!applied) { - // applyFailCount++ - // if (logFlags.verbose) this.mainLogger.debug(` _repair applyAllPreparedRepairs apply failed`) - // } else { - // applyCount++ - // } - // } else { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair applyAllPreparedRepairs no for ${tx.id} in ${utils.stringifyReduce(txIDToAcc)}`) - // } - // } catch (ex) { - // this.mainLogger.debug('_repair: startRepairProcess applyAllPreparedRepairs apply: ' + ` ${utils.stringifyReduce({ tx, keysFilter })} ` + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // this.statemanager_fatal(`applyAllPreparedRepairs_fail`, '_repair: startRepairProcess applyAllPreparedRepairs apply: ' + ` ${utils.stringifyReduce({ tx, keysFilter })} ` + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // } - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair applyAllPreparedRepairs applyCount ${applyCount} applyFailCount: ${applyFailCount}`) - // } - - // // unlock the accounts we locked... todo maybe put this in a finally statement? - // this.stateManager.bulkFifoUnlockAccounts(accountKeys, ourAccountLocks) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair applyAllPreparedRepairs FIFO unlock: ${cycleNumber} ${utils.stringifyReduce(accountKeys)}`) - // // } - // this.applyAllPreparedRepairsRunning = false - // } - - // /** - // * _revertAccounts - // * @param {string[]} accountIDs - // * @param {number} cycleNumber - // */ - // async _revertAccounts(accountIDs: string[], cycleNumber: number) { - // let cycle = this.p2p.state.getCycleByCounter(cycleNumber) - // let cycleEnd = (cycle.start + cycle.duration) * 1000 - // let cycleStart = cycle.start * 1000 - // cycleEnd -= this.stateManager.syncSettleTime // adjust by sync settle time - // cycleStart -= this.stateManager.syncSettleTime // adjust by sync settle time - // let replacmentAccounts: Shardus.AccountsCopy[] - // let replacmentAccountsMinusGlobals = [] as Shardus.AccountsCopy[] - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair _revertAccounts start numAccounts: ${accountIDs.length} repairing cycle:${cycleNumber}`) - - // try { - // // query our account copies that are less than or equal to this cycle! 
- // let prevCycle = cycleNumber - 1
-
- // replacmentAccounts = (await this.storage.getAccountReplacmentCopies(accountIDs, prevCycle)) as Shardus.AccountsCopy[]
-
- // if (replacmentAccounts.length > 0) {
- // for (let accountData of replacmentAccounts) {
- // if (utils.isString(accountData.data)) {
- // accountData.data = JSON.parse(accountData.data)
- // // hack, mode the owner so we can see the rewrite taking place
- // // accountData.data.data.data = { rewrite: cycleNumber }
- // }
-
- // if (accountData == null || accountData.data == null || accountData.accountId == null) {
- // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(` _repair _revertAccounts null account data found: ${accountData.accountId} cycle: ${cycleNumber} data: ${utils.stringifyReduce(accountData)}`)
- // } else {
- // // todo overkill
- // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair _revertAccounts reset: ${utils.makeShortHash(accountData.accountId)} ts: ${utils.makeShortHash(accountData.timestamp)} cycle: ${cycleNumber} data: ${utils.stringifyReduce(accountData)}`)
- // }
- // // TODO: globalaccounts
- // //this is where we need to no reset a global account, but instead grab the replacment data and cache it
- // /// ////////////////////////
- // //let isGlobalAccount = this.stateManager.accountGlobals.globalAccountMap.has(accountData.accountId )
-
- // //Try not reverting global accounts..
- // if (this.stateManager.accountGlobals.isGlobalAccount(accountData.accountId) === false) {
- // replacmentAccountsMinusGlobals.push(accountData)
- // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair _revertAccounts not a global account, add to list ${utils.makeShortHash(accountData.accountId)}`)
- // } else {
- // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair _revertAccounts was a global account, do not add to list ${utils.makeShortHash(accountData.accountId)}`)
- // }
- // }
- // // tell the app to replace the account data
- // //await this.app.resetAccountData(replacmentAccounts)
- // await this.app.resetAccountData(replacmentAccountsMinusGlobals)
- // // update local state.
- // } else {
- // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair _revertAccounts No replacment accounts found!!! cycle <= :${prevCycle}`)
- // }
-
- // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair _revertAccounts: ${accountIDs.length} replacmentAccounts ${replacmentAccounts.length} repairing cycle:${cycleNumber} replacmentAccountsMinusGlobals: ${replacmentAccountsMinusGlobals.length}`)
-
- // // TODO prodution. consider if we need a better set of checks before we delete an account!
- // // If we don't have a replacement copy for an account we should try to delete it
-
- // Find any accountIDs not in resetAccountData
- // let accountsReverted: StringNumberObjectMap = {}
- // let accountsToDelete: string[] = []
- // let debug = []
- // for (let accountData of replacmentAccounts) {
- // accountsReverted[accountData.accountId] = 1
- // if (accountData.cycleNumber > prevCycle) {
- // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(` _repair _revertAccounts cycle too new for backup restore: ${accountData.cycleNumber} cycleNumber:${cycleNumber} timestamp:${accountData.timestamp}`)
- // }
-
- // debug.push({ id: accountData.accountId, cycleNumber: accountData.cycleNumber, timestamp: accountData.timestamp, hash: accountData.hash, accHash: accountData.data.hash, accTs: accountData.data.timestamp })
- // }
-
- // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair _revertAccounts: ${utils.stringifyReduce(debug)}`)
-
- // for (let accountID of accountIDs) {
- // if (accountsReverted[accountID] == null) {
- // accountsToDelete.push(accountID)
- // }
- // }
- // if (accountsToDelete.length > 0) {
- // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair _revertAccounts delete some accounts ${utils.stringifyReduce(accountsToDelete)}`)
- // await this.app.deleteAccountData(accountsToDelete)
- // }
-
- // // mark for kill future txlist stuff for any accounts we nuked
-
- // // make a map to find impacted accounts
- // let accMap: StringNumberObjectMap = {}
- // for (let accid of accountIDs) {
- // accMap[accid] = 1
- // }
- // // check for this.tempTXRecords that involve accounts we are going to clear
- // for (let txRecord of this.stateManager.partitionObjects.tempTXRecords) {
- // // if (txRecord.txTS < cycleEnd) {
- // let keysResponse = this.app.getKeyFromTransaction(txRecord.acceptedTx.data)
- // if (!keysResponse) {
- // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair _revertAccounts problem with keysResp ${utils.stringifyReduce(keysResponse)} tx: ${utils.stringifyReduce(txRecord.acceptedTx)}`)
- // }
- // let { sourceKeys, targetKeys } = keysResponse
- // for (let accountID of sourceKeys) {
- // if (accMap[accountID]) {
- // txRecord.redacted = cycleNumber
- // }
- // }
- // for (let accountID of targetKeys) {
- // if (accMap[accountID]) {
- // txRecord.redacted = cycleNumber
- // }
- // }
- // // }
- // }
-
- // // clear out bad state table data!!
- // // add number to clear future state table data too
- // await this.storage.clearAccountStateTableByList(accountIDs, cycleStart, cycleEnd + 1000000)
-
- // // clear replacement copies for this cycle for these accounts!
-
- // // todo clear based on GTE!!!
- // await this.storage.clearAccountReplacmentCopies(accountIDs, cycleNumber) - // } catch (ex) { - // this.mainLogger.debug('_repair: _revertAccounts mergeAndApplyTXRepairs ' + ` ${utils.stringifyReduce({ cycleNumber, cycleEnd, cycleStart, accountIDs })} ` + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // this.statemanager_fatal(`_revertAccounts_ex`, '_repair: _revertAccounts mergeAndApplyTXRepairs ' + ` ${utils.stringifyReduce({ cycleNumber, cycleEnd, cycleStart, accountIDs })} ` + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // } - - // return replacmentAccounts // this is for debugging reference - // } - - // async testAccountTime(tx: Shardus.OpaqueTransaction, wrappedStates: WrappedStates) { - // function tryGetAccountData(accountID: string) { - // return wrappedStates[accountID] - // } - - // try { - // let keysResponse = this.app.getKeyFromTransaction(tx) - // let { timestamp } = keysResponse // sourceKeys, targetKeys, - // // check account age to make sure it is older than the tx - // let failedAgeCheck = false - - // let accountKeys = Object.keys(wrappedStates) - // for (let key of accountKeys) { - // let accountEntry = tryGetAccountData(key) - // if (accountEntry.timestamp >= timestamp) { - // failedAgeCheck = true - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('testAccountTime account has future state. id: ' + utils.makeShortHash(accountEntry.accountId) + ' time: ' + accountEntry.timestamp + ' txTime: ' + timestamp + ' delta: ' + (timestamp - accountEntry.timestamp)) - // } - // } - // if (failedAgeCheck) { - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('DATASYNC: testAccountTimesAndStateTable accounts have future state ' + timestamp) - // return false - // } - // } catch (ex) { - // this.statemanager_fatal(`testAccountTime-fail_ex`, 'testAccountTime failed: ' + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // return false - // } - // return true // { success: true, hasStateTableData } - // } - - // state ids should be checked before applying this transaction because it may have already been applied while we were still syncing data. - // async tryApplyTransaction(acceptedTX: AcceptedTx, hasStateTableData: boolean, repairing: boolean, filter: AccountFilter, wrappedStates: WrappedResponses, localCachedData: LocalCachedData) { - // let ourLockID = -1 - // let accountDataList - // let txTs = 0 - // let accountKeys = [] - // let ourAccountLocks = null - // let applyResponse: Shardus.ApplyResponse | null = null - // //have to figure out if this is a global modifying tx, since that impacts if we will write to global account. - // let isGlobalModifyingTX = false - // let savedSomething = false - // try { - // let tx = acceptedTX.data - // // let receipt = acceptedTX.receipt - // let keysResponse = this.app.getKeyFromTransaction(tx) - // let { timestamp, debugInfo } = keysResponse - // txTs = timestamp - - // let queueEntry = this.stateManager.transactionQueue.getQueueEntry(acceptedTX.id) - // if (queueEntry != null) { - // if (queueEntry.globalModification === true) { - // isGlobalModifyingTX = true - // } - // } - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryApplyTransaction ts:${timestamp} repairing:${repairing} hasStateTableData:${hasStateTableData} isGlobalModifyingTX:${isGlobalModifyingTX} Applying! 
debugInfo: ${debugInfo}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryApplyTransaction filter: ${utils.stringifyReduce(filter)}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryApplyTransaction acceptedTX: ${utils.stringifyReduce(acceptedTX)}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryApplyTransaction wrappedStates: ${utils.stringifyReduce(wrappedStates)}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryApplyTransaction localCachedData: ${utils.stringifyReduce(localCachedData)}`) - - // if (repairing !== true) { - // // get a list of modified account keys that we will lock - // let { sourceKeys, targetKeys } = keysResponse - // for (let accountID of sourceKeys) { - // accountKeys.push(accountID) - // } - // for (let accountID of targetKeys) { - // accountKeys.push(accountID) - // } - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair tryApplyTransaction FIFO lock outer: ${utils.stringifyReduce(accountKeys)} `) - // ourAccountLocks = await this.stateManager.bulkFifoLockAccounts(accountKeys) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair tryApplyTransaction FIFO lock inner: ${utils.stringifyReduce(accountKeys)} ourLocks: ${utils.stringifyReduce(ourAccountLocks)}`) - // } - - // ourLockID = await this.stateManager.fifoLock('accountModification') - - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.console) console.log(`tryApplyTransaction ts:${timestamp} repairing:${repairing} Applying!`) - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('APPSTATE: tryApplyTransaction ' + timestamp + ' Applying!' + ' source: ' + utils.makeShortHash(sourceAddress) + ' target: ' + utils.makeShortHash(targetAddress) + ' srchash_before:' + utils.makeShortHash(sourceState) + ' tgtHash_before: ' + utils.makeShortHash(targetState)) - // this.stateManager.transactionQueue.applySoftLock = true - - // // let replyObject = { stateTableResults: [], txId, txTimestamp, accountData: [] } - // // let wrappedStatesList = Object.values(wrappedStates) - - // // TSConversion need to check how save this cast is for the apply fuction, should probably do more in depth look at the tx param. 
- // applyResponse = this.app.apply(tx as Shardus.IncomingTransaction, wrappedStates) - // let { stateTableResults, accountData: _accountdata } = applyResponse - // accountDataList = _accountdata - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryApplyTransaction post apply wrappedStates: ${utils.stringifyReduce(wrappedStates)}`) - // // wrappedStates are side effected for now - // savedSomething = await this.stateManager.setAccount(wrappedStates, localCachedData, applyResponse, isGlobalModifyingTX, filter) - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryApplyTransaction accountData[${accountDataList.length}]: ${utils.stringifyReduce(accountDataList)}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryApplyTransaction stateTableResults[${stateTableResults.length}]: ${utils.stringifyReduce(stateTableResults)}`) - - // this.stateManager.transactionQueue.applySoftLock = false - // // only write our state table data if we dont already have it in the db - // if (hasStateTableData === false) { - // for (let stateT of stateTableResults) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.console) console.log('writeStateTable ' + utils.makeShortHash(stateT.accountId) + ' accounts total' + accountDataList.length) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('writeStateTable ' + utils.makeShortHash(stateT.accountId) + ' before: ' + utils.makeShortHash(stateT.stateBefore) + ' after: ' + utils.makeShortHash(stateT.stateAfter) + ' txid: ' + utils.makeShortHash(acceptedTX.id) + ' ts: ' + acceptedTX.timestamp) - // } - // await this.storage.addAccountStates(stateTableResults) - // } - - // // post validate that state ended up correctly? - - // // write the accepted TX to storage - // this.storage.addAcceptedTransactions([acceptedTX]) - // } catch (ex) { - // this.statemanager_fatal(`tryApplyTransaction_ex`, 'tryApplyTransaction failed: ' + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // this.mainLogger.debug(`tryApplyTransaction failed id:${utils.makeShortHash(acceptedTX.id)} ${utils.stringifyReduce(acceptedTX)}`) - // if (applyResponse) { - // // && savedSomething){ - // // TSConversion do we really want to record this? - // // if (!repairing) this.stateManager.partitionObjects.tempRecordTXByCycle(txTs, acceptedTX, false, applyResponse, isGlobalModifyingTX, savedSomething) - // // record no-op state table fail: - // } else { - // // this.fatalLogger.fatal('tryApplyTransaction failed: applyResponse == null') - // } - - // return false - // } finally { - // this.stateManager.fifoUnlock('accountModification', ourLockID) - // if (repairing !== true) { - // if (ourAccountLocks != null) { - // this.stateManager.bulkFifoUnlockAccounts(accountKeys, ourAccountLocks) - // } - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair tryApplyTransaction FIFO unlock inner: ${utils.stringifyReduce(accountKeys)} ourLocks: ${utils.stringifyReduce(ourAccountLocks)}`) - // } - // } - - // // have to wrestle with the data a bit so we can backup the full account and not jsut the partial account! 
- // // let dataResultsByKey = {} - // let dataResultsFullList = [] - // for (let wrappedData of applyResponse.accountData) { - // // if (wrappedData.isPartial === false) { - // // dataResultsFullList.push(wrappedData.data) - // // } else { - // // dataResultsFullList.push(wrappedData.localCache) - // // } - // if (wrappedData.localCache != null) { - // dataResultsFullList.push(wrappedData) - // } - // // dataResultsByKey[wrappedData.accountId] = wrappedData.data - // } - - // // this is just for debug!!! - // if (dataResultsFullList[0] == null) { - // for (let wrappedData of applyResponse.accountData) { - // if (wrappedData.localCache != null) { - // dataResultsFullList.push(wrappedData) - // } - // // dataResultsByKey[wrappedData.accountId] = wrappedData.data - // } - // } - // // if(dataResultsFullList == null){ - // // throw new Error(`tryApplyTransaction (dataResultsFullList == null ${txTs} ${utils.stringifyReduce(acceptedTX)} `); - // // } - - // // TSConversion verified that app.setAccount calls shardus.applyResponseAddState that adds hash and txid to the data and turns it into AccountData - // let upgradedAccountDataList: Shardus.AccountData[] = (dataResultsFullList as unknown) as Shardus.AccountData[] - - // await this.stateManager.updateAccountsCopyTable(upgradedAccountDataList, repairing, txTs) - - // if (!repairing) { - // //if(savedSomething){ - // this.stateManager.partitionObjects.tempRecordTXByCycle(txTs, acceptedTX, true, applyResponse, isGlobalModifyingTX, savedSomething) - // //} - - // //WOW this was not good! had acceptedTX.transactionGroup[0].id - // //if (this.p2p.getNodeId() === acceptedTX.transactionGroup[0].id) { - - // let queueEntry: QueueEntry | null = this.stateManager.transactionQueue.getQueueEntry(acceptedTX.id) - // if (queueEntry != null && queueEntry.transactionGroup != null && this.p2p.getNodeId() === queueEntry.transactionGroup[0].id) { - // this.stateManager.eventEmitter.emit('txProcessed') - // } - - // this.stateManager.eventEmitter.emit('txApplied', acceptedTX) - // } - - // return true - // } - - /*** - * ######## ### ######## ######## #### ######## #### ####### ## ## ####### ######## ## ######## ###### ######## ###### ## - * ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## #### - * ## ## ## ## ## ## ## ## ## ## ## ## #### ## ## ## ## ## ## ## ## ## ## ## - * ######## ## ## ######## ## ## ## ## ## ## ## ## ## ## ## ######## ## ###### ## ## ###### - * ## ######### ## ## ## ## ## ## ## ## ## #### ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## #### - * ## ## ## ## ## ## #### ## #### ####### ## ## ####### ######## ###### ######## ###### ## ###### ## - */ - - /*** - * ######## ### ######## ######## #### ######## #### ####### ## ## ####### ######## ## ######## ###### ######## ###### - * ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## #### ## ## ## ## ## ## ## ## ## ## - * ######## ## ## ######## ## ## ## ## ## ## ## ## ## ## ## ######## ## ###### ## ## ###### - * ## ######### ## ## ## ## ## ## ## ## ## #### ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## #### ## #### ####### ## ## ####### ######## ###### ######## ###### ## ###### - */ - - // /** - // * generatePartitionObjects - // * @param {Cycle} lastCycle - // */ - // generatePartitionObjects(lastCycle: Shardus.Cycle) { - // let lastCycleShardValues = 
this.stateManager.shardValuesByCycle.get(lastCycle.counter) - - // // let partitions = ShardFunctions.getConsenusPartitions(lastCycleShardValues.shardGlobals, lastCycleShardValues.nodeShardData) - // // lastCycleShardValues.ourConsensusPartitions = partitions - - // if (lastCycleShardValues == null) { - // throw new Error('generatePartitionObjects lastCycleShardValues == null' + lastCycle.counter) - // } - - // let partitions = lastCycleShardValues.ourConsensusPartitions - // if (this.stateManager.useStoredPartitionsForReport === true) { - // partitions = lastCycleShardValues.ourStoredPartitions - // } - // if (partitions == null) { - // throw new Error('generatePartitionObjects partitions == null') - // } - - // if (this.stateManager.feature_useNewParitionReport === false) { - // this.nextCycleReportToSend = { res: [], cycleNumber: lastCycle.counter } - // } - - // let partitionObjects = [] - // let partitionResults = [] - // let cycleKey = 'c' + lastCycle.counter - // for (let partitionNumber of partitions) { - // // TODO sharding - done. when we add state sharding need to loop over partitions. - // let partitionObject = this.generatePartitionObject(lastCycle, partitionNumber) - - // // Nodes sign the partition hash along with the Partition_id, Cycle_number and timestamp to produce a partition result. - // let partitionResult = this.generatePartitionResult(partitionObject) - - // if (this.stateManager.feature_useNewParitionReport === false) { - // this.nextCycleReportToSend.res.push({ i: partitionResult.Partition_id, h: partitionResult.Partition_hash }) - // } - // // let partitionObjects = [partitionObject] - // // let partitionResults = [partitionResult] - - // // this.partitionObjectsByCycle[cycleKey] = partitionObjects - // // this.ourPartitionResultsByCycle[cycleKey] = partitionResults // todo in the future there could be many results (one per covered partition) - - // partitionObjects.push(partitionObject) - // partitionResults.push(partitionResult) - - // this.partitionObjectsByCycle[cycleKey] = partitionObjects - // this.ourPartitionResultsByCycle[cycleKey] = partitionResults - - // this.poMicroDebug(partitionObject) - - // let partitionResultsByHash = this.recentPartitionObjectsByCycleByHash[cycleKey] - // if (partitionResultsByHash == null) { - // partitionResultsByHash = {} - // this.recentPartitionObjectsByCycleByHash[cycleKey] = partitionResultsByHash - // } - // // todo sharding done? seems ok : need to loop and put all results in this list - // // todo perf, need to clean out data from older cycles.. - // partitionResultsByHash[partitionResult.Partition_hash] = partitionObject - // } - - // // outside of the main loop - // // add our result to the list of all other results - // let responsesByPartition = this.allPartitionResponsesByCycleByPartition[cycleKey] - // if (!responsesByPartition) { - // responsesByPartition = {} - // this.allPartitionResponsesByCycleByPartition[cycleKey] = responsesByPartition - // } - - // // this part should be good to go for sharding. - // for (let pResult of partitionResults) { - // let partitionKey = 'p' + pResult.Partition_id - // let responses = responsesByPartition[partitionKey] - // if (!responses) { - // responses = [] - // responsesByPartition[partitionKey] = responses - // } - // let ourID = this.crypto.getPublicKey() - // // clean out an older response from same node if on exists - // responses = responses.filter((item) => item.sign && item.sign.owner !== ourID) // if the item is not signed clear it! 
- // responsesByPartition[partitionKey] = responses // have to re-assign this since it is a new ref to the array - // responses.push(pResult) - // } - - // // return [partitionObject, partitionResult] - // } - - // /** - // * generatePartitionResult - // * @param {PartitionObject} partitionObject - // * @returns {PartitionResult} - // */ - // generatePartitionResult(partitionObject: PartitionObject): PartitionResult { - // let tempStates = partitionObject.States - // partitionObject.States = [] - // let partitionHash = /** @type {string} */ this.crypto.hash(partitionObject) - // partitionObject.States = tempStates //Temp fix. do not record states as part of hash (for now) - - // /** @type {PartitionResult} */ - // let partitionResult = { Partition_hash: partitionHash, Partition_id: partitionObject.Partition_id, Cycle_number: partitionObject.Cycle_number, hashSet: '' } - - // // let stepSize = cHashSetStepSize - // if (this.stateManager.useHashSets) { - // let hashSet = Depricated.createHashSetString(partitionObject.Txids, partitionObject.States) // TXSTATE_TODO - // partitionResult.hashSet = hashSet - // } - - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair partitionObject: ${utils.stringifyReduce(partitionObject)}`) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair generatePartitionResult: ${utils.stringifyReduce(partitionResult)}`) - - // if (partitionObject.Txids && partitionObject.Txids.length > 0) { - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('partitionObject', 'c' + partitionObject.Cycle_number, partitionObject) - // } - // // nodeid in form of the signer! - // return partitionResult - // } - - // /** - // * generatePartitionObject - // * @param {Cycle} lastCycle todo define cycle!! - // * @param {number} partitionId - // * @returns {PartitionObject} - // */ - // generatePartitionObject(lastCycle: Cycle, partitionId: number) { - // let txList = this.getTXList(lastCycle.counter, partitionId) - - // let txSourceData = txList - // if (txList.newTxList) { - // // TSConversion this forced us to add processed to newTxList. probably a good fis for an oversight - // txSourceData = txList.newTxList - // } - - // /** @type {PartitionObject} */ - // let partitionObject = { - // Partition_id: partitionId, - // Partitions: 1, - // Cycle_number: lastCycle.counter, - // Cycle_marker: lastCycle.marker, - // Txids: txSourceData.hashes, // txid1, txid2, …], - ordered from oldest to recent - // Status: txSourceData.passed, // [1,0, …], - ordered corresponding to Txids; 1 for applied; 0 for failed - // States: txSourceData.states, // array of array of states - // Chain: [], // [partition_hash_341, partition_hash_342, partition_hash_343, …] - // // TODO prodution need to implment chain logic. 
Chain logic is important for making a block chain out of are partition objects - // } - // return partitionObject - // } - - // /** - // * partitionObjectToTxMaps - // * @param {PartitionObject} partitionObject - // * @returns {Object.} - // */ - // partitionObjectToTxMaps(partitionObject: PartitionObject): StatusMap { - // let statusMap: StatusMap = {} - // for (let i = 0; i < partitionObject.Txids.length; i++) { - // let tx = partitionObject.Txids[i] - // let status = partitionObject.Status[i] - // statusMap[tx] = status - // } - // return statusMap - // } - - // /** - // * partitionObjectToStateMaps - // * @param {PartitionObject} partitionObject - // * @returns {Object.} - // */ - // partitionObjectToStateMaps(partitionObject: PartitionObject): StateMap { - // let statusMap: StateMap = {} - // for (let i = 0; i < partitionObject.Txids.length; i++) { - // let tx = partitionObject.Txids[i] - // let state = partitionObject.States[i] - // statusMap[tx] = state - // } - // return statusMap - // } - - // /** - // * tryGeneratePartitionReciept - // * Generate a receipt if we have consensus - // * @param {PartitionResult[]} allResults - // * @param {PartitionResult} ourResult - // * @param {boolean} [repairPassHack] - // * @returns {{ partitionReceipt: PartitionReceipt; topResult: PartitionResult; success: boolean }} - // */ - // tryGeneratePartitionReciept(allResults: PartitionResult[], ourResult: PartitionResult, repairPassHack = false) { - // let partitionId = ourResult.Partition_id - // let cycleCounter = ourResult.Cycle_number - - // let key = 'c' + cycleCounter - // let key2 = 'p' + partitionId - // let debugKey = `rkeys: ${key} ${key2}` - - // let repairTracker = this.stateManager.depricated._getRepairTrackerForCycle(cycleCounter, partitionId) - // repairTracker.busy = true // mark busy so we won't try to start this task again while in the middle of it - - // // Tried hashes is not working correctly at the moment, it is an unused parameter. 
I am not even sure we want to ignore hashes - // let { topHash, topCount, topResult } = this.stateManager.depricated.findMostCommonResponse(cycleCounter, partitionId, repairTracker.triedHashes) - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair ${debugKey} tryGeneratePartitoinReciept repairTracker: ${utils.stringifyReduce(repairTracker)} other: ${utils.stringifyReduce({ topHash, topCount, topResult })}`) - - // let requiredHalf = Math.max(1, allResults.length / 2) - // if (this.stateManager.useHashSets && repairPassHack) { - // // hack force our node to win: - // topCount = requiredHalf - // topHash = ourResult.Partition_hash - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair ${debugKey} tryGeneratePartitoinReciept hack force win: ${utils.stringifyReduce(repairTracker)} other: ${utils.stringifyReduce({ topHash, topCount, topResult })}`) - // } - - // let resultsList = [] - // if (topCount >= requiredHalf) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair ${debugKey} tryGeneratePartitoinReciept: top hash wins: ` + utils.makeShortHash(topHash) + ` ourResult: ${utils.makeShortHash(ourResult.Partition_hash)} count/required ${topCount} / ${requiredHalf}`) - // for (let partitionResult of allResults) { - // if (partitionResult.Partition_hash === topHash) { - // resultsList.push(partitionResult) - // } - // } - // } else { - // if (this.stateManager.useHashSets) { - // // bail in a way that will cause us to use the hashset strings - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair ${debugKey} tryGeneratePartitoinReciept: did not win, useHashSets: ` + utils.makeShortHash(topHash) + ` ourResult: ${utils.makeShortHash(ourResult.Partition_hash)} count/required ${topCount} / ${requiredHalf}`) - // return { partitionReceipt: null, topResult: null, success: false } - // } - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair ${debugKey} tryGeneratePartitoinReciept: top hash failed: ` + utils.makeShortHash(topHash) + ` ${topCount} / ${requiredHalf}`) - // return { partitionReceipt: null, topResult, success: false } - // } - - // if (ourResult.Partition_hash !== topHash) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair ${debugKey} tryGeneratePartitoinReciept: our hash does not match: ` + utils.makeShortHash(topHash) + ` our hash: ${ourResult.Partition_hash}`) - // return { partitionReceipt: null, topResult, success: false } - // } - - // let partitionReceipt = { - // resultsList, - // } - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair ${debugKey} tryGeneratePartitoinReciept OK! ${utils.stringifyReduce({ partitionReceipt, topResult })}`) - - // return { partitionReceipt, topResult, success: true } - // } - - // /** - // * startRepairProcess - // * @param {Cycle} cycle - // * @param {PartitionResult} topResult - // * @param {number} partitionId - // * @param {string} ourLastResultHash - // */ - // async startRepairProcess(cycle: Cycle, topResult: PartitionResult | null, partitionId: number, ourLastResultHash: string) { - // // todo update stateIsGood to follow a new metric based on the new data repair. - // this.stateManager.stateIsGood_txHashsetOld = false - // if (this.stateManager.canDataRepair === false) { - // // todo fix false negative results. This may require inserting - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(`data oos detected. 
(old system) False negative results given if syncing. cycle: ${cycle.counter} partition: ${partitionId} `) - // return - // } - // return - // } - - // // todo refactor some of the duped code in here - // // possibly have to split this into three functions to make that clean (find our result and the parition checking as sub funcitons... idk) - // /** - // * checkForGoodPartitionReciept - // * - // * this is part of the old partition tracking and is only used for debugging now. - // * - // * @param {number} cycleNumber - // * @param {number} partitionId - // */ - // async checkForGoodPartitionReciept(cycleNumber: number, partitionId: number) { - // // let repairTracker = this.stateManager.depricated._getRepairTrackerForCycle(cycleNumber, partitionId) - - // let key = 'c' + cycleNumber - // let key2 = 'p' + partitionId - // let debugKey = `rkeys: ${key} ${key2}` - - // // get responses - // let responsesById = this.allPartitionResponsesByCycleByPartition[key] - // let responses = responsesById[key2] - - // // find our result - // let ourPartitionValues = this.ourPartitionResultsByCycle[key] - // let ourResult = null - // for (let obj of ourPartitionValues) { - // if (obj.Partition_id === partitionId) { - // ourResult = obj - // break - // } - // } - // if (ourResult == null) { - // throw new Error(`checkForGoodPartitionReciept ourResult == null ${debugKey}`) - // } - // let receiptResults = this.tryGeneratePartitionReciept(responses, ourResult) // TODO: how to mark block if we are already on a thread for this? - // let { partitionReceipt: partitionReceipt3, topResult: topResult3, success: success3 } = receiptResults - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair checkForGoodPartitionReciept immediate receipt check. ${debugKey} success:${success3} topResult:${utils.stringifyReduce(topResult3)} partitionReceipt: ${utils.stringifyReduce({ partitionReceipt3 })}`) - - // // see if we already have a winning hash to correct to - // if (!success3) { - // // if (repairTracker.awaitWinningHash) { - // // if (topResult3 == null) { - // // // if we are awaitWinningHash then wait for a top result before we start repair process again - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair checkForGoodPartitionReciept awaitWinningHash:true but topResult == null so keep waiting ${debugKey}`) - // // } else { - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair checkForGoodPartitionReciept awaitWinningHash:true and we have a top result so start reparing! ${debugKey}`) - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair checkForGoodPartitionReciept: tryGeneratePartitionReciept failed start repair process 3 ${debugKey} ${utils.stringifyReduce(receiptResults)}`) - // // let cycle = this.p2p.state.getCycleByCounter(cycleNumber) - // // await utils.sleep(1000) - // // await this.startRepairProcess(cycle, topResult3, partitionId, ourResult.Partition_hash) - // // // we are correcting to another hash. 
don't bother sending our hash out - // // } - // // } - // } else { - // if (partitionReceipt3 == null) { - // throw new Error(`checkForGoodPartitionReciept partitionReceipt3 == null ${debugKey}`) - // } - // this.stateManager.storePartitionReceipt(cycleNumber, partitionReceipt3) - // // this.stateManager.depricated.repairTrackerMarkFinished(repairTracker, 'checkForGoodPartitionReciept') - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair checkForGoodPartitionReciept 2 allFinished, final ${debugKey} hash:${utils.stringifyReduce({ topResult3 })}`) - // } - // } - - // /** - // * tempRecordTXByCycle - // * we dont have a cycle yet to save these records against so store them in a temp place - // * @param {number} txTS - // * @param {AcceptedTx} acceptedTx - // * @param {boolean} passed - // * @param {ApplyResponse} applyResponse - // * @param {boolean} isGlobalModifyingTX - // */ - // tempRecordTXByCycle(txTS: number, acceptedTx: AcceptedTx, passed: boolean, applyResponse: ApplyResponse, isGlobalModifyingTX: boolean, savedSomething: boolean) { - // this.tempTXRecords.push({ txTS, acceptedTx, passed, redacted: -1, applyResponse, isGlobalModifyingTX, savedSomething }) - // } - - // /** - // * sortTXRecords - // * @param {TempTxRecord} a - // * @param {TempTxRecord} b - // * @returns {number} - // */ - // sortTXRecords(a: TempTxRecord, b: TempTxRecord): number { - // if (a.acceptedTx.timestamp === b.acceptedTx.timestamp) { - // return utils.sortAsc(a.acceptedTx.id, b.acceptedTx.id) - // } - // //return a.acceptedTx.timestamp - b.acceptedTx.timestamp - // return a.acceptedTx.timestamp > b.acceptedTx.timestamp ? -1 : 1 - // } - - // /** - // * processTempTXs - // * call this before we start computing partitions so that we can make sure to get the TXs we need out of the temp list - // * @param {Cycle} cycle - // */ - // processTempTXs(cycle: Cycle) { - // if (!this.tempTXRecords) { - // return - // } - // let txsRecorded = 0 - // let txsTemp = 0 - - // let newTempTX = [] - // let cycleEnd = (cycle.start + cycle.duration) * 1000 - // cycleEnd -= this.stateManager.syncSettleTime // adjust by sync settle time - - // // sort our records before recording them! - // this.tempTXRecords.sort(this.sortTXRecords) - - // //savedSomething - - // for (let txRecord of this.tempTXRecords) { - // if (txRecord.redacted > 0) { - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair recordTXByCycle: ${utils.makeShortHash(txRecord.acceptedTx.id)} cycle: ${cycle.counter} redacted!!! 
${txRecord.redacted}`) - // continue - // } - // if (txRecord.txTS < cycleEnd) { - // this.recordTXByCycle(txRecord.txTS, txRecord.acceptedTx, txRecord.passed, txRecord.applyResponse, txRecord.isGlobalModifyingTX) - // txsRecorded++ - // } else { - // newTempTX.push(txRecord) - // txsTemp++ - // } - // } - - // this.tempTXRecords = newTempTX - - // let lastCycleShardValues = this.stateManager.shardValuesByCycle.get(cycle.counter) - - // if (lastCycleShardValues == null) { - // throw new Error('processTempTXs lastCycleShardValues == null') - // } - // if (lastCycleShardValues.ourConsensusPartitions == null) { - // throw new Error('processTempTXs ourConsensusPartitions == null') - // } - // // lastCycleShardValues.ourConsensusPartitions is not iterable - // for (let partitionID of lastCycleShardValues.ourConsensusPartitions) { - // let txList = this.getTXList(cycle.counter, partitionID) // todo sharding - done.: pass partition ID - - // txList.processed = true - // } - - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair processTempTXs txsRecorded: ${txsRecorded} txsTemp: ${txsTemp} `) - // } - - // // TODO sharding done! need to split this out by partition - // /** - // * getTXList - // * @param {number} cycleNumber - // * @param {number} partitionId - // * @returns {TxTallyList} - // */ - // getTXList(cycleNumber: number, partitionId: number): TxTallyList { - // let key = 'c' + cycleNumber - // let txListByPartition = this.txByCycleByPartition[key] - // let pkey = 'p' + partitionId - // // now search for the correct partition - // if (!txListByPartition) { - // txListByPartition = {} - // this.txByCycleByPartition[key] = txListByPartition - // } - // let txList = txListByPartition[pkey] - // if (!txList) { - // txList = { hashes: [], passed: [], txs: [], processed: false, states: [] } // , txById: {} - // txListByPartition[pkey] = txList - // } - // return txList - // } - - // // take this tx and create if needed and object for the current cylce that holds a list of passed and failed TXs - // /** - // * recordTXByCycle - // * This function is only for building up txList as used by the features: stateIsGood_txHashsetOld, oldFeature_BroadCastPartitionReport, oldFeature_GeneratePartitionReport - // * @param {number} txTS - // * @param {AcceptedTx} acceptedTx - // * @param {boolean} passed - // * @param {ApplyResponse} applyResponse - // */ - // recordTXByCycle(txTS: number, acceptedTx: AcceptedTx, passed: boolean, applyResponse: ApplyResponse, isGlobalModifyingTX: boolean) { - // // TODO sharding. done because it uses getTXList . filter TSs by the partition they belong to. Double check that this is still needed - - // // get the cycle that this tx timestamp would belong to. - // // add in syncSettleTime when selecting which bucket to put a transaction in - // const cycle = this.p2p.state.getCycleByTimestamp(txTS + this.stateManager.syncSettleTime) - - // if (cycle == null) { - // this.mainLogger.error(`recordTXByCycle Failed to find cycle that would contain this timestamp txid:${utils.stringifyReduce(acceptedTx.id)} txts1: ${acceptedTx.timestamp} txts: ${txTS}`) - // return - // } - - // let cycleNumber = cycle.counter - - // // for each covered partition.. 
- - // let lastCycleShardValues = this.stateManager.shardValuesByCycle.get(cycle.counter) - - // let keysResponse = this.app.getKeyFromTransaction(acceptedTx.data) - // let { allKeys } = keysResponse - - // let seenParitions: StringBoolObjectMap = {} - // let partitionHasNonGlobal: StringBoolObjectMap = {} - // // for (let partitionID of lastCycleShardValues.ourConsensusPartitions) { - // if (lastCycleShardValues == null) { - // throw new Error(`recordTXByCycle lastCycleShardValues == null`) - // } - - // if (isGlobalModifyingTX) { - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(`recordTXByCycle: ignore loggging globalTX ${txQueueEntry.logID} cycle: ${cycleNumber}`) - // return - // } - - // let globalACC = 0 - // let nonGlobal = 0 - // let storedNonGlobal = 0 - // let storedGlobal = 0 - // //filter out stuff. - // if (isGlobalModifyingTX === false) { - // for (let accountKey of allKeys) { - // // HOMENODEMATHS recordTXByCycle: using partition to decide recording partition - // let { homePartition } = ShardFunctions.addressToPartition(lastCycleShardValues.shardGlobals, accountKey) - // let partitionID = homePartition - // let weStoreThisParition = ShardFunctions.testInRange(partitionID, lastCycleShardValues.nodeShardData.storedPartitions) - // let key = 'p' + partitionID - - // if (this.stateManager.accountGlobals.isGlobalAccount(accountKey)) { - // globalACC++ - - // if (weStoreThisParition === true) { - // storedGlobal++ - // } - // } else { - // nonGlobal++ - - // if (weStoreThisParition === true) { - // storedNonGlobal++ - // partitionHasNonGlobal[key] = true - // } - // } - // } - // } - - // if (storedNonGlobal === 0 && storedGlobal === 0) { - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(`recordTXByCycle: nothing to save globalAccounts: ${globalACC} nonGlobal: ${nonGlobal} storedNonGlobal:${storedNonGlobal} storedGlobal: ${storedGlobal} tx: ${txQueueEntry.logID} cycle: ${cycleNumber}`) - // return - // } - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(`recordTXByCycle: globalAccounts: ${globalACC} nonGlobal: ${nonGlobal} storedNonGlobal:${storedNonGlobal} storedGlobal: ${storedGlobal} tx: ${txQueueEntry.logID} cycle: ${cycleNumber}`) - - // for (let accountKey of allKeys) { - // /** @type {NodeShardData} */ - // let homeNode = ShardFunctions.findHomeNode(lastCycleShardValues.shardGlobals, accountKey, lastCycleShardValues.parititionShardDataMap) - // if (homeNode == null) { - // throw new Error(`recordTXByCycle homeNode == null`) - // } - // // HOMENODEMATHS recordTXByCycle: this code has moved to use homepartition instead of home node's partition - // let homeNodepartitionID = homeNode.homePartition - // let { homePartition } = ShardFunctions.addressToPartition(lastCycleShardValues.shardGlobals, accountKey) - // let partitionID = homePartition - // let key = 'p' + partitionID - - // if (this.stateManager.accountGlobals.isGlobalAccount(accountKey)) { - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(`recordTXByCycle: skip partition. 
dont save due to global: P: ${partitionID} homeNodepartitionID: ${homeNodepartitionID} acc: ${utils.makeShortHash(accountKey)} tx: ${txQueueEntry.logID} cycle: ${cycleNumber}`) - // continue - // } - - // let weStoreThisParition = ShardFunctions.testInRange(partitionID, lastCycleShardValues.nodeShardData.storedPartitions) - // if (weStoreThisParition === false) { - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(`recordTXByCycle: skip partition we dont save: P: ${partitionID} homeNodepartitionID: ${homeNodepartitionID} acc: ${utils.makeShortHash(accountKey)} tx: ${txQueueEntry.logID} cycle: ${cycleNumber}`) - - // continue - // } - - // if (partitionHasNonGlobal[key] === false) { - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(`recordTXByCycle: skip partition. we store it but only a global ref involved this time: P: ${partitionID} homeNodepartitionID: ${homeNodepartitionID} acc: ${utils.makeShortHash(accountKey)} tx: ${txQueueEntry.logID} cycle: ${cycleNumber}`) - - // continue - // } - // //check if we are only storing this because it is a global account... - - // let txList = this.getTXList(cycleNumber, partitionID) // todo sharding - done: pass partition ID - - // if (txList.processed) { - // continue - // //this.mainLogger.error(`_repair trying to record transaction after we have already finalized our parition object for cycle ${cycle.counter} `) - // } - - // if (seenParitions[key] != null) { - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(`recordTXByCycle: seenParitions[key] != null P: ${partitionID} homeNodepartitionID: ${homeNodepartitionID} acc: ${utils.makeShortHash(accountKey)} tx: ${txQueueEntry.logID} cycle: ${cycleNumber} entries: ${txList.hashes.length} --TX already recorded for cycle`) - // // skip because this partition already has this TX! - // continue - // } - // seenParitions[key] = true - - // txList.hashes.push(acceptedTx.id) - // txList.passed.push(passed ? 1 : 0) - // txList.txs.push(acceptedTx) - - // let recordedState = false - // if (applyResponse != null && applyResponse.accountData != null) { - // let states = [] - // let foundAccountIndex = 0 - // let index = 0 - // for (let accountData of applyResponse.accountData) { - // if (accountData.accountId === accountKey) { - // foundAccountIndex = index - // } - // //states.push(utils.makeShortHash(accountData.hash)) // TXSTATE_TODO need to get only certain state data!.. hash of local states? - // // take a look at backup data? - - // //TSConversion some uncertainty with around hash being on the data or not. added logggin. 
- // // // @ts-ignore - // // if(accountData.hash != null){ - // // // @ts-ignore - // // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug( ` _repair recordTXByCycle: how is this possible: ${utils.makeShortHash(accountData.accountId)} acc hash: ${utils.makeShortHash(accountData.hash)} acc stateID: ${utils.makeShortHash(accountData.stateId)}`) - - // // } - // // if(accountData.stateId == null){ - // // // @ts-ignore - // // throw new Error(`missing state id for ${utils.makeShortHash(accountData.accountId)} acc hash: ${utils.makeShortHash(accountData.hash)} acc stateID: ${utils.makeShortHash(accountData.stateId)} `) - // // } - - // // account data got upgraded earlier to have hash on it - - // //if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug( `recordTXByCycle: Pushed! P: ${partitionID} acc: ${utils.makeShortHash(accountKey)} tx: ${txQueueEntry.logID} cycle: ${cycleNumber} entries: ${txList.hashes.length} --TX already recorded for cycle`) - - // states.push(utils.makeShortHash(((accountData as unknown) as Shardus.AccountData).hash)) - // index++ - // recordedState = true - // } - // txList.states.push(states[foundAccountIndex]) // TXSTATE_TODO does this check out? - // } else { - // txList.states.push('xxxx') - // } - // // txList.txById[acceptedTx.id] = acceptedTx - // // TODO sharding perf. need to add some periodic cleanup when we have more cycles than needed stored in this map!!! - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair recordTXByCycle: pushedData P: ${partitionID} homeNodepartitionID: ${homeNodepartitionID} acc: ${utils.makeShortHash(accountKey)} tx: ${txQueueEntry.logID} cycle: ${cycleNumber} entries: ${txList.hashes.length} recordedState: ${recordedState}`) - // } - // } - - /*** - * ######## ######## ####### ### ######## ###### ### ###### ######## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ######## ######## ## ## ## ## ## ## ## ## ## ###### ## - * ## ## ## ## ## ## ######### ## ## ## ######### ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ######## ## ## ####### ## ## ######## ###### ## ## ###### ## - */ - // /** - // * broadcastPartitionResults - // * @param {number} cycleNumber - // */ - // async broadcastPartitionResults(cycleNumber: number) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair broadcastPartitionResults for cycle: ${cycleNumber}`) - // // per partition need to figure out which node cover it. - // // then get a list of all the results we need to send to a given node and send them at once. - // // need a way to do this in semi parallel? - // let lastCycleShardValues = this.stateManager.shardValuesByCycle.get(cycleNumber) - // let partitionResults = this.ourPartitionResultsByCycle['c' + cycleNumber] - // let partitionResultsByNodeID = new Map() // use a map? - // let nodesToTell = [] - - // if (lastCycleShardValues == null) { - // throw new Error(`broadcastPartitionResults lastCycleShardValues == null ${cycleNumber}`) - // } - // // sign results as needed - // for (let i = 0; i < partitionResults.length; i++) { - // /** @type {PartitionResult} */ - // let partitionResult = partitionResults[i] - // if (!partitionResult.sign) { - // partitionResult = this.crypto.sign(partitionResult) - // } - - // //check if we are syncing that cycle if so don't send out info on it! 
- // // if(this.getSyncTrackerForParition(partitionResult.Partition_id, lastCycleShardValues)) { - // // /* prettier-ignore */ if (logFlags.verbose ) this.mainLogger.debug( `broadcastPartitionResults skipped because parition is syncing ${partitionResult.Partition_id}`) - // // continue - // // } - - // // if(lastCycleShardValues.partitionsToSkip.has(partitionResult.Partition_id) === true){ - // // /* prettier-ignore */ if (logFlags.verbose ) this.mainLogger.debug( `broadcastPartitionResults skipped because parition is syncing ${partitionResult.Partition_id}`) - // // continue - // // } - - // //if there is any tx that gets a slow down need to mark it. - - // /** @type {ShardInfo} */ - // let partitionShardData = lastCycleShardValues.parititionShardDataMap.get(partitionResult.Partition_id) - // // calculate nodes that care about this partition here - // // since we are using store partitions use storedBy - // // if we transfer back to covered partitions can switch back to coveredBy - // let coverCount = 0 - // for (let nodeId in partitionShardData.storedBy) { - // if (partitionShardData.storedBy.hasOwnProperty(nodeId)) { - // // Test if node is active!! - // let possibleNode = partitionShardData.storedBy[nodeId] - - // if (possibleNode.status !== 'active') { - // // don't count non active nodes for participating in the system. - // continue - // } - - // coverCount++ - // let partitionResultsToSend - // // If we haven't recorded this node yet create a new results object for it - // if (partitionResultsByNodeID.has(nodeId) === false) { - // nodesToTell.push(nodeId) - // partitionResultsToSend = { results: [], node: partitionShardData.storedBy[nodeId], debugStr: `c${partitionResult.Cycle_number} ` } - // partitionResultsByNodeID.set(nodeId, partitionResultsToSend) - // } - // partitionResultsToSend = partitionResultsByNodeID.get(nodeId) - // partitionResultsToSend.results.push(partitionResult) - // partitionResultsToSend.debugStr += `p${partitionResult.Partition_id} ` - // } - // } - - // // let repairTracker = this.stateManager.depricated._getRepairTrackerForCycle(cycleNumber, partitionResult.Partition_id) - // // repairTracker.numNodes = coverCount - 1 // todo sharding re-evaluate this and thing of a better perf solution - // } - - // let promises = [] - // for (let nodeId of nodesToTell) { - // if (nodeId === lastCycleShardValues.ourNode.id) { - // continue - // } - // let partitionResultsToSend = partitionResultsByNodeID.get(nodeId) - // let payload = { Cycle_number: cycleNumber, partitionResults: partitionResultsToSend.results } - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair broadcastPartitionResults to ${nodeId} debugStr: ${partitionResultsToSend.debugStr} res: ${utils.stringifyReduce(payload)}`) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair broadcastPartitionResults to ${nodeId} debugStr: ${partitionResultsToSend.debugStr} res: ${utils.stringifyReduce(payload)}`) - - // let shorthash = utils.makeShortHash(partitionResultsToSend.node.id) - // let toNodeStr = shorthash + ':' + partitionResultsToSend.node.externalPort - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('broadcastPartitionResults', `${cycleNumber}`, `to ${toNodeStr} ${partitionResultsToSend.debugStr} `) - - // // Filter nodes before we send tell() - // let filteredNodes = 
this.stateManager.filterValidNodesForInternalMessage([partitionResultsToSend.node], 'tellCorrespondingNodes', true, true) - // if (filteredNodes.length === 0) { - // this.mainLogger.error('broadcastPartitionResults: filterValidNodesForInternalMessage skipping node') - // continue //only doing one node at a time in this loop so just skip to next node. - // } - - // let promise = this.p2p.tell([partitionResultsToSend.node], 'post_partition_results', payload) - // promises.push(promise) - // } - - // await Promise.all(promises) - // } - - ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// - /*** - * ## ## ######## ## ## ######## ### ###### ######## ###### ## ## ## ## ###### - * ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## - * #### ## ## ## ## ## ## ## ## ## ## ## #### #### ## ## - * ## ## ## ###### ## ## ## ###### ## ## ###### ## ###### ## ## ## ## ## - * ## #### ## ## ## ## ## ######### ## ## ## ## ## #### ## - * ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## - * ## ## ######## ### ### ## ## ## ###### ## ###### ## ## ## ###### - */ - ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// - // async syncStateDataFast(requiredNodeCount: number) { - // // Dont sync if first node - // if (this.p2p.isFirstSeed) { - // this.dataSyncMainPhaseComplete = true - // this.syncStatement.syncComplete = true - // this.globalAccountsSynced = true - // this.stateManager.accountGlobals.hasknownGlobals = true - // this.readyforTXs = true - // if (logFlags.debug) this.mainLogger.debug(`DATASYNC: isFirstSeed = true. skipping sync`) - // return - // } - - // this.isSyncingAcceptedTxs = true - - // //await utils.sleep(5000) // Temporary delay to make it easier to attach a debugger - // if (logFlags.console) console.log('syncStateData start') - // // delete and re-create some tables before we sync: - // await this.storage.clearAppRelatedState() - // await this.app.deleteLocalAccountData() - - // if (logFlags.debug) this.mainLogger.debug(`DATASYNC: starting syncStateDataFast`) - - // this.requiredNodeCount = requiredNodeCount - - // let hasValidShardData = this.stateManager.currentCycleShardData != null - // if (this.stateManager.currentCycleShardData != null) { - // hasValidShardData = this.stateManager.currentCycleShardData.hasCompleteData - // } - // while (hasValidShardData === false) { - // this.stateManager.getCurrentCycleShardData() - // await utils.sleep(1000) - // if (this.stateManager.currentCycleShardData == null) { - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_waitForShardData', ` `, ` ${utils.stringifyReduce(this.stateManager.currentCycleShardData)} `) - // hasValidShardData = false - // } - // if (this.stateManager.currentCycleShardData != null) { - // if (this.stateManager.currentCycleShardData.hasCompleteData == false) { - // let temp = this.p2p.state.getActiveNodes(null) - // if (logFlags.playback) - // this.logger.playbackLogNote( - // 'shrd_sync_waitForShardData', - // ` `, - // `hasCompleteData:${this.stateManager.currentCycleShardData.hasCompleteData} active:${utils.stringifyReduce(temp)} ${utils.stringifyReduce(this.stateManager.currentCycleShardData)} ` - // ) - // } else { - // hasValidShardData = true - // } - // } - // } - // let nodeShardData = this.stateManager.currentCycleShardData.nodeShardData - // /* prettier-ignore */ if (logFlags.console) console.log('GOT current cycle ' + ' time:' + 
utils.stringifyReduce(nodeShardData)) - - // let rangesToSync = [] as AddressRange[] - - // let cycle = this.stateManager.currentCycleShardData.cycleNumber - - // let homePartition = nodeShardData.homePartition - - // /* prettier-ignore */ if (logFlags.console) console.log(`homePartition: ${homePartition} storedPartitions: ${utils.stringifyReduce(nodeShardData.storedPartitions)}`) - - // let chunksGuide = 4 - // let syncRangeGoal = Math.max(1, Math.min(chunksGuide, Math.floor(this.stateManager.currentCycleShardData.shardGlobals.numPartitions / chunksGuide))) - // let partitionsCovered = 0 - // let partitionsPerRange = 1 - - // if (nodeShardData.storedPartitions.rangeIsSplit === true) { - // partitionsCovered = nodeShardData.storedPartitions.partitionEnd1 - nodeShardData.storedPartitions.partitionStart1 - // partitionsCovered += nodeShardData.storedPartitions.partitionEnd2 - nodeShardData.storedPartitions.partitionStart2 - // partitionsPerRange = Math.max(Math.floor(partitionsCovered / syncRangeGoal), 1) - // if (logFlags.console) console.log( - // `syncRangeGoal ${syncRangeGoal} chunksGuide:${chunksGuide} numPartitions:${this.stateManager.currentCycleShardData.shardGlobals.numPartitions} partitionsPerRange:${partitionsPerRange}` - // ) - - // let start = nodeShardData.storedPartitions.partitionStart1 - // let end = nodeShardData.storedPartitions.partitionEnd1 - // let currentStart = start - // let currentEnd = 0 - // let nextLowAddress: string | null = null - // let i = 0 - // while (currentEnd < end) { - // currentEnd = Math.min(currentStart + partitionsPerRange, end) - // let range = ShardFunctions.partitionToAddressRange2(this.stateManager.currentCycleShardData.shardGlobals, currentStart, currentEnd) - - // let { address1, address2 } = ShardFunctions.getNextAdjacentAddresses(range.high) - // range.high = address1 - - // if (nextLowAddress != null) { - // range.low = nextLowAddress - // } - // /* prettier-ignore */ if (logFlags.console) console.log(`range ${i} s:${currentStart} e:${currentEnd} h: ${homePartition} a1: ${range.low} a2: ${range.high}`) - // nextLowAddress = address2 - // currentStart = currentEnd - // i++ - // rangesToSync.push(range) - // } - - // start = nodeShardData.storedPartitions.partitionStart2 - // end = nodeShardData.storedPartitions.partitionEnd2 - // currentStart = start - // currentEnd = 0 - // nextLowAddress = null - - // while (currentEnd < end) { - // currentEnd = Math.min(currentStart + partitionsPerRange, end) - // let range = ShardFunctions.partitionToAddressRange2(this.stateManager.currentCycleShardData.shardGlobals, currentStart, currentEnd) - - // let { address1, address2 } = ShardFunctions.getNextAdjacentAddresses(range.high) - // range.high = address1 - - // if (nextLowAddress != null) { - // range.low = nextLowAddress - // } - // /* prettier-ignore */ if (logFlags.console) console.log(`range ${i} s:${currentStart} e:${currentEnd} h: ${homePartition} a1: ${range.low} a2: ${range.high}`) - - // nextLowAddress = address2 - // currentStart = currentEnd - // i++ - // rangesToSync.push(range) - // } - // } else { - // partitionsCovered = nodeShardData.storedPartitions.partitionEnd - nodeShardData.storedPartitions.partitionStart - // partitionsPerRange = Math.max(Math.floor(partitionsCovered / syncRangeGoal), 1) - // if (logFlags.console) console.log( - // `syncRangeGoal ${syncRangeGoal} chunksGuide:${chunksGuide} numPartitions:${this.stateManager.currentCycleShardData.shardGlobals.numPartitions} partitionsPerRange:${partitionsPerRange}` - // ) - - // let 
start = nodeShardData.storedPartitions.partitionStart - // let end = nodeShardData.storedPartitions.partitionEnd - - // let currentStart = start - // let currentEnd = 0 - // let nextLowAddress: string | null = null - // let i = 0 - // while (currentEnd < end) { - // currentEnd = Math.min(currentStart + partitionsPerRange, end) - // let range = ShardFunctions.partitionToAddressRange2(this.stateManager.currentCycleShardData.shardGlobals, currentStart, currentEnd) - - // let { address1, address2 } = ShardFunctions.getNextAdjacentAddresses(range.high) - // range.high = address1 - - // if (nextLowAddress != null) { - // range.low = nextLowAddress - // } - // /* prettier-ignore */ if (logFlags.console) console.log(`range ${i} s:${currentStart} e:${currentEnd} h: ${homePartition} a1: ${range.low} a2: ${range.high}`) - // nextLowAddress = address2 - // currentStart = currentEnd - // i++ - // rangesToSync.push(range) - // } - // } - - // // if we don't have a range to sync yet manually sync the whole range. - // if (rangesToSync.length === 0) { - // if (logFlags.console) console.log(`syncStateData ranges: pushing full range, no ranges found`) - // let range = ShardFunctions.partitionToAddressRange2(this.stateManager.currentCycleShardData.shardGlobals, 0, this.stateManager.currentCycleShardData.shardGlobals.numPartitions - 1) - // rangesToSync.push(range) - // } - // if (logFlags.console) console.log(`syncStateData ranges: ${utils.stringifyReduce(rangesToSync)}}`) - - // for (let range of rangesToSync) { - // // let nodes = ShardFunctions.getNodesThatCoverRange(this.stateManager.currentCycleShardData.shardGlobals, range.low, range.high, this.stateManager.currentCycleShardData.ourNode, this.stateManager.currentCycleShardData.activeNodes) - // this.createSyncTrackerByRange(range, cycle) - // } - - // this.createSyncTrackerByForGlobals(cycle) - - // // must get a list of globals before we can listen to any TXs, otherwise the isGlobal function returns bad values - // await this.stateManager.accountGlobals.getGlobalListEarly() - // this.readyforTXs = true - - // for (let syncTracker of this.syncTrackers) { - // // let partition = syncTracker.partition - // /* prettier-ignore */ if (logFlags.console) console.log(`syncTracker start. time:${Date.now()} data: ${utils.stringifyReduce(syncTracker)}}`) - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_trackerRangeStart', ` `, ` ${utils.stringifyReduce(syncTracker.range)} `) - - // syncTracker.syncStarted = true - - // if (syncTracker.isGlobalSyncTracker === false) { - // await this.syncStateDataForRangeFast(syncTracker.range) - // } else { - // /* prettier-ignore */ if (logFlags.console) console.log(`syncTracker syncStateDataGlobals start. 
time:${Date.now()} data: ${utils.stringifyReduce(syncTracker)}}`) - // await this.syncStateDataGlobalsFast(syncTracker) - // } - // syncTracker.syncFinished = true - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_trackerRangeEnd', ` `, ` ${utils.stringifyReduce(syncTracker.range)} `) - // this.clearSyncData() - // } - // if (logFlags.console) console.log('syncStateData end' + ' time:' + Date.now()) - // } - - // async syncStateDataForRangeFast(range: SimpleRange) { - // try { - // let partition = 'notUsed' - // this.currentRange = range - // this.addressRange = range // this.partitionToAddressRange(partition) - - // this.partitionStartTimeStamp = Date.now() - - // let lowAddress = this.addressRange.low - // let highAddress = this.addressRange.high - - // partition = `${utils.stringifyReduce(lowAddress)} - ${utils.stringifyReduce(highAddress)}` - - // this.readyforTXs = true // open the floodgates of queuing stuffs. - - // await this.syncAccountDataFast(lowAddress, highAddress) - // if (logFlags.debug) this.mainLogger.debug(`DATASYNC: partition: ${partition}, syncAccountData done.`) - - // await this.processAccountDataFast() - // } catch (error) { - // if (error.message.includes('FailAndRestartPartition')) { - // if (logFlags.debug) this.mainLogger.debug(`DATASYNC: Error Failed at: ${error.stack}`) - // this.statemanager_fatal(`syncStateDataForRange_ex_failandrestart`, 'DATASYNC: FailAndRestartPartition: ' + errorToStringFull(error)) - // await this.failandRestart() - // } else { - // this.statemanager_fatal(`syncStateDataForRange_ex`, 'syncStateDataForPartition failed: ' + errorToStringFull(error)) - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: unexpected error. restaring sync:` + errorToStringFull(error)) - // await this.failandRestart() - // } - // } - // } - - // async syncAccountDataFast(lowAddress: string, highAddress: string) { - // // Sync the Account data - // // Use the /get_account_data API to get the data from the Account Table using any of the nodes that had a matching hash - // if (logFlags.console) console.log(`syncAccountData3` + ' time:' + Date.now()) - - // if (this.config.stateManager == null) { - // throw new Error('this.config.stateManager == null') - // } - - // let queryLow = lowAddress - // let queryHigh = highAddress - - // let moreDataRemaining = true - - // this.combinedAccountData = [] - // let loopCount = 0 - - // let startTime = 0 - // let lowTimeQuery = startTime - // // this loop is required since after the first query we may have to adjust the address range and re-request to get the next N data entries. - // while (moreDataRemaining) { - // // Node Precheck! - // if (this.dataSourceNode == null || this.stateManager.isNodeValidForInternalMessage(this.dataSourceNode.id, 'syncAccountData', true, true) === false) { - // if (logFlags.verbose && this.dataSourceNode == null) { - // if (logFlags.error) this.mainLogger.error(`syncAccountDataFast this.dataSourceNode == null`) - // } - // if (this.tryNextDataSourceNode('syncAccountData') == false) { - // break - // } - // continue - // } - - // // max records artificially low to make testing coverage better. 
todo refactor: make it a config or calculate based on data size - // let message = { accountStart: queryLow, accountEnd: queryHigh, tsStart: startTime, maxRecords: this.config.stateManager.accountBucketSize } - // let r: GetAccountData3Resp | boolean = await this.p2p.ask(this.dataSourceNode, 'get_account_data3', message) // need the repeatable form... possibly one that calls apply to allow for datasets larger than memory - - // // TSConversion need to consider better error handling here! - // let result: GetAccountData3Resp = r as GetAccountData3Resp - - // if (result == null) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`ASK FAIL syncAccountData result == null node:${this.dataSourceNode.id}`) - // if (this.tryNextDataSourceNode('syncAccountData') == false) { - // break - // } - // continue - // } - // if (result.data == null) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`ASK FAIL syncAccountData result.data == null node:${this.dataSourceNode.id}`) - // if (this.tryNextDataSourceNode('syncAccountData') == false) { - // break - // } - // continue - // } - // // accountData is in the form [{accountId, stateId, data}] for n accounts. - // let accountData = result.data.wrappedAccounts - - // let lastUpdateNeeded = result.data.lastUpdateNeeded - - // // get the timestamp of the last account data received so we can use it as the low timestamp for our next query - // if (accountData.length > 0) { - // let lastAccount = accountData[accountData.length - 1] - // if (lastAccount.timestamp > lowTimeQuery) { - // lowTimeQuery = lastAccount.timestamp - // startTime = lowTimeQuery - // } - // } - - // // If this is a repeated query, clear out any dupes from the new list we just got. 
- // // There could be many rows that use the stame timestamp so we will search and remove them - // let dataDuplicated = true - // if (loopCount > 0) { - // while (accountData.length > 0 && dataDuplicated) { - // let stateData = accountData[0] - // dataDuplicated = false - // for (let i = this.combinedAccountData.length - 1; i >= 0; i--) { - // let existingStateData = this.combinedAccountData[i] - // if (existingStateData.timestamp === stateData.timestamp && existingStateData.accountId === stateData.accountId) { - // dataDuplicated = true - // break - // } - // // once we get to an older timestamp we can stop looking, the outer loop will be done also - // if (existingStateData.timestamp < stateData.timestamp) { - // break - // } - // } - // if (dataDuplicated) { - // accountData.shift() - // } - // } - // } - - // // if we have any accounts in wrappedAccounts2 - // let accountData2 = result.data.wrappedAccounts2 - // if (accountData2.length > 0) { - // while (accountData.length > 0 && dataDuplicated) { - // let stateData = accountData2[0] - // dataDuplicated = false - // for (let i = this.combinedAccountData.length - 1; i >= 0; i--) { - // let existingStateData = this.combinedAccountData[i] - // if (existingStateData.timestamp === stateData.timestamp && existingStateData.accountId === stateData.accountId) { - // dataDuplicated = true - // break - // } - // // once we get to an older timestamp we can stop looking, the outer loop will be done also - // if (existingStateData.timestamp < stateData.timestamp) { - // break - // } - // } - // if (dataDuplicated) { - // accountData2.shift() - // } - // } - // } - - // if (lastUpdateNeeded || (accountData2.length === 0 && accountData.length === 0)) { - // moreDataRemaining = false - // if (logFlags.debug) this.mainLogger.debug( - // `DATASYNC: syncAccountData3 got ${accountData.length} more records. last update: ${lastUpdateNeeded} extra records: ${result.data.wrappedAccounts2.length} tsStart: ${lowTimeQuery} highestTS1: ${result.data.highestTs}` - // ) - // if (accountData.length > 0) { - // this.combinedAccountData = this.combinedAccountData.concat(accountData) - // } - // if (accountData2.length > 0) { - // this.combinedAccountData = this.combinedAccountData.concat(accountData2) - // } - // } else { - // if (logFlags.debug) this.mainLogger.debug( - // `DATASYNC: syncAccountData3b got ${accountData.length} more records. last update: ${lastUpdateNeeded} extra records: ${result.data.wrappedAccounts2.length} tsStart: ${lowTimeQuery} highestTS1: ${result.data.highestTs}` - // ) - // this.combinedAccountData = this.combinedAccountData.concat(accountData) - // loopCount++ - // // await utils.sleep(500) - // } - // await utils.sleep(200) - // } - // } - - // async processAccountDataFast() { - // this.missingAccountData = [] - // this.mapAccountData = {} - // // create a fast lookup map for the accounts we have. Perf. will need to review if this fits into memory. May need a novel structure. 
- // let account - // for (let i = 0; i < this.combinedAccountData.length; i++) { - // account = this.combinedAccountData[i] - // this.mapAccountData[account.accountId] = account - // } - - // let accountKeys = Object.keys(this.mapAccountData) - // let uniqueAccounts = accountKeys.length - // let initialCombinedAccountLength = this.combinedAccountData.length - // if (uniqueAccounts < initialCombinedAccountLength) { - // // keep only the newest copies of each account: - // // we need this if using a time based datasync - // this.combinedAccountData = [] - // for (let accountID of accountKeys) { - // this.combinedAccountData.push(this.mapAccountData[accountID]) - // } - // } - - // let missingTXs = 0 - // let handledButOk = 0 - // let otherMissingCase = 0 - - // // For each account in the Account State Table make sure the entry in Account data has the same State_after value; if not save the account id to be looked up later - // this.accountsWithStateConflict = [] - // let goodAccounts: Shardus.WrappedData[] = [] - // let noSyncData = 0 - // let noMatches = 0 - // let outOfDateNoTxs = 0 - // for (let account of this.combinedAccountData) { - // delete account.syncData - // goodAccounts.push(account) - // } - // if (logFlags.debug) this.mainLogger.debug( - // `DATASYNC: processAccountData saving ${goodAccounts.length} of ${this.combinedAccountData.length} records to db. noSyncData: ${noSyncData} noMatches: ${noMatches} missingTXs: ${missingTXs} handledButOk: ${handledButOk} otherMissingCase: ${otherMissingCase} outOfDateNoTxs: ${outOfDateNoTxs}` - // ) - // // failedHashes is a list of accounts that failed to match the hash reported by the server - // let failedHashes = await this.stateManager.checkAndSetAccountData(goodAccounts, 'syncNonGlobals:processAccountData', true) // repeatable form may need to call this in batches - // //this.stateManager.partitionStats.statsDataSummaryInit(goodAccounts) - // if (failedHashes.length > 1000) { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: processAccountData failed hashes over 1000: ${failedHashes.length} restarting sync process`) - // // state -> try another node. TODO record/eval/report blame? - // this.stateManager.recordPotentialBadnode() - // throw new Error('FailAndRestartPartition_processAccountDataFast_A') - // } - // if (failedHashes.length > 0) { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: processAccountData failed hashes: ${failedHashes.length} will have to download them again`) - // // TODO ? record/eval/report blame? 
- // this.stateManager.recordPotentialBadnode() - // this.failedAccounts = this.failedAccounts.concat(failedHashes) - // for (let accountId of failedHashes) { - // account = this.mapAccountData[accountId] - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`DATASYNC: processAccountData ${accountId} data: ${utils.stringifyReduce(account)}`) - - // if (account != null) { - // if (logFlags.verbose) this.mainLogger.debug(`DATASYNC: processAccountData adding account to list`) - // this.accountsWithStateConflict.push(account) - // } else { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`DATASYNC: processAccountData cant find data: ${accountId}`) - // if (accountId) { - // //this.accountsWithStateConflict.push({ address: accountId, }) //NOTE: fixed with refactor - // this.accountsWithStateConflict.push({ accountId: accountId, data: null, stateId: null, timestamp: 0 }) - // } - // } - // } - // } - - // await this.stateManager.writeCombinedAccountDataToBackups(goodAccounts, failedHashes) - - // this.combinedAccountData = [] // we can clear this now. - // } - - // async syncStateDataGlobalsFast(syncTracker: SyncTracker) { - // try { - // let partition = 'globals!' - - // let globalAccounts = [] - // let remainingAccountsToSync = [] - // this.partitionStartTimeStamp = Date.now() - - // if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals partition: ${partition} `) - - // this.readyforTXs = true - - // let globalReport: GlobalAccountReportResp = await this.getRobustGlobalReport() - - // let hasAllGlobalData = false - - // if (globalReport.accounts.length === 0) { - // if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals no global accounts `) - // return // no global accounts - // } - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals globalReport: ${utils.stringifyReduce(globalReport)} `) - - // let accountReportsByID: { [id: string]: { id: string; hash: string; timestamp: number } } = {} - // for (let report of globalReport.accounts) { - // remainingAccountsToSync.push(report.id) - - // accountReportsByID[report.id] = report - // } - // let accountData: Shardus.WrappedData[] = [] - // let accountDataById: { [id: string]: Shardus.WrappedData } = {} - // let globalReport2: GlobalAccountReportResp = { ready: false, combinedHash: '', accounts: [] } - // let maxTries = 10 - // while (hasAllGlobalData === false) { - // maxTries-- - // if (maxTries <= 0) { - // if (logFlags.error) this.mainLogger.error(`DATASYNC: syncStateDataGlobals max tries excceded `) - // return - // } - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals hasAllGlobalData === false `) - - // // Node Precheck! 
- // if (this.dataSourceNode == null || this.stateManager.isNodeValidForInternalMessage(this.dataSourceNode.id, 'syncStateDataGlobals', true, true) === false) { - // if (this.tryNextDataSourceNode('syncStateDataGlobals') == false) { - // break - // } - // continue - // } - - // let message = { accountIds: remainingAccountsToSync } - // let result = await this.p2p.ask(this.dataSourceNode, 'get_account_data_by_list', message) - - // if (result == null) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error('ASK FAIL syncStateTableData result == null') - // if (this.tryNextDataSourceNode('syncStateDataGlobals') == false) { - // break - // } - // continue - // } - // if (result.accountData == null) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error('ASK FAIL syncStateTableData result.accountData == null') - // if (this.tryNextDataSourceNode('syncStateDataGlobals') == false) { - // break - // } - // continue - // } - - // accountData = accountData.concat(result.accountData) - - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals get_account_data_by_list ${utils.stringifyReduce(result)} `) - - // globalReport2 = await this.getRobustGlobalReport() - // let accountReportsByID2: { [id: string]: { id: string; hash: string; timestamp: number } } = {} - // for (let report of globalReport2.accounts) { - // accountReportsByID2[report.id] = report - // } - - // hasAllGlobalData = true - // remainingAccountsToSync = [] - // for (let account of accountData) { - // accountDataById[account.accountId] = account - // //newer copies will overwrite older ones in this map - // } - // //check the full report for any missing data - // for (let report of globalReport2.accounts) { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals loop globalReport2.accounts `) - // let data = accountDataById[report.id] - // if (data == null) { - // //we dont have the data - // hasAllGlobalData = false - // remainingAccountsToSync.push(report.id) - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals remainingAccountsToSync data===null ${utils.makeShortHash(report.id)} `) - // } else if (data.stateId !== report.hash) { - // //we have the data but he hash is wrong - // hasAllGlobalData = false - // remainingAccountsToSync.push(report.id) - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals remainingAccountsToSync data.stateId !== report.hash ${utils.makeShortHash(report.id)} `) - // } - // } - // //set this report to the last report and continue. - // accountReportsByID = accountReportsByID2 - // } - - // let dataToSet = [] - // let cycleNumber = this.stateManager.currentCycleShardData.cycleNumber // Math.max(1, this.stateManager.currentCycleShardData.cycleNumber-1 ) //kinda hacky? - - // let goodAccounts: Shardus.WrappedData[] = [] - - // //Write the data! and set global memory data!. set accounts copy data too. 
- // for (let report of globalReport2.accounts) { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals loop globalReport2.accounts 2`) - // let accountData = accountDataById[report.id] - // if (accountData != null) { - // dataToSet.push(accountData) - // goodAccounts.push(accountData) - // if (this.stateManager.accountGlobals.globalAccountMap.has(report.id)) { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals has ${utils.makeShortHash(report.id)} hash: ${utils.makeShortHash(report.hash)} ts: ${report.timestamp}`) - // } else { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals setting ${utils.makeShortHash(report.id)} hash: ${utils.makeShortHash(report.hash)} ts: ${report.timestamp}`) - // // set the account in our table - // this.stateManager.accountGlobals.globalAccountMap.set(report.id, null) - // // push the time based backup count - // let accountId = report.id - // let data = accountData.data - // let timestamp = accountData.timestamp - // let hash = accountData.stateId - // let isGlobal = this.stateManager.accountGlobals.isGlobalAccount(accountId) - // let backupObj: Shardus.AccountsCopy = { accountId, data, timestamp, hash, cycleNumber, isGlobal } - // //if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug( `updateAccountsCopyTable acc.timestamp: ${timestamp} cycle computed:${cycleNumber} accountId:${utils.makeShortHash(accountId)}`) - // let globalBackupList: Shardus.AccountsCopy[] = this.stateManager.accountGlobals.getGlobalAccountBackupList(accountId) - // if (globalBackupList != null) { - // globalBackupList.push(backupObj) // sort and cleanup later. - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals push backup entry ${utils.makeShortHash(report.id)} hash: ${utils.makeShortHash(report.hash)} ts: ${report.timestamp}`) - // } - // } - // } - // } - - // let failedHashes = await this.stateManager.checkAndSetAccountData(dataToSet, 'syncStateDataGlobals', true) - - // if (logFlags.console) console.log('DBG goodAccounts', goodAccounts) - - // await this.stateManager.writeCombinedAccountDataToBackups(goodAccounts, failedHashes) - - // if (failedHashes && failedHashes.length > 0) { - // throw new Error('setting data falied no error handling for this yet') - // } - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals complete synced ${dataToSet.length} accounts `) - // } catch (error) { - // if (error.message.includes('FailAndRestartPartition')) { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataGlobals Error Failed at: ${error.stack}`) - // this.statemanager_fatal(`syncStateDataGlobals_ex_failandrestart`, 'DATASYNC: syncStateDataGlobals FailAndRestartPartition: ' + errorToStringFull(error)) - // await this.failandRestart() - // } else { - // this.statemanager_fatal(`syncStateDataGlobals_ex`, 'syncStateDataGlobals failed: ' + errorToStringFull(error)) - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: unexpected error. restaring sync:` + errorToStringFull(error)) - // await this.failandRestart() - // } - // } - - // this.globalAccountsSynced = true - // } - - // /** - // * check if account is newer than TX. - // * query StateTable to see if we alreayd have a record on this tx. 
- // * check if TX is older than account cache timestamp - // */ - // async testAccountTimesAndStateTable2(tx: Shardus.OpaqueTransaction, wrappedStates: WrappedStates) { - // let hasStateTableData = false - - // function tryGetAccountData(accountID: string) { - // return wrappedStates[accountID] - // } - - // try { - // let keysResponse = this.app.getKeyFromTransaction(tx) - // let { sourceKeys, targetKeys, timestamp } = keysResponse - // let sourceAddress, sourceState, targetState - - // // check account age to make sure it is older than the tx - // let failedAgeCheck = false - - // let accountKeys = Object.keys(wrappedStates) - // for (let key of accountKeys) { - // let accountEntry = tryGetAccountData(key) - // if (accountEntry.timestamp >= timestamp) { - // failedAgeCheck = true - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('testAccountTimesAndStateTable account has future state. id: ' + utils.makeShortHash(accountEntry.accountId) + ' time: ' + accountEntry.timestamp + ' txTime: ' + timestamp + ' delta: ' + (timestamp - accountEntry.timestamp)) - // } - // } - // if (failedAgeCheck) { - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('DATASYNC: testAccountTimesAndStateTable accounts have future state ' + timestamp) - // return { success: false, hasStateTableData } - // } - - // // TODO: even if we keep the code below this line, we should consider combining keys in a set first so that we dont - // // double up on work if a key is a source and target. - - // // check state table - // if (Array.isArray(sourceKeys) && sourceKeys.length > 0) { - // sourceAddress = sourceKeys[0] - // let accountStates = await this.storage.searchAccountStateTable(sourceAddress, timestamp) - // if (accountStates.length !== 0) { - // let accountEntry = tryGetAccountData(sourceAddress) - // if (accountEntry == null) { - // return { success: false, hasStateTableData } - // } - // sourceState = accountEntry.stateId - // hasStateTableData = true - // if (accountStates.length === 0 || accountStates[0].stateBefore !== sourceState) { - // if (accountStates[0].stateBefore === '0'.repeat(64)) { - // //sorta broken security hole. 
- // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('testAccountTimesAndStateTable ' + timestamp + 'bypass state comparision if before state was 00000: ' + utils.makeShortHash(sourceState) + ' stateTable: ' + utils.makeShortHash(accountStates[0].stateBefore) + ' address: ' + utils.makeShortHash(sourceAddress)) - // } else { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.console) console.log('testAccountTimesAndStateTable ' + timestamp + ' cant apply state 1') - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('testAccountTimesAndStateTable ' + timestamp + ' cant apply state 1 stateId: ' + utils.makeShortHash(sourceState) + ' stateTable: ' + utils.makeShortHash(accountStates[0].stateBefore) + ' address: ' + utils.makeShortHash(sourceAddress)) - // return { success: false, hasStateTableData } - // } - // } - // } - // } - // if (Array.isArray(targetKeys) && targetKeys.length > 0) { - // // targetAddress = targetKeys[0] - // for (let targetAddress of targetKeys) { - // let accountStates = await this.storage.searchAccountStateTable(targetAddress, timestamp) - - // if (accountStates.length !== 0) { - // hasStateTableData = true - // if (accountStates.length !== 0 && accountStates[0].stateBefore !== allZeroes64) { - // let accountEntry = tryGetAccountData(targetAddress) - - // if (accountEntry == null) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.console) console.log('testAccountTimesAndStateTable ' + timestamp + ' target state does not exist. address: ' + utils.makeShortHash(targetAddress)) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('testAccountTimesAndStateTable ' + timestamp + ' target state does not exist. address: ' + utils.makeShortHash(targetAddress) + ' accountDataList: ') - // this.statemanager_fatal(`testAccountTimesAndStateTable_noEntry`, 'testAccountTimesAndStateTable ' + timestamp + ' target state does not exist. 
address: ' + utils.makeShortHash(targetAddress) + ' accountDataList: ') // todo: consider if this is just an error - // // fail this because we already check if the before state was all zeroes - // return { success: false, hasStateTableData } - // } else { - // targetState = accountEntry.stateId - // if (accountStates[0].stateBefore !== targetState) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.console) console.log('testAccountTimesAndStateTable ' + timestamp + ' cant apply state 2') - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('testAccountTimesAndStateTable ' + timestamp + ' cant apply state 2 stateId: ' + utils.makeShortHash(targetState) + ' stateTable: ' + utils.makeShortHash(accountStates[0].stateBefore) + ' address: ' + utils.makeShortHash(targetAddress)) - // return { success: false, hasStateTableData } - // } - // } - // } - // } - // } - // } - // } catch (ex) { - // this.statemanager_fatal(`testAccountTimesAndStateTable_ex`, 'testAccountTimesAndStateTable failed: ' + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // } - // return { success: true, hasStateTableData } - // } - - // /** - // * testAccountTimes - // * check to see if any of the account data has timestamps newer or equal to the transaction - // * @param tx - // * @param wrappedStates - // */ - // testAccountTimes(tx: Shardus.OpaqueTransaction, wrappedStates: WrappedStates) { - // try { - // let keysResponse = this.app.getKeyFromTransaction(tx) - // let { sourceKeys, targetKeys, timestamp } = keysResponse - - // // check account age to make sure it is older than the tx - // let failedAgeCheck = false - - // let accountKeys = Object.keys(wrappedStates) - // for (let key of accountKeys) { - // let accountEntry = wrappedStates[key] - // if (accountEntry.timestamp >= timestamp) { - // failedAgeCheck = true - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('testAccountTimes account has future state. id: ' + utils.makeShortHash(accountEntry.accountId) + ' time: ' + accountEntry.timestamp + ' txTime: ' + timestamp + ' delta: ' + (timestamp - accountEntry.timestamp)) - // } - // } - // if (failedAgeCheck) { - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('DATASYNC: testAccountTimes accounts have future state ' + timestamp) - - // return { success: false } - // } - - // } catch (ex) { - // this.statemanager_fatal(`testAccountTimes_ex`, 'testAccountTimesAndStateTable failed: ' + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // } - // return { success: true } - // } - - // /** - // * tryPreApplyTransaction this will try to apply a transaction but will not commit the data - // * @param acceptedTX - // * @param hasStateTableData - // * @param repairing - // * @param filter - // * @param wrappedStates - // * @param localCachedData - // */ - // async tryPreApplyTransaction(acceptedTX: AcceptedTx, hasStateTableData: boolean, repairing: boolean, filter: AccountFilter, wrappedStates: WrappedResponses, localCachedData: LocalCachedData): Promise<{ passed: boolean; applyResult: string; applyResponse?: Shardus.ApplyResponse }> { - // let ourLockID = -1 - // let accountDataList - // let txTs = 0 - // let accountKeys = [] - // let ourAccountLocks = null - // let applyResponse: Shardus.ApplyResponse | null = null - // //have to figure out if this is a global modifying tx, since that impacts if we will write to global account. 
- // let isGlobalModifyingTX = false - - // try { - // let tx = acceptedTX.data - // // let receipt = acceptedTX.receipt - // let keysResponse = this.app.getKeyFromTransaction(tx) - // let { timestamp, debugInfo } = keysResponse - // txTs = timestamp - - // let queueEntry = this.getQueueEntry(acceptedTX.id) - // if (queueEntry != null) { - // if (queueEntry.globalModification === true) { - // isGlobalModifyingTX = true - // } - // } - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryPreApplyTransaction txid:${utils.stringifyReduce(acceptedTX.id)} ts:${timestamp} repairing:${repairing} hasStateTableData:${hasStateTableData} isGlobalModifyingTX:${isGlobalModifyingTX} Applying! debugInfo: ${debugInfo}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryPreApplyTransaction filter: ${utils.stringifyReduce(filter)}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryPreApplyTransaction acceptedTX: ${utils.stringifyReduce(acceptedTX)}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryPreApplyTransaction wrappedStates: ${utils.stringifyReduce(wrappedStates)}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryPreApplyTransaction localCachedData: ${utils.stringifyReduce(localCachedData)}`) - - // // TODO ARCH REVIEW: review use of fifo lock of accountModification and account keys. - // // I think we need to consider adding reader-writer lock support so that a non written to global account is a "reader" lock: check but dont aquire - // // consider if it is safe to axe the use of fifolock accountModification. - // if (repairing !== true) { - // // get a list of modified account keys that we will lock - // let { sourceKeys, targetKeys } = keysResponse - // for (let accountID of sourceKeys) { - // accountKeys.push(accountID) - // } - // for (let accountID of targetKeys) { - // accountKeys.push(accountID) - // } - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` tryPreApplyTransaction FIFO lock outer: ${utils.stringifyReduce(accountKeys)} `) - // ourAccountLocks = await this.stateManager.bulkFifoLockAccounts(accountKeys) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` tryPreApplyTransaction FIFO lock inner: ${utils.stringifyReduce(accountKeys)} ourLocks: ${utils.stringifyReduce(ourAccountLocks)}`) - // } - - // ourLockID = await this.stateManager.fifoLock('accountModification') - - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.console) console.log(`tryPreApplyTransaction ts:${timestamp} repairing:${repairing} Applying!`) - // this.applySoftLock = true - - // applyResponse = this.app.apply(tx as Shardus.IncomingTransaction, wrappedStates) - // let { stateTableResults, accountData: _accountdata } = applyResponse - // accountDataList = _accountdata - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryPreApplyTransaction post apply wrappedStates: ${utils.stringifyReduce(wrappedStates)}`) - - // this.applySoftLock = false - // } catch (ex) { - // /* prettier-ignore */ if(logFlags.error) if (logFlags.error) this.mainLogger.error(`tryPreApplyTransaction failed id:${utils.makeShortHash(acceptedTX.id)}: ` + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // /* prettier-ignore */ if(logFlags.error) if (logFlags.error) this.mainLogger.error(`tryPreApplyTransaction failed id:${utils.makeShortHash(acceptedTX.id)} 
${utils.stringifyReduce(acceptedTX)}`) - - // return { passed: false, applyResponse, applyResult: ex.message } - // } finally { - // this.stateManager.fifoUnlock('accountModification', ourLockID) - // if (repairing !== true) { - // if (ourAccountLocks != null) { - // this.stateManager.bulkFifoUnlockAccounts(accountKeys, ourAccountLocks) - // } - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` tryPreApplyTransaction FIFO unlock inner: ${utils.stringifyReduce(accountKeys)} ourLocks: ${utils.stringifyReduce(ourAccountLocks)}`) - // } - // } - - // return { passed: true, applyResponse, applyResult: 'applied' } - // } - - // /** - // * preApplyAcceptedTransaction will apply a transaction to the in memory data but will not save the results to the database yet - // * @param acceptedTX - // * @param wrappedStates - // * @param localCachedData - // * @param filter - // */ - // async preApplyAcceptedTransaction_old(acceptedTX: AcceptedTx, wrappedStates: WrappedResponses, localCachedData: LocalCachedData, filter: AccountFilter): Promise { - // if (this.queueStopped) return - // let tx = acceptedTX.data - // let keysResponse = this.app.getKeyFromTransaction(tx) - // let { sourceKeys, targetKeys, timestamp, debugInfo } = keysResponse - - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.console) console.log('preApplyAcceptedTransaction ' + timestamp + ' debugInfo:' + debugInfo) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('applyAcceptedTransaction ' + timestamp + ' debugInfo:' + debugInfo) - - // let allkeys: string[] = [] - // allkeys = allkeys.concat(sourceKeys) - // allkeys = allkeys.concat(targetKeys) - - // let accountTimestampsAreOK = true - - // for (let key of allkeys) { - // if (wrappedStates[key] == null) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.console) console.log(`preApplyAcceptedTransaction missing some account data. timestamp:${timestamp} key: ${utils.makeShortHash(key)} debuginfo:${debugInfo}`) - // return { applied: false, passed: false, applyResult: '', reason: 'missing some account data' } - // } else { - // let wrappedState = wrappedStates[key] - // wrappedState.prevStateId = wrappedState.stateId - // wrappedState.prevDataCopy = utils.deepCopy(wrappedState.data) - - // // important to update the wrappedState timestamp here to prevent bad timestamps from propagating the system - // let { timestamp: updatedTimestamp, hash: updatedHash } = this.app.getTimestampAndHashFromAccount(wrappedState.data) - // wrappedState.timestamp = updatedTimestamp - - // // check if current account timestamp is too new for this TX - // if(wrappedState.timestamp >= timestamp){ - // accountTimestampsAreOK = false - // break; - // } - // } - // } - - // // // TODO ARCH REVIEW: the function does some slow stuff in terms of DB access. can we replace this with accounts cache functionality? - // // // old note: todo review what we are checking here. - // // let { success } = this.testAccountTimes(tx, wrappedStates) - // let hasStateTableData = false // todo eliminate this. 
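// Illustrative sketch (not part of this patch): the account-timestamp pre-check that the
// deleted preApplyAcceptedTransaction_old performs above, shown standalone. The
// WrappedAccount shape and the getTimestampAndHash callback are assumptions made for this
// example, not the real Shardus types.
interface WrappedAccount {
  accountId: string
  timestamp: number
  data: unknown
}

function accountTimestampsAreOlderThanTx(
  txTimestamp: number,
  keys: string[],
  wrappedStates: Record<string, WrappedAccount>,
  getTimestampAndHash: (data: unknown) => { timestamp: number; hash: string }
): { ok: boolean; reason?: string } {
  for (const key of keys) {
    const wrapped = wrappedStates[key]
    if (wrapped == null) {
      // A transaction cannot be pre-applied if any referenced account is missing.
      return { ok: false, reason: `missing account data for ${key}` }
    }
    // Refresh the timestamp from the account payload so a stale wrapper value cannot
    // let a bad timestamp propagate through the system.
    wrapped.timestamp = getTimestampAndHash(wrapped.data).timestamp
    if (wrapped.timestamp >= txTimestamp) {
      // The account already has state as new as (or newer than) this transaction.
      return { ok: false, reason: `account ${key} has future state` }
    }
  }
  return { ok: true }
}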
- - // if (!accountTimestampsAreOK) { - // if (logFlags.verbose) this.mainLogger.debug('preApplyAcceptedTransaction pretest failed: ' + timestamp) - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('tx_preapply_rejected 1', `${acceptedTX.id}`, `Transaction: ${utils.stringifyReduce(acceptedTX)}`) - // return { applied: false, passed: false, applyResult: '', reason: 'preApplyAcceptedTransaction pretest failed, TX rejected' } - // } - - // // TODO STATESHARDING4 I am not sure if this really needs to be split into a function anymore. - // // That mattered with data repair in older versions of the code, but that may be the wrong thing to do now - // let preApplyResult = await this.tryPreApplyTransaction(acceptedTX, hasStateTableData, false, filter, wrappedStates, localCachedData) - - // if (preApplyResult) { - // if (logFlags.verbose) this.mainLogger.debug('preApplyAcceptedTransaction SUCCEDED ' + timestamp) - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('tx_preapplied', `${acceptedTX.id}`, `AcceptedTransaction: ${utils.stringifyReduce(acceptedTX)}`) - - // } else { - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('tx_preapply_rejected 3', `${acceptedTX.id}`, `Transaction: ${utils.stringifyReduce(acceptedTX)}`) - // } - - // return { applied: true, passed: preApplyResult.passed, applyResult: preApplyResult.applyResult, reason: 'apply result', applyResponse: preApplyResult.applyResponse } - // } - - // async commitConsensedTransaction_old(applyResponse: Shardus.ApplyResponse, acceptedTX: AcceptedTx, hasStateTableData: boolean, repairing: boolean, filter: AccountFilter, wrappedStates: WrappedResponses, localCachedData: LocalCachedData): Promise { - // let ourLockID = -1 - // let accountDataList - // let txTs = 0 - // let accountKeys = [] - // let ourAccountLocks = null - - // //have to figure out if this is a global modifying tx, since that impacts if we will write to global account. - // let isGlobalModifyingTX = false - // let savedSomething = false - // try { - // let tx = acceptedTX.data - // // let receipt = acceptedTX.receipt - // let keysResponse = this.app.getKeyFromTransaction(tx) - // let { timestamp, debugInfo } = keysResponse - // txTs = timestamp - - // let queueEntry = this.getQueueEntry(acceptedTX.id) - // if (queueEntry != null) { - // if (queueEntry.globalModification === true) { - // isGlobalModifyingTX = true - // } - // } - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`commitConsensedTransaction ts:${timestamp} repairing:${repairing} hasStateTableData:${hasStateTableData} isGlobalModifyingTX:${isGlobalModifyingTX} Applying! debugInfo: ${debugInfo}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`commitConsensedTransaction filter: ${utils.stringifyReduce(filter)}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`commitConsensedTransaction acceptedTX: ${utils.stringifyReduce(acceptedTX)}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`commitConsensedTransaction wrappedStates: ${utils.stringifyReduce(wrappedStates)}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`commitConsensedTransaction localCachedData: ${utils.stringifyReduce(localCachedData)}`) - - // // TODO ARCH REVIEW: review use of fifo lock of accountModification and account keys. 
(more notes in tryPreApplyTransaction() above ) - // if (repairing !== true) { - // // get a list of modified account keys that we will lock - // let { sourceKeys, targetKeys } = keysResponse - // for (let accountID of sourceKeys) { - // accountKeys.push(accountID) - // } - // for (let accountID of targetKeys) { - // accountKeys.push(accountID) - // } - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(`commitConsensedTransaction FIFO lock outer: ${utils.stringifyReduce(accountKeys)} `) - // ourAccountLocks = await this.stateManager.bulkFifoLockAccounts(accountKeys) - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(`commitConsensedTransaction FIFO lock inner: ${utils.stringifyReduce(accountKeys)} ourLocks: ${utils.stringifyReduce(ourAccountLocks)}`) - // } - - // ourLockID = await this.stateManager.fifoLock('accountModification') - - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.console) console.log(`commitConsensedTransaction ts:${timestamp} repairing:${repairing} Applying!`) - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('APPSTATE: tryApplyTransaction ' + timestamp + ' Applying!' + ' source: ' + utils.makeShortHash(sourceAddress) + ' target: ' + utils.makeShortHash(targetAddress) + ' srchash_before:' + utils.makeShortHash(sourceState) + ' tgtHash_before: ' + utils.makeShortHash(targetState)) - // this.applySoftLock = true - - // let { stateTableResults, accountData: _accountdata } = applyResponse - // accountDataList = _accountdata - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`commitConsensedTransaction post apply wrappedStates: ${utils.stringifyReduce(wrappedStates)}`) - - // let note = `setAccountData: tx:${queueEntry.logID} in commitConsensedTransaction. 
` - - // // wrappedStates are side effected for now - // savedSomething = await this.stateManager.setAccount(wrappedStates, localCachedData, applyResponse, isGlobalModifyingTX, filter, note) - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`commitConsensedTransaction savedSomething: ${savedSomething}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`commitConsensedTransaction accountData[${accountDataList.length}]: ${utils.stringifyReduce(accountDataList)}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`commitConsensedTransaction stateTableResults[${stateTableResults.length}]: ${utils.stringifyReduce(stateTableResults)}`) - - // this.applySoftLock = false - // // only write our state table data if we dont already have it in the db - // //if (hasStateTableData === false) { - // for (let stateT of stateTableResults) { - // // we have to correct this because it now gets stomped in the vote - // let wrappedRespose = wrappedStates[stateT.accountId] - // stateT.stateBefore = wrappedRespose.prevStateId - - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.console) console.log('writeStateTable ' + utils.makeShortHash(stateT.accountId) + ' accounts total' + accountDataList.length) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('writeStateTable ' + utils.makeShortHash(stateT.accountId) + ' before: ' + utils.makeShortHash(stateT.stateBefore) + ' after: ' + utils.makeShortHash(stateT.stateAfter) + ' txid: ' + utils.makeShortHash(acceptedTX.id) + ' ts: ' + acceptedTX.timestamp) - // } - // await this.storage.addAccountStates(stateTableResults) - // // //want to confirm that we pretty much alway take this branch - // // //pretty sure we would not have this data now - // // nestedCountersInstance.countEvent('stateManager', 'txCommit hasOldStateTable = false') - // // } else { - // // nestedCountersInstance.countEvent('stateManager', 'txCommit hasOldStateTable = true') - // // } - - // // post validate that state ended up correctly? - - // // write the accepted TX to storage - // this.storage.addAcceptedTransactions([acceptedTX]) - - // // endpoint to allow dapp to execute something that depends on a transaction being approved. - // this.app.transactionReceiptPass(acceptedTX.data, wrappedStates, applyResponse) - // } catch (ex) { - // this.statemanager_fatal(`commitConsensedTransaction_ex`, 'commitConsensedTransaction failed: ' + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`commitConsensedTransaction failed id:${utils.makeShortHash(acceptedTX.id)} ${utils.stringifyReduce(acceptedTX)}`) - // if (applyResponse) { - // // && savedSomething){ - // // TSConversion do we really want to record this? 
- // // if (!repairing) this.stateManager.partitionObjects.tempRecordTXByCycle(txTs, acceptedTX, false, applyResponse, isGlobalModifyingTX, savedSomething) - // // record no-op state table fail: - // } else { - // // this.fatalLogger.fatal('tryApplyTransaction failed: applyResponse == null') - // } - - // return { success: false } - // } finally { - // this.stateManager.fifoUnlock('accountModification', ourLockID) - // if (repairing !== true) { - // if (ourAccountLocks != null) { - // this.stateManager.bulkFifoUnlockAccounts(accountKeys, ourAccountLocks) - // } - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`commitConsensedTransaction FIFO unlock inner: ${utils.stringifyReduce(accountKeys)} ourLocks: ${utils.stringifyReduce(ourAccountLocks)}`) - // } - // } - - // // have to wrestle with the data a bit so we can backup the full account and not jsut the partial account! - // // let dataResultsByKey = {} - // let dataResultsFullList = [] - // for (let wrappedData of applyResponse.accountData) { - // // if (wrappedData.isPartial === false) { - // // dataResultsFullList.push(wrappedData.data) - // // } else { - // // dataResultsFullList.push(wrappedData.localCache) - // // } - // if (wrappedData.localCache != null) { - // dataResultsFullList.push(wrappedData) - // } - // // dataResultsByKey[wrappedData.accountId] = wrappedData.data - // } - - // // this is just for debug!!! - // if (dataResultsFullList[0] == null) { - // for (let wrappedData of applyResponse.accountData) { - // if (wrappedData.localCache != null) { - // dataResultsFullList.push(wrappedData) - // } - // // dataResultsByKey[wrappedData.accountId] = wrappedData.data - // } - // } - // // if(dataResultsFullList == null){ - // // throw new Error(`tryApplyTransaction (dataResultsFullList == null ${txTs} ${utils.stringifyReduce(acceptedTX)} `); - // // } - - // // TSConversion verified that app.setAccount calls shardus.applyResponseAddState that adds hash and txid to the data and turns it into AccountData - // let upgradedAccountDataList: Shardus.AccountData[] = (dataResultsFullList as unknown) as Shardus.AccountData[] - - // // TODO ARCH REVIEW: do we still need this table. if so do we need to await writing to it? - // await this.stateManager.updateAccountsCopyTable(upgradedAccountDataList, repairing, txTs) - - // if (!repairing) { - // //if(savedSomething){ - // //this.stateManager.partitionObjects.tempRecordTXByCycle(txTs, acceptedTX, true, applyResponse, isGlobalModifyingTX, savedSomething) - // //} - - // //WOW this was not good! 
had acceptedTX.transactionGroup[0].id - // //if (this.p2p.getNodeId() === acceptedTX.transactionGroup[0].id) { - - // let queueEntry: QueueEntry | null = this.getQueueEntry(acceptedTX.id) - // if (queueEntry != null && queueEntry.transactionGroup != null && this.p2p.getNodeId() === queueEntry.transactionGroup[0].id) { - // this.stateManager.eventEmitter.emit('txProcessed') - // } - // this.stateManager.eventEmitter.emit('txApplied', acceptedTX) - - // this.stateManager.partitionStats.statsTxSummaryUpdate(queueEntry.cycleToRecordOn, queueEntry) - // for (let wrappedData of applyResponse.accountData) { - // //this.stateManager.partitionStats.statsDataSummaryUpdate(wrappedData.prevDataCopy, wrappedData) - - // let queueData = queueEntry.collectedData[wrappedData.accountId] - - // if (queueData != null) { - // if (queueData.accountCreated) { - // //account was created to do a summary init - // //this.stateManager.partitionStats.statsDataSummaryInit(queueEntry.cycleToRecordOn, queueData); - // this.stateManager.partitionStats.statsDataSummaryInitRaw(queueEntry.cycleToRecordOn, queueData.accountId, queueData.prevDataCopy) - // } - // this.stateManager.partitionStats.statsDataSummaryUpdate2(queueEntry.cycleToRecordOn, queueData.prevDataCopy, wrappedData) - // } else { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`commitConsensedTransaction failed to get account data for stats ${wrappedData.accountId}`) - // } - // } - // } - - // return { success: true } - // } - - // /** - // * @param {ShardGlobals} shardGlobals - // * @param {number} s1 stored partition left lower bound (always 0) - // * @param {number} e1 stored partition left upper bound - // * @param {number} s2 stored partition right lower bound - // * @param {number} e2 stored partition right upper bound (always highest parition) - // * @param {number} start start of consensus range - // * @param {number} end end of consensus range - // * @returns {{s1:number; e1: number; s2: number; e2: number; split: boolean; changed: boolean }} - // */ - // static mergeDiverseRanges(shardGlobals: ShardGlobals, s1: number, e1: number, s2: number, e2: number, start: number, end: number): MergeResults { - // let results = { s1, e1, s2, e2, split: true, changed: false } - - // //These refer to the consensus range haning off the edge of the stored range. i.e. consenus going further than stored. - // let leftOverlap = false - // let rightOverlap = false - // let leftOverhang = false - // let rightOverhang = false - - // let nonSplitConsensusRange = start <= end - - // let storedfullyOverlapsConsensus = false - - // // check overlap in simple case where consensus does not wrap - // if (nonSplitConsensusRange) { - // // left overlap. Really this means that there is some overlap of consenus and the left side range of stored partitions - // if (s1 <= start && e1 >= start) { - // // number = partition id. 
s = stored partition c = consensus partition ^ = where the test point or points are - // // 0123456789 - // // sss s - // // ccc - // // ^ - // leftOverlap = true - // } - // // right overlap Really this means that there is some overlap of consenus and the right side range of stored partitions - // if (s2 <= end && e2 >= end) { - // // 0123456789 - // // s sss - // // ccc - // // ^ - // rightOverlap = true - // } - - // // full overlap left - // if (s1 <= start && e1 >= end) { - // // 0123456789 - // // sssss s - // // ccc - // // ^ ^ - // storedfullyOverlapsConsensus = true - // } - // // full overlap right - // if (s2 <= start && e2 >= end) { - // // 0123456789 - // // s sssss - // // ccc - // // ^ ^ - // storedfullyOverlapsConsensus = true - // } - // } - - // // If the consensus range wraps across our ranges then we need to check additional cases - // if (nonSplitConsensusRange === false) { - // if (s1 <= end && e1 >= end && s2 <= start && e2 >= start) { - // // 0123456789 - // // ssss ss - // // ccc c - // // ^ ^ ^ - // storedfullyOverlapsConsensus = true - // } - - // //cases not caught yet? - // // 0123456789 - // // ss sss - // // c cccc - // // - - // // 0123456789 - // // ss sss - // // ccc cc - // // - - // if (end > e1 && end < s2) { - // // 0123456789 - // // ss sss - // // ccc cc - // // ^ - // leftOverhang = true - // } - // // right overlap Really this means that there is some overlap of consenus and the right side range of stored partitions - // if (start < s2 && start > e1) { - // // number = partition id. s = stored partition c = consensus partition ^ = where the test point or points are - // // 0123456789 - // // ss sss - // // c cccc - // // ^ - // rightOverhang = true - // } - // } - - // // nothing to do ther is full overlap - // if (storedfullyOverlapsConsensus === true) { - // return results - // } - - // if (leftOverlap === false && rightOverlap === false && nonSplitConsensusRange === true) { - // let partitionDistanceStart = ShardFunctions.circularDistance(start, e1, shardGlobals.numPartitions) - // let partitionDistanceEnd = ShardFunctions.circularDistance(end, s2, shardGlobals.numPartitions) - - // if (partitionDistanceStart < partitionDistanceEnd) { - // // 0123456789 - // // ss ss - // // cc - // // rrrrr rr r= result range - // if (results.e1 < end) { - // results.e1 = end - // results.changed = true - // return results - // } - // } else { - // // 0123456789 - // // ss ss - // // cc - // // rr rrrrr r= result range - // if (results.s2 > start) { - // results.s2 = start - // results.changed = true - // return results - // } - // } - // } - - // if (leftOverlap === true && rightOverlap === true && nonSplitConsensusRange === true) { - // // if left and right overlap then all partitions are stored: - // // 0123456789 - // // ss sss - // // ccccccc - // // rrrrrrrrrr r= result range - // if (results.e1 !== results.e2) { - // results.split = false - // results.e1 = results.e2 // s1 -> e1 covers entire range - // results.changed = true - // return results - // } - // } - - // if (leftOverlap) { - // // 0123456789 - // // sss s - // // ccc - // // rrrr r - // if (results.e1 < end) { - // results.e1 = end - // results.changed = true - // } - // } - // if (rightOverlap) { - // // 0123456789 - // // sss s - // // ccc - // // rrrr r - // if (results.s2 > start) { - // results.s2 = start - // results.changed = true - // } - // } - - // if (leftOverhang) { - // // 0123456789 - // // ss sss - // // ccc cc - // // rrr rrr r= result range - // if (results.e1 < end) { - // 
results.e1 = end - // results.changed = true - // } - // } - // if (rightOverhang) { - // // 0123456789 - // // s sss - // // ccc - // // r rrrr r= result range - // if (results.s2 > start) { - // results.s2 = start - // results.changed = true - // } - // } - - // return results - // } - - // //TODO TSConversion get a better output type than any.. switch to an object maybe. - // static addressToPartition_old(shardGlobals: ShardGlobals, address: string): { homePartition: number; addressNum: number } { - // let numPartitions = shardGlobals.numPartitions - // let addressNum = parseInt(address.slice(0, 8), 16) - // let homePartition = Math.floor(numPartitions * (addressNum / 0xffffffff)) - // return { homePartition, addressNum } - // } - - // // todo memoize this per cycle!!! - // // TODO TSConversion partitionMax was equal to null before as optional param. what to do now? - // static partitionToAddressRange2_old(shardGlobals: ShardGlobals, partition: number, paritionMax?: number): AddressRange { - // let result = {} as AddressRange - // result.partition = partition - // let startAddr = 0xffffffff * (partition / shardGlobals.numPartitions) - // startAddr = Math.ceil(startAddr) - - // result.p_low = partition - // //result.p_high = paritionMax // was a TS error - - // let endPartition = partition + 1 - // if (paritionMax) { - // result.p_high = paritionMax - // endPartition = paritionMax + 1 - // } else { - // //result.p_high = partition - // } - // result.partitionEnd = endPartition - // let endAddr = 0xffffffff * (endPartition / shardGlobals.numPartitions) - // endAddr = Math.ceil(endAddr) - - // // if(endAddr > 0){ - // // endAddr = endAddr - 1 - // // } - - // // it seems we dont need/want this code: - // // if (paritionMax === null) { - // // endAddr-- // - 1 // subtract 1 so we don't go into the nex partition - // // } - - // result.startAddr = startAddr - // result.endAddr = endAddr - - // result.low = ('00000000' + startAddr.toString(16)).slice(-8) + '0'.repeat(56) - // result.high = ('00000000' + endAddr.toString(16)).slice(-8) + 'f'.repeat(56) - - // return result - // } - - // // TSConversion fix up any[] - // static getNodesThatCoverRange(shardGlobals: ShardGlobals, lowAddress: string, highAddress: string, exclude: string[], activeNodes: Shardus.Node[]) { - // // calculate each nodes address position. - // // calculate if the nodes reach would cover our full range listed. - // // could we use start + delete to avoid wrapping? - - // let circularDistance = function (a: number, b: number, max: number): number { - // let directDist = Math.abs(a - b) - - // let wrapDist = directDist - // // if (a < b) { - // // wrapDist = Math.abs(a + (max - b)) - // // } else if (b < a) { - // // wrapDist = Math.abs(b + (max - a)) - // // } - - // let wrapDist1 = Math.abs(a + (max - b)) - // let wrapDist2 = Math.abs(b + (max - a)) - // wrapDist = Math.min(wrapDist1, wrapDist2) - - // return Math.min(directDist, wrapDist) - // } - - // let numPartitions = shardGlobals.numPartitions - // let nodeLookRange = shardGlobals.nodeLookRange - - // let range = [] as any[] - - // let lowAddressNum = parseInt(lowAddress.slice(0, 8), 16) // assume trailing 0s - // let highAddressNum = parseInt(highAddress.slice(0, 8), 16) + 1 // assume trailng fffs - - // // todo start and end loop at smarter areas for efficieny reasones! - // let distLow = 0 - // let distHigh = 0 - - // // This isn't a great loop to have for effiency reasons. 
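// Illustrative sketch (not part of this patch): the address-to-partition mapping and the
// ring distance used by the deleted helpers above (addressToPartition_old and the
// circularDistance closure in getNodesThatCoverRange), written as standalone functions.
function addressToPartition(numPartitions: number, address: string): { homePartition: number; addressNum: number } {
  // Only the first 8 hex characters (32 bits) of the address decide the home partition.
  const addressNum = parseInt(address.slice(0, 8), 16)
  const homePartition = Math.floor(numPartitions * (addressNum / 0xffffffff))
  return { homePartition, addressNum }
}

function circularDistance(a: number, b: number, max: number): number {
  // Distance on a ring of size max: the shorter of going directly or wrapping around.
  const directDist = Math.abs(a - b)
  const wrapDist = Math.min(Math.abs(a + (max - b)), Math.abs(b + (max - a)))
  return Math.min(directDist, wrapDist)
}

// Example: with 16 partitions, an address whose first 8 hex digits are 80000000
// lands in partition 8: addressToPartition(16, '80000000' + '0'.repeat(56)).homePartition === 8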
- // for (let i = 0; i < activeNodes.length; i++) { - // let node = activeNodes[i] - // if (exclude.includes(node.id)) { - // continue - // } - - // // could look up node by address?? - - // // calculate node middle address.. - // let nodeAddressNum = parseInt(node.id.slice(0, 8), 16) - // // Fix this the center of a partition boundry?? - // let homePartition = Math.floor(numPartitions * (nodeAddressNum / 0xffffffff)) - // let centeredAddress = Math.floor(((homePartition + 0.5) * 0xffffffff) / numPartitions) - - // // Math.min(Math.abs(centeredAddress - lowAddressNum), Math.abs(centeredAddress - lowAddressNum)) - - // distLow = circularDistance(centeredAddress, lowAddressNum, 0xffffffff) - nodeLookRange - // distHigh = circularDistance(centeredAddress, highAddressNum, 0xffffffff) - nodeLookRange - // // if (circularDistance(centeredAddress, lowAddressNum, 0xffffffff) > nodeLookRange) { - // // continue - // // } - // // if (circularDistance(centeredAddress, highAddressNum, 0xffffffff) > nodeLookRange) { - // // continue - // // } - - // if (distLow > 0 && distHigh > 0) { - // continue - // } - - // // if (Math.abs(centeredAddress - lowAddressNum) > nodeLookRange) { - // // continue - // // } - // // if (Math.abs(centeredAddress - highAddressNum) > nodeLookRange) { - // // continue - // // } - // // we are in range! - // range.push(node) - // } - // return range - // } - - // /** - // * This will find two address that are close to what we want - // * @param {string} address - // * @returns {{address1:string; address2:string}} - // * - // */ - // static getNextAdjacentAddresses_wip(address: string) { - // let addressNum = parseInt(address.slice(0, 8), 16) - - // let addressPrefixHex = ShardFunctions.leadZeros8(addressNum.toString(16)) - - // let trail = address.slice(8, 64) - - // if (trail === 'f'.repeat(56)) { - // //If we are not at the end look one ahead - // if (addressNum < 4294967295) { - // addressNum = addressNum + 1 - // } - - // let addressPrefixHex2 = ShardFunctions.leadZeros8(addressNum.toString(16)) - - // let address1 = addressPrefixHex + 'f'.repeat(56) - // let address2 = addressPrefixHex2 + '0'.repeat(56) - // return { address1, address2 } - // } else { - // // if(trail === '0'.repeat(56)){ - // //If we are not at the end look one ahead - // let addressPrefixHex2 = ShardFunctions.leadZeros8(addressNum.toString(16)) - - // let address1 = addressPrefixHex + '0'.repeat(56) - // let address2 = addressPrefixHex2 + '0'.repeat(55) + '1' - // return { address1, address2 } - // } - // //else real math. - // } - - // /** - // * getShardDataForCycle - // * @param {number} cycleNumber - // * @returns {CycleShardData} - // */ - // getShardDataForCycle(cycleNumber: number): CycleShardData | null { - // if (this.shardValuesByCycle == null) { - // return null - // } - // let shardData = this.shardValuesByCycle.get(cycleNumber) - // //kind of silly but dealing with undefined response from get TSConversion: todo investigate merit of |null vs. |undefined conventions - // if (shardData != null) { - // return shardData - // } - // return null - // } - - // interruptibleSleep(ms: number, targetTime: number) { - // let resolveFn: any = null //TSConversion just setting this to any for now. 
- // let promise = new Promise((resolve) => { - // resolveFn = resolve - // setTimeout(resolve, ms) - // }) - // return { promise, resolveFn, targetTime } - // } - - // interruptSleepIfNeeded(targetTime: number) { - // if (this.sleepInterrupt) { - // if (targetTime < this.sleepInterrupt.targetTime) { - // this.sleepInterrupt.resolveFn() - // } - // } - // } - - // // todo refactor: move to p2p? - // getRandomNodesInRange(count: number, lowAddress: string, highAddress: string, exclude: string[]): Shardus.Node[] { - // const allNodes = activeOthersByIdOrder - // this.lastActiveNodeCount = allNodes.length - // utils.shuffleArray(allNodes) - // let results = [] as Shardus.Node[] - // if (allNodes.length <= count) { - // count = allNodes.length - // } - // for (const node of allNodes) { - // if (node.id >= lowAddress && node.id <= highAddress) { - // if (exclude.includes(node.id) === false) { - // results.push(node) - // if (results.length >= count) { - // return results - // } - // } - // } - // } - // return results - // } - - // // This will make calls to app.getAccountDataByRange but if we are close enough to real time it will query any newer data and return lastUpdateNeeded = true - // async getAccountDataByRangeSmart_App(accountStart: string, accountEnd: string, tsStart: number, maxRecords: number): Promise { - // let tsEnd = Date.now() - // let wrappedAccounts = await this.app.getAccountDataByRange(accountStart, accountEnd, tsStart, tsEnd, maxRecords) - // let lastUpdateNeeded = false - // let wrappedAccounts2: WrappedStateArray = [] - // let highestTs = 0 - // let delta = 0 - // // do we need more updates - // if (wrappedAccounts.length === 0) { - // lastUpdateNeeded = true - // } else { - // // see if our newest record is new enough - // highestTs = 0 - // for (let account of wrappedAccounts) { - // if (account.timestamp > highestTs) { - // highestTs = account.timestamp - // } - // } - // delta = tsEnd - highestTs - // // if the data we go was close enough to current time then we are done - // // may have to be carefull about how we tune this value relative to the rate that we make this query - // // we should try to make this query more often then the delta. - // if (logFlags.verbose) console.log('delta ' + delta) - // // increased allowed delta to allow for a better chance to catch up - // if (delta < this.queueSitTime * 2) { - // let tsStart2 = highestTs - // wrappedAccounts2 = await this.app.getAccountDataByRange(accountStart, accountEnd, tsStart2, Date.now(), 10000000) - // lastUpdateNeeded = true - // } - // } - // return { wrappedAccounts, lastUpdateNeeded, wrappedAccounts2, highestTs, delta } - // } - - // /** - // * storePartitionReceipt - // * TODO sharding perf. may need to do periodic cleanup of this and other maps so we can remove data from very old cycles - // * TODO production need to do something with this data - // * @param {number} cycleNumber - // * @param {PartitionReceipt} partitionReceipt - // */ - // storePartitionReceipt(cycleNumber: number, partitionReceipt: PartitionReceipt) { - // let key = 'c' + cycleNumber - - // if (!this.partitionReceiptsByCycleCounter) { - // this.partitionReceiptsByCycleCounter = {} - // } - // if (!this.partitionReceiptsByCycleCounter[key]) { - // this.partitionReceiptsByCycleCounter[key] = [] - // } - // this.partitionReceiptsByCycleCounter[key].push(partitionReceipt) - - // // if (this.debugFeatureOld_partitionReciepts === true) { - // // // this doesnt really send to the archiver but it it does dump reciepts to logs. 
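// Illustrative sketch (not part of this patch): the interruptible sleep helper from the
// deleted code above, written standalone with a typed resolver instead of `any`.
function interruptibleSleep(
  ms: number,
  targetTime: number
): { promise: Promise<void>; resolveFn: () => void; targetTime: number } {
  let resolveFn: () => void = () => undefined
  const promise = new Promise<void>((resolve) => {
    resolveFn = resolve
    // The timer resolves normally after ms, but a caller holding resolveFn can wake
    // the sleeper early (e.g. when a nearer targetTime shows up).
    setTimeout(resolve, ms)
  })
  return { promise, resolveFn, targetTime }
}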
- // // this.depricated.trySendAndPurgeReceiptsToArchives(partitionReceipt) - // // } - // } - - // /** - // * getCycleNumberFromTimestamp - // * cycle numbers are calculated from the queue entry timestamp, but an offset is needed so that we can - // * finalize cycles in time. when you start a new cycle there could still be unfinished transactions for - // * syncSettleTime milliseconds. - // * - // * returns a negative number code if we can not determine the cycle - // */ - // getCycleNumberFromTimestamp(timestamp: number, allowOlder: boolean = true): number { - // let offsetTimestamp = timestamp + this.syncSettleTime - - // if (timestamp < 1 || timestamp == null) { - // let stack = new Error().stack - // this.statemanager_fatal(`getCycleNumberFromTimestamp ${timestamp}`, `getCycleNumberFromTimestamp ${timestamp} , ${stack}`) - // } - - // // const cycle = CycleChain.getCycleByTimestamp(offsetTimestamp) - // // if (cycle != null && cycle.counter != null) { - // // nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', 'first lookup') - // // return cycle.counter - // // } - - // //currentCycleShardData - // if (this.currentCycleShardData.timestamp <= offsetTimestamp && offsetTimestamp < this.currentCycleShardData.timestampEndCycle) { - // if (this.currentCycleShardData.cycleNumber == null) { - // this.statemanager_fatal('getCycleNumberFromTimestamp failed. cycleNumber == null', 'this.currentCycleShardData.cycleNumber == null') - // /* prettier-ignore */ nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', 'currentCycleShardData.cycleNumber fail') - // const cycle = CycleChain.getCycleByTimestamp(offsetTimestamp) - // console.log("CycleChain.getCycleByTimestamp", cycle) - // if (cycle != null) { - // this.statemanager_fatal('getCycleNumberFromTimestamp failed fatal redeemed', 'this.currentCycleShardData.cycleNumber == null, fatal redeemed') - // /* prettier-ignore */ nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', 'currentCycleShardData.cycleNumber redeemed') - // return cycle.counter - // } else { - // //debug only!!! 
- // let cycle2 = CycleChain.getCycleByTimestamp(offsetTimestamp) - // this.statemanager_fatal('getCycleNumberFromTimestamp failed fatal not redeemed', 'getCycleByTimestamp cycleNumber == null not redeemed') - // /* prettier-ignore */ nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', 'currentCycleShardData.cycleNumber failed to redeem') - // } - // } else { - // return this.currentCycleShardData.cycleNumber - // } - // } - - // if (this.currentCycleShardData.cycleNumber == null) { - // /* prettier-ignore */ nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', 'this.currentCycleShardData.cycleNumber == null') - // this.statemanager_fatal('getCycleNumberFromTimestamp: currentCycleShardData.cycleNumber == null', `getCycleNumberFromTimestamp: currentCycleShardData.cycleNumber == null ${this.currentCycleShardData.cycleNumber} timestamp:${timestamp}`) - - // } - - // //is it in the future - // if (offsetTimestamp >= this.currentCycleShardData.timestampEndCycle) { - // let cycle: Shardus.Cycle = CycleChain.getNewest() - - // let timePastCurrentCycle = offsetTimestamp - this.currentCycleShardData.timestampEndCycle - // let cyclesAhead = Math.ceil(timePastCurrentCycle / (cycle.duration * 1000)) - // nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', `+${cyclesAhead}`) - - // return this.currentCycleShardData.cycleNumber + cyclesAhead - - // // let endOfNextCycle = this.currentCycleShardData.timestampEndCycle + cycle.duration * 1000 - // // if (offsetTimestamp < endOfNextCycle /*+ this.syncSettleTime*/) { - // // nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', '+1') - // // return this.currentCycleShardData.cycleNumber + 1 - // // } else if (offsetTimestamp < endOfNextCycle + /*this.syncSettleTime +*/ cycle.duration * 1000) { - // // nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', '+2') - // // //if (logFlags.error) this.mainLogger.error(`getCycleNumberFromTimestamp fail2: endOfNextCycle:${endOfNextCycle} offsetTimestamp:${offsetTimestamp} timestamp:${timestamp}`) - // // return this.currentCycleShardData.cycleNumber + 2 - // // } else { - // // nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', 'too far') - // // this.statemanager_fatal('getCycleNumberFromTimestamp: too far in future',`getCycleNumberFromTimestamp fail: too far in future. endOfNextCycle:${endOfNextCycle} - // // offsetTimestamp:${offsetTimestamp} timestamp:${timestamp} now:${Date.now()} end of cycle age: ${(Date.now() - endOfNextCycle)/1000}`) - // // //too far in the future - // // return -2 - // // } - // } - // if (allowOlder === true) { - // //cycle is in the past, by process of elimination - // // let offsetSeconds = Math.floor(offsetTimestamp * 0.001) - // const cycle = CycleChain.getCycleByTimestamp(offsetTimestamp) - // if (cycle != null) { - // nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', 'p2p lookup') - // if (cycle.counter == null) { - // this.statemanager_fatal('getCycleNumberFromTimestamp unexpected cycle.cycleNumber == null', 'getCycleNumberFromTimestamp unexpected cycle.cycleNumber == null') - // /* prettier-ignore */ nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', `getCycleNumberFromTimestamp unexpected cycle.cycleNumber == null ${timestamp}`) - // } - - // return cycle.counter - // } else { - // //nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', 'p2p lookup fail -estimate cycle') - // //debug only!!! 
- // //let cycle2 = CycleChain.getCycleByTimestamp(offsetTimestamp) - // //this.statemanager_fatal('getCycleNumberFromTimestamp getCycleByTimestamp failed', 'getCycleByTimestamp getCycleByTimestamp failed') - // let cycle: Shardus.Cycle = CycleChain.getNewest() - // let cycleEstimate = this.currentCycleShardData.cycleNumber - Math.ceil((this.currentCycleShardData.timestampEndCycle - offsetTimestamp) / (cycle.duration * 1000)) - // if (cycleEstimate < 1) { - // cycleEstimate = 1 - // } - // /* prettier-ignore */ nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', 'p2p lookup fail -estimate cycle: ' + cycleEstimate) - // return cycleEstimate - // } - // } - - // //failed to match, return -1 - // this.statemanager_fatal('getCycleNumberFromTimestamp failed final', `getCycleNumberFromTimestamp failed final ${timestamp}`) - // return -1 - // } - - //OLD parition report endpoint. - - // /post_partition_results (Partition_results) - // Partition_results - array of objects with the fields {Partition_id, Cycle_number, Partition_hash, Node_id, Node_sign} - // Returns nothing - // this.p2p.registerInternal( - // 'post_partition_results', - // /** - // * This is how to typedef a callback! - // * @param {{ partitionResults: PartitionResult[]; Cycle_number: number; }} payload - // * @param {any} respond TSConversion is it ok to just set respond to any? - // */ - // async (payload: PosPartitionResults, respond: any) => { - // // let result = {} - // // let ourLockID = -1 - // try { - // // ourLockID = await this.fifoLock('accountModification') - // // accountData = await this.app.getAccountDataByList(payload.accountIds) - // // Nodes collect the partition result from peers. - // // Nodes may receive partition results for partitions they are not covering and will ignore those messages. - // // Once a node has collected 50% or more peers giving the same partition result it can combine them to create a partition receipt. The node tries to create a partition receipt for all partitions it covers. - // // If the partition receipt has a different partition hash than the node, the node needs to ask one of the peers with the majority partition hash for the partition object and determine the transactions it has missed. - // // If the node is not able to create a partition receipt for a partition, the node needs to ask all peers which have a different partition hash for the partition object and determine the transactions it has missed. Only one peer for each different partition hash needs to be queried. Uses the /get_partition_txids API. - // // If the node has missed some transactions for a partition, the node needs to get these transactions from peers and apply these transactions to affected accounts starting with a known good copy of the account from the end of the last cycle. Uses the /get_transactions_by_list API. - // // If the node applied missed transactions to a partition, then it creates a new partition object, partition hash and partition result. - // // After generating new partition results as needed, the node broadcasts the set of partition results to N adjacent peers on each side; where N is the number of partitions covered by the node. - // // After receiving new partition results from peers, the node should be able to collect 50% or more peers giving the same partition result and build a partition receipt. - // // Any partition for which the node could not generate a partition receipt, should be logged as a fatal error. 
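// Illustrative sketch (not part of this patch): the core arithmetic behind the deleted
// getCycleNumberFromTimestamp above. The real code also consults CycleChain for exact
// lookups; this only shows the offset and estimate math, with a hypothetical
// CurrentCycleInfo shape standing in for currentCycleShardData.
interface CurrentCycleInfo {
  cycleNumber: number
  timestamp: number          // start of the current cycle (ms)
  timestampEndCycle: number  // end of the current cycle (ms)
  cycleDurationMs: number
}

function estimateCycleNumberFromTimestamp(txTimestamp: number, syncSettleTime: number, cur: CurrentCycleInfo): number {
  // Offset forward so transactions still settling near a cycle boundary are counted
  // in a cycle that can actually be finalized in time.
  const offset = txTimestamp + syncSettleTime
  if (offset >= cur.timestamp && offset < cur.timestampEndCycle) {
    return cur.cycleNumber
  }
  if (offset >= cur.timestampEndCycle) {
    // Future: count whole cycle durations past the end of the current cycle.
    const cyclesAhead = Math.ceil((offset - cur.timestampEndCycle) / cur.cycleDurationMs)
    return cur.cycleNumber + cyclesAhead
  }
  // Past: fall back to an estimate counted back from the end of the current cycle,
  // never going below cycle 1.
  const cyclesBack = Math.ceil((cur.timestampEndCycle - offset) / cur.cycleDurationMs)
  return Math.max(1, cur.cycleNumber - cyclesBack)
}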
- // // Nodes save the partition receipt as proof that the transactions they have applied are correct and were also applied by peers. - // // if (logFlags.verbose) this.mainLogger.debug( ` _repair post_partition_results`) - // if (!payload) { - // if (logFlags.verbose) this.mainLogger.error(` _repair post_partition_results: abort no payload`) - // return - // } - // let partitionResults = payload.partitionResults - // let cycleKey = 'c' + payload.Cycle_number - // let allResponsesByPartition = this.allPartitionResponsesByCycleByPartition[cycleKey] - // if (!allResponsesByPartition) { - // allResponsesByPartition = {} - // this.allPartitionResponsesByCycleByPartition[cycleKey] = allResponsesByPartition - // } - // let ourPartitionResults = this.ourPartitionResultsByCycle[cycleKey] - // if (!payload.partitionResults) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(` _repair post_partition_results: abort, partitionResults == null`) - // return - // } - // if (payload.partitionResults.length === 0) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(` _repair post_partition_results: abort, partitionResults.length == 0`) - // return - // } - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair post_partition_results payload: ${utils.stringifyReduce(payload)}`) - // if (!payload.partitionResults[0].sign) { - // // TODO security need to check that this is signed by a valid and correct node - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(` _repair post_partition_results: abort, no sign object on partition`) - // return - // } - // let owner = payload.partitionResults[0].sign.owner - // // merge results from this message into our colleciton of allResponses - // for (let partitionResult of partitionResults) { - // let partitionKey1 = 'p' + partitionResult.Partition_id - // let responses = allResponsesByPartition[partitionKey1] - // if (!responses) { - // responses = [] - // allResponsesByPartition[partitionKey1] = responses - // } - // // clean out an older response from same node if on exists - // responses = responses.filter((item) => item.sign == null || item.sign.owner !== owner) - // allResponsesByPartition[partitionKey1] = responses // have to re-assign this since it is a new ref to the array - // // add the result ot the list of responses - // if (partitionResult) { - // responses.push(partitionResult) - // } else { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.error(` _repair post_partition_results partitionResult missing`) - // } - // /* prettier-ignore */ if (logFlags.verbose && this.stateManager.extendedRepairLogging) this.mainLogger.debug(` _repair post_partition_results partition: ${partitionResult.Partition_id} responses.length ${responses.length} cycle:${payload.Cycle_number}`) - // } - // var partitionKeys = Object.keys(allResponsesByPartition) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair post_partition_results partitionKeys: ${partitionKeys.length}`) - // // Loop through all the partition keys and check our progress for each partition covered - // // todo perf consider only looping through keys of partitions that changed from this update? - // for (let partitionKey of partitionKeys) { - // let responses = allResponsesByPartition[partitionKey] - // // if enough data, and our response is prepped. - // let repairTracker - // let partitionId = null // todo sharding ? 
need to deal with more that one partition response here!! - // if (responses.length > 0) { - // partitionId = responses[0].Partition_id - // repairTracker = this.stateManager.depricated._getRepairTrackerForCycle(payload.Cycle_number, partitionId) - // if (repairTracker.busy && repairTracker.awaitWinningHash === false) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair post_partition_results tracker busy. ${partitionKey} responses: ${responses.length}. ${utils.stringifyReduce(repairTracker)}`) - // continue - // } - // if (repairTracker.repairsFullyComplete) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair post_partition_results repairsFullyComplete = true cycle:${payload.Cycle_number}`) - // continue - // } - // } else { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair post_partition_results no responses. ${partitionKey} responses: ${responses.length}. repairTracker: ${utils.stringifyReduce(repairTracker)} responsesById: ${utils.stringifyReduce(allResponsesByPartition)}`) - // continue - // } - // let responsesRequired = 3 - // if (this.stateManager.useHashSets) { - // responsesRequired = Math.min(1 + Math.ceil(repairTracker.numNodes * 0.9), repairTracker.numNodes - 1) // get responses from 90% of the node we have sent to - // } - // // are there enough responses to try generating a receipt? - // if (responses.length >= responsesRequired && (repairTracker.evaluationStarted === false || repairTracker.awaitWinningHash)) { - // repairTracker.evaluationStarted = true - // let ourResult = null - // if (ourPartitionResults != null) { - // for (let obj of ourPartitionResults) { - // if (obj.Partition_id === partitionId) { - // ourResult = obj - // break - // } - // } - // } - // if (ourResult == null) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair post_partition_results our result is not computed yet `) - // // Todo repair : may need to sleep or restart this computation later.. - // return - // } - // let receiptResults = this.tryGeneratePartitionReciept(responses, ourResult) // TODO: how to mark block if we are already on a thread for this? - // let { partitionReceipt, topResult, success } = receiptResults - // if (!success) { - // if (repairTracker.awaitWinningHash) { - // if (topResult == null) { - // // if we are awaitWinningHash then wait for a top result before we start repair process again - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair awaitWinningHash:true but topResult == null so keep waiting `) - // continue - // } else { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair awaitWinningHash:true and we have a top result so start reparing! `) - // } - // } - // if (this.resetAndApplyPerPartition === false && repairTracker.txRepairReady === true) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair txRepairReady:true bail here for some strange reason.. 
not sure aout this yet `) - // continue - // } - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair post_partition_results: tryGeneratePartitionReciept failed start repair process 1 ${utils.stringifyReduce(receiptResults)}`) - // let cycle = this.p2p.state.getCycleByCounter(payload.Cycle_number) - // await this.startRepairProcess(cycle, topResult, partitionId, ourResult.Partition_hash) - // } else if (partitionReceipt) { - // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug( ` _repair post_partition_results: success store partition receipt`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair post_partition_results 3 allFinished, final cycle: ${payload.Cycle_number} hash:${utils.stringifyReduce({ topResult })}`) - // // do we ever send partition receipt yet? - // this.stateManager.storePartitionReceipt(payload.Cycle_number, partitionReceipt) - // this.stateManager.depricated.repairTrackerMarkFinished(repairTracker, 'post_partition_results') - // } - // } else { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(` _repair post_partition_results not enough responses awaitWinningHash: ${repairTracker.awaitWinningHash} resp: ${responses.length}. required:${responsesRequired} repairTracker: ${utils.stringifyReduce(repairTracker)}`) - // } - // // End of loop over partitions. Continue looping if there are other partions that we need to check for completion. - // } - // } finally { - // // this.fifoUnlock('accountModification', ourLockID) - // } - // // result.accountData = accountData - // // await respond(result) - // } - // ) - - // /** - // * all this does now is set syncPartitionsStarted = true. should be depricated - // */ - // async startSyncPartitions() { - // // await this.createInitialAccountBackups() // nm this is now part of regular data sync - // // register our handlers - - // // this._registerListener(this.p2p.state, 'cycle_q1_start', async (lastCycle, time) => { - // // this.updateShardValues(lastCycle.counter) - // // }) - - // this.syncPartitionsStarted = true - - // // this.stateManager._registerListener(this.p2p.state, 'cycle_q2_start', async (lastCycle: Shardus.Cycle, time: number) => { - // // // await this.processPreviousCycleSummaries() - // // // lastCycle = this.p2p.state.getLastCycle() - // // // if (lastCycle == null) { - // // // return - // // // } - // // // let lastCycleShardValues = this.stateManager.shardValuesByCycle.get(lastCycle.counter) - // // // if (lastCycleShardValues == null) { - // // // return - // // // } - // // // if(this.currentCycleShardData == null){ - // // // return - // // // } - // // // if (this.currentCycleShardData.ourNode.status !== 'active') { - // // // // dont participate just yet. - // // // return - // // // } - // // // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug( ` _repair startSyncPartitions:cycle_q2_start cycle: ${lastCycle.counter}`) - // // // // this will take temp TXs and make sure they are stored in the correct place for us to generate partitions - // // // this.processTempTXs(lastCycle) - // // // // During the Q2 phase of a cycle, nodes compute the partition hash of the previous cycle for all the partitions covered by the node. - // // // // Q2 was chosen so that any transactions submitted with a time stamp that falls in the previous quarter will have been processed and finalized. This could be changed to Q3 if we find that more time is needed. 
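// Illustrative sketch (not part of this patch): the response threshold from the deleted
// post_partition_results handler above, plus a simple majority-by-hash grouping. The
// grouping rule follows the "50% or more peers giving the same partition result" note in
// those comments; the actual tryGeneratePartitionReciept is not shown in this hunk, so
// treat this as an assumption rather than the real implementation.
interface PartitionResultVote {
  Partition_id: number
  Partition_hash: string
  sign?: { owner: string }
}

function responsesRequired(useHashSets: boolean, numNodes: number): number {
  // With hash sets, wait for roughly 90% of the nodes we sent to (but never all of
  // them); otherwise fall back to a fixed minimum of 3 responses.
  return useHashSets ? Math.min(1 + Math.ceil(numNodes * 0.9), numNodes - 1) : 3
}

function findWinningHash(responses: PartitionResultVote[]): { hash: string; votes: number } | null {
  const counts = new Map<string, number>()
  for (const r of responses) {
    counts.set(r.Partition_hash, (counts.get(r.Partition_hash) ?? 0) + 1)
  }
  let best: { hash: string; votes: number } | null = null
  for (const [hash, votes] of counts) {
    if (best == null || votes > best.votes) best = { hash, votes }
  }
  // Require at least half of the responses to agree before treating a hash as the winner.
  if (best != null && best.votes * 2 >= responses.length) return best
  return null
}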
- // // // this.generatePartitionObjects(lastCycle) - // // // let receiptMapResults = this.generateReceiptMapResults(lastCycle) - // // // if(logFlags.verbose) this.mainLogger.debug( `receiptMapResults: ${stringify(receiptMapResults)}`) - // // // let statsClump = this.partitionStats.getCoveredStatsPartitions(lastCycleShardValues) - // // // //build partition hashes from previous full cycle - // // // let mainHashResults:MainHashResults = null - // // // if(this.currentCycleShardData && this.currentCycleShardData.ourNode.status === 'active'){ - // // // mainHashResults = this.accountCache.buildPartitionHashesForNode(this.currentCycleShardData) - // // // } - // // // // Hook for Snapshot module to listen to after partition data is settled - // // // this.emit('cycleTxsFinalized', lastCycleShardValues, receiptMapResults, statsClump, mainHashResults) - // // // this.dumpAccountDebugData2(mainHashResults) - // // // // pre-allocate the next cycle data to be safe! - // // // let prekey = 'c' + (lastCycle.counter + 1) - // // // this.partitionObjectsByCycle[prekey] = [] - // // // this.ourPartitionResultsByCycle[prekey] = [] - // // // // Nodes generate the partition result for all partitions they cover. - // // // // Nodes broadcast the set of partition results to N adjacent peers on each side; where N is - // // // // the number of partitions covered by the node. Uses the /post_partition_results API. - // // // await this.broadcastPartitionResults(lastCycle.counter) // Cycle_number - // // }) - - // /* this._registerListener(this.p2p.state, 'cycle_q4_start', async (lastCycle, time) => { - // // Also we would like the repair process to finish by the end of Q3 and definitely before the start of a new cycle. Otherwise the cycle duration may need to be increased. - // }) */ - // } - - // static computeNodePartitionDataMapExt( - // shardGlobals: StateManager.shardFunctionTypes.ShardGlobals, - // nodeShardDataMap: StateManager.shardFunctionTypes.NodeShardDataMap, - // nodesToGenerate: Shardus.Node[], - // parititionShardDataMap: StateManager.shardFunctionTypes.ParititionShardDataMap, - // activeNodes: Shardus.Node[] - // ) { - // // for (let node of nodesToGenerate) { - // // let nodeShardData = nodeShardDataMap.get(node.id) - // // if (!nodeShardData) { - // // nodeShardData = ShardFunctions.computeNodePartitionData(shardGlobals, node, nodeShardDataMap, parititionShardDataMap, activeNodes) - // // } - // // // ShardFunctions.computeExtendedNodePartitionData(shardGlobals, nodeShardDataMap, parititionShardDataMap, nodeShardData, activeNodes) - // // // - // // // this wont be able to extend things though. - // // ShardFunctions.updateFullConsensusGroup(shardGlobals, nodeShardDataMap, parititionShardDataMap, nodeShardData, activeNodes) - // // } - // } - - // static updateFullConsensusGroup (shardGlobals: ShardGlobals, nodeShardDataMap: NodeShardDataMap, parititionShardDataMap: ParititionShardDataMap, nodeShardData: NodeShardData, activeNodes: Shardus.Node[]) { - // let homePartition = nodeShardData.homePartition - // let shardPartitionData = parititionShardDataMap.get(homePartition) - - // if(shardPartitionData == null){ - // throw new Error('updateFullConsensusGroup: shardPartitionData==null') - // } - - // nodeShardData.consensusNodeForOurNodeFull = Object.values(shardPartitionData.coveredBy) - // nodeShardData.needsUpdateToFullConsensusGroup = false - // nodeShardData.consensusNodeForOurNodeFull.sort(ShardFunctions.nodeSortAsc) - - // // merge into our full list for sake of TX calcs. 
todo could try to be smart an only do this in some cases. - // // let [results] = ShardFunctions.mergeNodeLists(nodeShardData.nodeThatStoreOurParition, nodeShardData.consensusNodeForOurNodeFull) - // // switched nodeThatStoreOurParition to nodeThatStoreOurParitionFull to improve the quality of the results. - // let [results] = ShardFunctions.mergeNodeLists(nodeShardData.nodeThatStoreOurParitionFull, nodeShardData.consensusNodeForOurNodeFull) - - // // not sure if we need to do this - // // if (extras.length > 0) { - // // ShardFunctions.dilateNeighborCoverage(shardGlobals, nodeShardDataMap, parititionShardDataMap, activeNodes, nodeShardData, extras) - // // } - - // nodeShardData.nodeThatStoreOurParitionFull = results - // nodeShardData.nodeThatStoreOurParitionFull.sort(ShardFunctions.nodeSortAsc) - // } - - // GLOBAL CLEANUP Depricated this code. it was for maintaining global account history that is not needed now. - - // knownGlobals: { [id: string]: boolean } // will just use the above set now as a simplification - - /** Need the ablity to get account copies and use them later when applying a transaction. how to use the right copy or even know when to use this at all? */ - /** Could go by cycle number. if your cycle matches the one in is list use it? */ - /** What if the global account is transformed several times durring that cycle. oof. */ - /** ok best thing to do is to store the account every time it changes for a given period of time. */ - /** how to handle reparing a global account... yikes that is hard. */ - //globalAccountRepairBank: Map - - // getGlobalAccountValueAtTime(accountId: string, oldestTimestamp: number): Shardus.AccountsCopy | null { - // let result: Shardus.AccountsCopy | null = null - // let globalBackupList: Shardus.AccountsCopy[] = this.getGlobalAccountBackupList(accountId) - // if (globalBackupList == null || globalBackupList.length === 0) { - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('globalBackupList', `applyAllPreparedRepairs - missing value for ${accountId}`) - // return null - // } - - // //else fine the closest time lower than our input time - // //non binary search, just start at then end and go backwards. - // //TODO PERF make this a binary search. realistically the lists should be pretty short most of the time - // if (globalBackupList.length >= 1) { - // for (let i = globalBackupList.length - 1; i >= 0; i--) { - // let accountCopy = globalBackupList[i] - // if (accountCopy.timestamp <= oldestTimestamp) { - // return accountCopy - // } - // } - // } - // return null - // } - - // sortByTimestamp(a: any, b: any): number { - // return utils.sortAscProp(a, b, 'timestamp') - // } - - // sortAndMaintainBackupList(globalBackupList: Shardus.AccountsCopy[], oldestTimestamp: number): void { - // globalBackupList.sort(utils.sortTimestampAsc) // this.sortByTimestamp) - // //remove old entries. then bail. 
- // // note this loop only runs if there is more than one entry - // // also it should always keep the last item in the list now matter what (since that is the most current backup) - // // this means we only start if there are 2 items in the array and we start at index len-2 (next to last element) - // if (globalBackupList.length > 1) { - // for (let i = globalBackupList.length - 2; i >= 0; i--) { - // let accountCopy = globalBackupList[i] - // if (accountCopy.timestamp < oldestTimestamp) { - // globalBackupList.splice(i, 1) - // } - // } - // } - // } - - // - // sortAndMaintainBackups(oldestTimestamp: number): void { - // let keys = this.globalAccountRepairBank.keys() - // for (let key of keys) { - // let globalBackupList = this.globalAccountRepairBank.get(key) - // if (globalBackupList != null) { - // this.sortAndMaintainBackupList(globalBackupList, oldestTimestamp) - // } - // } - // } - - // getGlobalAccountBackupList(accountID: string): Shardus.AccountsCopy[] { - // let results: Shardus.AccountsCopy[] = [] - // if (this.globalAccountRepairBank.has(accountID) === false) { - // this.globalAccountRepairBank.set(accountID, results) //init list - // } else { - // results = this.globalAccountRepairBank.get(accountID) - // } - // return results - // } - - //statsDataSummaryUpdate(accountDataBefore:any, accountDataAfter:Shardus.WrappedData){ - // statsDataSummaryUpdate(cycle: number, accountData: Shardus.WrappedResponse, debugMsg:string) { - // if(this.invasiveDebugInfo) this.mainLogger.debug(`statData enter:statsDataSummaryUpdate c:${cycle} ${debugMsg} accForBin:${utils.makeShortHash(accountData.accountId)} inputs:${JSON.stringify({accountData})}`) - - // let blob: StateManagerTypes.StateManagerTypes.SummaryBlob = this.getSummaryBlob(accountData.accountId) - // blob.counter++ - // if (accountData.data == null) { - // blob.errorNull += 10000 - // if (logFlags.error) this.mainLogger.error(`statsDataSummaryUpdate errorNull 1`) - // return - // } - // if (accountData.prevDataCopy == null) { - // blob.errorNull += 1000000 - // if (logFlags.error) this.mainLogger.error(`statsDataSummaryUpdate errorNull 2`) - // return - // } - - // // if(this.useSeenAccountMap === true){ - // // let accountId = accountData.accountId - // // let timestamp = accountData.timestamp // this.app.getAccountTimestamp(accountId) - // // let hash = accountData.stateId //this.app.getStateId(accountId) - - // // if(this.seenCreatedAccounts.has(accountId)){ - // // let accountMemData:AccountMemoryCache = this.seenCreatedAccounts.get(accountId) - // // if(accountMemData.t > timestamp){ - // // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`statsDataSummaryUpdate: good error?: dont update stats with older data skipping update ${utils.makeShortHash(accountId)}`) - // // return - // // } - // // } else { - // // if (logFlags.error) this.mainLogger.error(`statsDataSummaryUpdate: did not find seen account`) - // // } - - // // let accountMemDataUpdate:AccountMemoryCache = {t:timestamp, h:hash} - // // this.seenCreatedAccounts.set(accountId, accountMemDataUpdate) - // // } - - // let accountId = accountData.accountId - // let timestamp = accountData.timestamp // this.app.getAccountTimestamp(accountId) - // let hash = accountData.stateId - - // if (this.accountCache.hasAccount(accountId)) { - // let accountMemData: AccountHashCache = this.accountCache.getAccountHash(accountId) - // if (accountMemData.t > timestamp) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`statsDataSummaryUpdate: good 
error?: dont update stats with older data skipping update ${utils.makeShortHash(accountId)}`) - // return - // } - // } else { - // if (logFlags.error) this.mainLogger.error(`statsDataSummaryUpdate: did not find seen account`) - // } - // this.accountCache.updateAccountHash(accountId, hash, timestamp, cycle) - - // if (cycle > blob.latestCycle) { - // blob.latestCycle = cycle - // } - // this.app.dataSummaryUpdate(blob.opaqueBlob, accountData.prevDataCopy, accountData.data) - - // if(this.invasiveDebugInfo) this.mainLogger.debug(`statData:statsDataSummaryUpdate c:${cycle} ${debugMsg} accForBin:${utils.makeShortHash(accountId)} ${this.debugAccountData(accountData.data)} - ${this.debugAccountData(accountData.prevDataCopy)}`) - // if(this.invasiveDebugInfo) this.addDebugToBlob(blob, accountId) - - // } - - // statsDataSummaryInit(cycle: number, accountData: Shardus.WrappedData, debugMsg:string) { - // if(this.invasiveDebugInfo) this.mainLogger.debug(`statData enter:statsDataSummaryInit c:${cycle} ${debugMsg} accForBin:${utils.makeShortHash(accountData.accountId)} inputs:${JSON.stringify({accountData})}`) - - // let blob: StateManagerTypes.StateManagerTypes.SummaryBlob = this.getSummaryBlob(accountData.accountId) - // blob.counter++ - - // // if(this.useSeenAccountMap === true && this.seenCreatedAccounts.has(accountData.accountId)){ - // // // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`statsDataSummaryInit seenCreatedAccounts dupe: ${utils.stringifyReduce(accountData.accountId)}`) - // // return - // // } - // // if(this.useSeenAccountMap === true){ - // // let accountMemData:AccountMemoryCache = {t:accountData.timestamp, h:accountData.stateId} - // // this.seenCreatedAccounts.set(accountData.accountId, accountMemData) - // // } - - // if (this.accountCache.hasAccount(accountData.accountId)) { - // return - // } - // this.accountCache.updateAccountHash(accountData.accountId, accountData.stateId, accountData.timestamp, cycle) - - // if (accountData.data == null) { - // blob.errorNull++ - // if (logFlags.error) this.mainLogger.error(`statsDataSummaryInit errorNull`) - // return - // } - // if (cycle > blob.latestCycle) { - // blob.latestCycle = cycle - // } - // this.app.dataSummaryInit(blob.opaqueBlob, accountData.data) - - // if(this.invasiveDebugInfo) this.mainLogger.debug(`statData:statsDataSummaryInit c:${cycle} accForBin:${utils.makeShortHash(accountData.accountId)} ${this.debugAccountData(accountData.data)}`) - // if(this.invasiveDebugInfo) this.addDebugToBlob(blob, accountData.accountId) - // } - - // statsDataSummaryUpdate2(cycle: number, accountDataBefore: any, accountDataAfter: Shardus.WrappedData, debugMsg:string) { - // if(this.invasiveDebugInfo) this.mainLogger.debug(`statData enter:statsDataSummaryUpdate2 c:${cycle} accForBin:${utils.makeShortHash(accountDataAfter.accountId)} inputs:${JSON.stringify({accountDataBefore , accountDataAfter })}`) - - // let blob: StateManagerTypes.StateManagerTypes.SummaryBlob = this.getSummaryBlob(accountDataAfter.accountId) - // blob.counter++ - // if (accountDataAfter.data == null) { - // blob.errorNull += 100000000 - // if (logFlags.error) this.mainLogger.error(`statsDataSummaryUpdate2 errorNull 1`) - // return - // } - // if (accountDataBefore == null) { - // blob.errorNull += 10000000000 - // if (logFlags.error) this.mainLogger.error(`statsDataSummaryUpdate2 errorNull 2`) - // return - // } - - // // if(this.useSeenAccountMap === true){ - // // let accountId = accountDataAfter.accountId - // // let timestamp = 
accountDataAfter.timestamp // this.app.getAccountTimestamp(accountId) - // // let hash = accountDataAfter.stateId //this.app.getStateId(accountId) - - // // if(this.seenCreatedAccounts.has(accountId)){ - // // let accountMemData:AccountMemoryCache = this.seenCreatedAccounts.get(accountId) - // // if(accountMemData.t > timestamp){ - // // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`statsDataSummaryUpdate: good error?: 2: dont update stats with older data skipping update ${utils.makeShortHash(accountId)}`) - // // return - // // } - // // } else { - // // if (logFlags.error) this.mainLogger.error(`statsDataSummaryUpdate: did not find seen account: 2`) - // // } - - // // let accountMemDataUpdate:AccountMemoryCache = {t:timestamp, h:hash} - // // this.seenCreatedAccounts.set(accountId, accountMemDataUpdate) - // // } - - // let accountId = accountDataAfter.accountId - // let timestamp = accountDataAfter.timestamp // this.app.getAccountTimestamp(accountId) - // let hash = accountDataAfter.stateId //this.app.getStateId(accountId) - - // if (this.accountCache.hasAccount(accountId)) { - // let accountMemData: AccountHashCache = this.accountCache.getAccountHash(accountId) - // if (accountMemData.t > timestamp) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`statsDataSummaryUpdate: good error?: 2: dont update stats with older data skipping update ${utils.makeShortHash(accountId)}`) - // return - // } - // } else { - // if (logFlags.error) this.mainLogger.error(`statsDataSummaryUpdate: did not find seen account: 2`) - // } - // this.accountCache.updateAccountHash(accountId, hash, timestamp, cycle) - - // if (cycle > blob.latestCycle) { - // blob.latestCycle = cycle - // } - - // this.app.dataSummaryUpdate(blob.opaqueBlob, accountDataBefore, accountDataAfter.data) - - // if(this.invasiveDebugInfo) this.mainLogger.debug(`statData:statsDataSummaryUpdate2 c:${cycle} accForBin:${utils.makeShortHash(accountDataAfter.accountId)} ${this.debugAccountData(accountDataAfter.data)} - ${this.debugAccountData(accountDataBefore)}`) - // if(this.invasiveDebugInfo) this.addDebugToBlob(blob, accountDataAfter.accountId) - - // } - - // statsDataSummaryInitRaw(cycle: number, accountId: string, accountDataRaw: any, debugMsg:string) { - // if(this.invasiveDebugInfo) this.mainLogger.debug(`statData enter:statsDataSummaryInitRaw c:${cycle} ${debugMsg} accForBin:${utils.makeShortHash(accountId)} inputs:${JSON.stringify({accountDataRaw})}`) - - // let blob: StateManagerTypes.StateManagerTypes.SummaryBlob = this.getSummaryBlob(accountId) - // blob.counter++ - - // // if(this.useSeenAccountMap === true && this.seenCreatedAccounts.has(accountId)){ - // // // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`statsDataSummaryInitRaw seenCreatedAccounts dupe: ${utils.stringifyReduce(accountId)}`) - // // return - // // } - // // if(this.useSeenAccountMap === true){ - // // // let timestamp = this.app.getAccountTimestamp(accountId) - // // // let hash = this.app.getStateId(accountId) - - // // let accountInfo = this.app.getTimestampAndHashFromAccount(accountDataRaw) - - // // //let accountMemData:AccountMemoryCache = {t:0, h:'uninit'} - // // let accountMemData:AccountMemoryCache = {t:accountInfo.timestamp, h:accountInfo.hash} - // // this.seenCreatedAccounts.set(accountId, accountMemData) - // // } - - // if (this.accountCache.hasAccount(accountId)) { - // return - // } - // let accountInfo = this.app.getTimestampAndHashFromAccount(accountDataRaw) - // 
this.accountCache.updateAccountHash(accountId, accountInfo.hash, accountInfo.timestamp, cycle) - - // if (accountDataRaw == null) { - // blob.errorNull++ - // if (logFlags.error) this.mainLogger.error(`statsDataSummaryInitRaw errorNull`) - // return - // } - - // //crap we lack a queue. newer stuff still gets in. - // if (cycle > blob.latestCycle) { - // blob.latestCycle = cycle - // } - - // this.app.dataSummaryInit(blob.opaqueBlob, accountDataRaw) - - // if(this.invasiveDebugInfo) this.mainLogger.debug(`statData:statsDataSummaryInitRaw c:${cycle} accForBin:${utils.makeShortHash(accountId)} ${this.debugAccountData(accountDataRaw)}`) - // if(this.invasiveDebugInfo) this.addDebugToBlob(blob, accountId) - - // } - - // //the return value is a bit obtuse. should decide if a list or map output is better, or are they both needed. - // getStoredSnapshotPartitions(cycleShardData: CycleShardData): { list: number[]; map: Map } { - // //figure out which summary partitions are fully covered by - // let result = { list: [], map: new Map() } - // for (let i = 0; i < this.summaryPartitionCount; i++) { - // // 2^32 4294967296 or 0xFFFFFFFF + 1 - // let addressLowNum = (i / this.summaryPartitionCount) * (0xffffffff + 1) - // let addressHighNum = ((i + 1) / this.summaryPartitionCount) * (0xffffffff + 1) - 1 - // let inRangeLow = ShardFunctions.testAddressNumberInRange(addressLowNum, cycleShardData.nodeShardData.storedPartitions) - // let inRangeHigh = false - // if (inRangeLow) { - // inRangeHigh = ShardFunctions.testAddressNumberInRange(addressHighNum, cycleShardData.nodeShardData.storedPartitions) - // } - // if (inRangeLow && inRangeHigh) { - // result.list.push(i) - // result.map.set(i, true) - // } - // } - // return result - // } - - /** - * dumpAccountDebugData this is what creats the shardreports - */ - // async dumpAccountDebugData() { - // if (this.currentCycleShardData == null) { - // return - // } - - // // hmm how to deal with data that is changing... it cant!! - // let partitionMap = this.currentCycleShardData.parititionShardDataMap - - // let ourNodeShardData: StateManagerTypes.shardFunctionTypes.NodeShardData = this.currentCycleShardData.nodeShardData - // // partittions: - // let partitionDump: DebugDumpPartitions = { partitions: [], cycle: 0, rangesCovered: {} as DebugDumpRangesCovered, - // nodesCovered: {} as DebugDumpNodesCovered, allNodeIds: [], globalAccountIDs: [], globalAccountSummary: [], - // globalStateHash: '', calculationTime: this.currentCycleShardData.calculationTime } - // partitionDump.cycle = this.currentCycleShardData.cycleNumber - - // // todo port this to a static stard function! 
- // // check if we are in the consenus group for this partition - // let minP = ourNodeShardData.consensusStartPartition // storedPartitions.partitionStart - // let maxP = ourNodeShardData.consensusEndPartition // storedPartitions.partitionEnd - // partitionDump.rangesCovered = { ipPort: `${ourNodeShardData.node.externalIp}:${ourNodeShardData.node.externalPort}`, id: utils.makeShortHash(ourNodeShardData.node.id), fracID: ourNodeShardData.nodeAddressNum / 0xffffffff, hP: ourNodeShardData.homePartition, cMin: minP, cMax: maxP, stMin: ourNodeShardData.storedPartitions.partitionStart, stMax: ourNodeShardData.storedPartitions.partitionEnd, numP: this.currentCycleShardData.shardGlobals.numPartitions } - - // // todo print out coverage map by node index - - // partitionDump.nodesCovered = { idx: ourNodeShardData.ourNodeIndex, ipPort: `${ourNodeShardData.node.externalIp}:${ourNodeShardData.node.externalPort}`, id: utils.makeShortHash(ourNodeShardData.node.id), fracID: ourNodeShardData.nodeAddressNum / 0xffffffff, hP: ourNodeShardData.homePartition, consensus: [], stored: [], extra: [], numP: this.currentCycleShardData.shardGlobals.numPartitions } - - // for (let node of ourNodeShardData.consensusNodeForOurNode) { - // let nodeData = this.currentCycleShardData.nodeShardDataMap.get(node.id) - // //@ts-ignore just debug junk - // partitionDump.nodesCovered.consensus.push({ idx: nodeData.ourNodeIndex, hp: nodeData.homePartition }) - // } - // for (let node of ourNodeShardData.nodeThatStoreOurParitionFull) { - // let nodeData = this.currentCycleShardData.nodeShardDataMap.get(node.id) - // //@ts-ignore just debug junk - // partitionDump.nodesCovered.stored.push({ idx: nodeData.ourNodeIndex, hp: nodeData.homePartition }) - // } - - // if (this.currentCycleShardData.ourNode.status === 'active') { - // for (var [key, value] of partitionMap) { - // let partition: DebugDumpPartition = { parititionID: key, accounts: [], skip: {} as DebugDumpPartitionSkip } - // partitionDump.partitions.push(partition) - - // // normal case - // if (maxP > minP) { - // // are we outside the min to max range - // if (key < minP || key > maxP) { - // partition.skip = { p: key, min: minP, max: maxP } - // continue - // } - // } else if (maxP === minP) { - // if (key !== maxP) { - // partition.skip = { p: key, min: minP, max: maxP, noSpread: true } - // continue - // } - // } else { - // // are we inside the min to max range (since the covered rage is inverted) - // if (key > maxP && key < minP) { - // partition.skip = { p: key, min: minP, max: maxP, inverted: true } - // continue - // } - // } - - // let partitionShardData = value - // let accountStart = partitionShardData.homeRange.low - // let accountEnd = partitionShardData.homeRange.high - // let wrappedAccounts = await this.app.getAccountData(accountStart, accountEnd, 10000000) - // // { accountId: account.address, stateId: account.hash, data: account, timestamp: account.timestamp } - // let duplicateCheck = {} - // for (let wrappedAccount of wrappedAccounts) { - // if (duplicateCheck[wrappedAccount.accountId] != null) { - // continue - // } - // duplicateCheck[wrappedAccount.accountId] = true - // let v = wrappedAccount.data.balance // hack, todo maybe ask app for a debug value - // if (this.app.getAccountDebugValue != null) { - // v = this.app.getAccountDebugValue(wrappedAccount) - // } - // partition.accounts.push({ id: wrappedAccount.accountId, hash: wrappedAccount.stateId, v: v }) - // } - - // partition.accounts.sort(this._sortByIdAsc) - // } - - // 
//partitionDump.allNodeIds = [] - // for (let node of this.currentCycleShardData.activeNodes) { - // partitionDump.allNodeIds.push(utils.makeShortHash(node.id)) - // } - - // partitionDump.globalAccountIDs = Array.from(this.accountGlobals.globalAccountSet.keys()) - // partitionDump.globalAccountIDs.sort() - // // dump information about consensus group and edge nodes for each partition - // // for (var [key, value] of this.currentCycleShardData.parititionShardDataMap){ - - // // } - - // let {globalAccountSummary, globalStateHash} = this.accountGlobals.getGlobalDebugReport() - // partitionDump.globalAccountSummary = globalAccountSummary - // partitionDump.globalStateHash = globalStateHash - // } else { - // if (this.currentCycleShardData != null && this.currentCycleShardData.activeNodes.length > 0) { - // for (let node of this.currentCycleShardData.activeNodes) { - // partitionDump.allNodeIds.push(utils.makeShortHash(node.id)) - // } - // } - // } - - // this.lastShardReport = utils.stringifyReduce(partitionDump) - // this.shardLogger.debug(this.lastShardReport) - // } - - // /** - // * dumpAccountDebugData2 a temporary version that also uses stats data - // */ - // async dumpAccountDebugData2(mainHashResults: MainHashResults) { - // if (this.currentCycleShardData == null) { - // return - // } - - // // hmm how to deal with data that is changing... it cant!! - // let partitionMap = this.currentCycleShardData.parititionShardDataMap - - // let ourNodeShardData: StateManagerTypes.shardFunctionTypes.NodeShardData = this.currentCycleShardData.nodeShardData - // // partittions: - // let partitionDump: DebugDumpPartitions = { partitions: [], cycle: 0, rangesCovered: {} as DebugDumpRangesCovered, - // nodesCovered: {} as DebugDumpNodesCovered, allNodeIds: [], globalAccountIDs: [], globalAccountSummary: [], - // globalStateHash: '', calculationTime: this.currentCycleShardData.calculationTime } - // partitionDump.cycle = this.currentCycleShardData.cycleNumber - - // // todo port this to a static stard function! 
- // // check if we are in the consenus group for this partition - // let minP = ourNodeShardData.consensusStartPartition // storedPartitions.partitionStart - // let maxP = ourNodeShardData.consensusEndPartition // storedPartitions.partitionEnd - - // // let minP = ourNodeShardData.storedPartitions.partitionStart - // // let maxP = ourNodeShardData.storedPartitions.partitionEnd - - // let cMin = ourNodeShardData.consensusStartPartition - // let cMax = ourNodeShardData.consensusEndPartition - - // partitionDump.rangesCovered = { ipPort: `${ourNodeShardData.node.externalIp}:${ourNodeShardData.node.externalPort}`, id: utils.makeShortHash(ourNodeShardData.node.id), fracID: ourNodeShardData.nodeAddressNum / 0xffffffff, hP: ourNodeShardData.homePartition, cMin: cMin, cMax: cMax, stMin: ourNodeShardData.storedPartitions.partitionStart, stMax: ourNodeShardData.storedPartitions.partitionEnd, numP: this.currentCycleShardData.shardGlobals.numPartitions } - - // // todo print out coverage map by node index - - // partitionDump.nodesCovered = { idx: ourNodeShardData.ourNodeIndex, ipPort: `${ourNodeShardData.node.externalIp}:${ourNodeShardData.node.externalPort}`, id: utils.makeShortHash(ourNodeShardData.node.id), fracID: ourNodeShardData.nodeAddressNum / 0xffffffff, hP: ourNodeShardData.homePartition, consensus: [], stored: [], extra: [], numP: this.currentCycleShardData.shardGlobals.numPartitions } - - // for (let node of ourNodeShardData.consensusNodeForOurNode) { - // let nodeData = this.currentCycleShardData.nodeShardDataMap.get(node.id) - // //@ts-ignore just debug junk - // partitionDump.nodesCovered.consensus.push({ idx: nodeData.ourNodeIndex, hp: nodeData.homePartition }) - // } - // for (let node of ourNodeShardData.nodeThatStoreOurParitionFull) { - // let nodeData = this.currentCycleShardData.nodeShardDataMap.get(node.id) - // //@ts-ignore just debug junk - // partitionDump.nodesCovered.stored.push({ idx: nodeData.ourNodeIndex, hp: nodeData.homePartition }) - // } - - // if (this.currentCycleShardData.ourNode.status === 'active') { - // for (var [key, value] of partitionMap) { - // let partition: DebugDumpPartition = { parititionID: key, accounts: [], accounts2: [], skip: {} as DebugDumpPartitionSkip } - // partitionDump.partitions.push(partition) - - // // normal case - // if (maxP > minP) { - // // are we outside the min to max range - // if (key < minP || key > maxP) { - // partition.skip = { p: key, min: minP, max: maxP } - // continue - // } - // } else if (maxP === minP) { - // if (key !== maxP) { - // partition.skip = { p: key, min: minP, max: maxP, noSpread: true } - // continue - // } - // } else { - // // are we inside the min to max range (since the covered rage is inverted) - // if (key > maxP && key < minP) { - // partition.skip = { p: key, min: minP, max: maxP, inverted: true } - // continue - // } - // } - - // let partitionShardData = value - // let accountStart = partitionShardData.homeRange.low - // let accountEnd = partitionShardData.homeRange.high - - // if (this.debugFeature_dumpAccountDataFromSQL === true) { - // let wrappedAccounts = await this.app.getAccountData(accountStart, accountEnd, 10000000) - // // { accountId: account.address, stateId: account.hash, data: account, timestamp: account.timestamp } - // let duplicateCheck = {} - // for (let wrappedAccount of wrappedAccounts) { - // if (duplicateCheck[wrappedAccount.accountId] != null) { - // continue - // } - // duplicateCheck[wrappedAccount.accountId] = true - // let v = wrappedAccount.data.balance // hack, todo 
maybe ask app for a debug value - // if (this.app.getAccountDebugValue != null) { - // v = this.app.getAccountDebugValue(wrappedAccount) - // } - // partition.accounts.push({ id: wrappedAccount.accountId, hash: wrappedAccount.stateId, v: v }) - // } - - // partition.accounts.sort(this._sortByIdAsc) - // } - - // // Take the cache data report and fill out accounts2 and partitionHash2 - // if (mainHashResults.partitionHashResults.has(partition.parititionID)) { - // let partitionHashResults = mainHashResults.partitionHashResults.get(partition.parititionID) - // for (let index = 0; index < partitionHashResults.hashes.length; index++) { - // let id = partitionHashResults.ids[index] - // let hash = partitionHashResults.hashes[index] - // let v = `{t:${partitionHashResults.timestamps[index]}}` - // partition.accounts2.push({ id, hash, v }) - // } - // partition.partitionHash2 = partitionHashResults.hashOfHashes - // } - // } - - // //partitionDump.allNodeIds = [] - // for (let node of this.currentCycleShardData.activeNodes) { - // partitionDump.allNodeIds.push(utils.makeShortHash(node.id)) - // } - - // partitionDump.globalAccountIDs = Array.from(this.accountGlobals.globalAccountSet.keys()) - // partitionDump.globalAccountIDs.sort() - // // dump information about consensus group and edge nodes for each partition - // // for (var [key, value] of this.currentCycleShardData.parititionShardDataMap){ - - // // } - - // let {globalAccountSummary, globalStateHash} = this.accountGlobals.getGlobalDebugReport() - // partitionDump.globalAccountSummary = globalAccountSummary - // partitionDump.globalStateHash = globalStateHash - - // } else { - // if (this.currentCycleShardData != null && this.currentCycleShardData.activeNodes.length > 0) { - // for (let node of this.currentCycleShardData.activeNodes) { - // partitionDump.allNodeIds.push(utils.makeShortHash(node.id)) - // } - // } - // } - - // this.lastShardReport = utils.stringifyReduce(partitionDump) - // this.shardLogger.debug(this.lastShardReport) - // //this.shardLogger.debug(utils.stringifyReduce(partitionDump)) - // } - - // /** - // * syncStateDataForRange - // * syncs accountData with the help of stateTable data for a given address range - // * @param {SimpleRange} range - // */ - // async syncStateDataForRange(range: SimpleRange) { - // try { - // let partition = 'notUsed' - // this.currentRange = range - // this.addressRange = range // this.partitionToAddressRange(partition) - - // this.partitionStartTimeStamp = Date.now() - - // let lowAddress = this.addressRange.low - // let highAddress = this.addressRange.high - - // partition = `${utils.stringifyReduce(lowAddress)} - ${utils.stringifyReduce(highAddress)}` - - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateDataForPartition partition: ${partition} `) - - // if(this.useStateTable === true){ - // await this.syncStateTableData(lowAddress, highAddress, 0, Date.now() - this.stateManager.syncSettleTime) - // } - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: partition: ${partition}, syncStateTableData 1st pass done.`) - - // /* prettier-ignore */ nestedCountersInstance.countEvent('sync', `sync partition: ${partition} start: ${this.stateManager.currentCycleShardData.cycleNumber}`) - - // this.readyforTXs = true // open the floodgates of queuing stuffs. 
- - // await this.syncAccountData(lowAddress, highAddress) - // if (logFlags.debug) this.mainLogger.debug(`DATASYNC: partition: ${partition}, syncAccountData done.`) - - // // potentially do the next 2 blocks periodically in the account data retreval so we can flush data to disk! generalize the account state table update so it can be called 'n' times - - // // Sync the Account State Table Second Pass - // // Wait at least 10T since the Ts_end time of the First Pass - // // Same as the procedure for First Pass except: - // // Ts_start should be the Ts_end value from last time and Ts_end value should be current time minus 10T - // if(this.useStateTable === true){ - // await this.syncStateTableData(lowAddress, highAddress, this.lastStateSyncEndtime, Date.now()) - // } - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: partition: ${partition}, syncStateTableData 2nd pass done.`) - - // // Process the Account data - // // For each account in the Account data make sure the entry in the Account State Table has the same State_after value; if not remove the record from the Account data - // // For each account in the Account State Table make sure the entry in Account data has the same State_after value; if not save the account id to be looked up later - // // Use the App.set_account_data function with the Account data to save the data to the application Accounts Table; if any failed accounts are returned save the account id to be looked up later - // let accountsSaved = await this.processAccountData() - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: partition: ${partition}, processAccountData done.`) - - // // Sync the failed accounts - // // Log that some account failed - // // Use the /get_account_data_by_list API to get the data for the accounts that need to be looked up later from any of the nodes that had a matching hash but different from previously used nodes - // // Repeat the “Sync the Account State Table Second Pass” step - // // Repeat the “Process the Account data” step - // await this.syncFailedAcccounts(lowAddress, highAddress) - - // if (this.failedAccountsRemain()) { - // if (logFlags.debug) - // this.mainLogger.debug( - // `DATASYNC: failedAccountsRemain, ${utils.stringifyReduce(lowAddress)} - ${utils.stringifyReduce(highAddress)} accountsWithStateConflict:${ - // this.accountsWithStateConflict.length - // } missingAccountData:${this.missingAccountData.length} stateTableForMissingTXs:${Object.keys(this.stateTableForMissingTXs).length}` - // ) - - // //This section allows to retry for failed accounts but it greatly slows down the sync process, so I think that is not the right answer - - // // this.mainLogger.debug(`DATASYNC: failedAccountsRemain, wait ${this.stateManager.syncSettleTime}ms and retry ${lowAddress} - ${highAddress}`) - // // await utils.sleep(this.stateManager.syncSettleTime) - - // // await this.syncFailedAcccounts(lowAddress, highAddress) - - // // if(this.failedAccountsRemain()){ - // // this.statemanager_fatal(`failedAccountsRemain2`, `failedAccountsRemain2: this.accountsWithStateConflict:${utils.stringifyReduce(this.accountsWithStateConflict)} this.missingAccountData:${utils.stringifyReduce(this.missingAccountData)} `) - // // } else { - // // this.mainLogger.debug(`DATASYNC: syncFailedAcccounts FIX WORKED`) - // // } - // } - - // let keysToRepair = Object.keys(this.stateTableForMissingTXs).length - // if (keysToRepair > 0) { - // // alternate repair. 
- // this.repairMissingTXs() - // } - - // /* prettier-ignore */ nestedCountersInstance.countEvent('sync', `sync partition: ${partition} end: ${this.stateManager.currentCycleShardData.cycleNumber} accountsSynced:${accountsSaved} missing tx to repair: ${keysToRepair}`) - - // } catch (error) { - // if(error.message.includes('reset-sync-ranges')){ - - // this.statemanager_fatal(`syncStateDataForRange_reset-sync-ranges`, 'DATASYNC: reset-sync-ranges: ' + errorToStringFull(error)) - // //buble up: - // throw new Error('reset-sync-ranges') - // } else if (error.message.includes('FailAndRestartPartition')) { - // if (logFlags.debug) this.mainLogger.debug(`DATASYNC: Error Failed at: ${error.stack}`) - // this.statemanager_fatal(`syncStateDataForRange_ex_failandrestart`, 'DATASYNC: FailAndRestartPartition: ' + errorToStringFull(error)) - // await this.failandRestart() - // } else { - // this.statemanager_fatal(`syncStateDataForRange_ex`, 'syncStateDataForPartition failed: ' + errorToStringFull(error)) - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: unexpected error. restaring sync:` + errorToStringFull(error)) - // await this.failandRestart() - // } - // } - // } - - // /*** - // * ###### ## ## ## ## ###### ###### ######## ### ######## ######## ######## ### ######## ## ######## ######## ### ######## ### - // * ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - // * ## #### #### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - // * ###### ## ## ## ## ## ###### ## ## ## ## ###### ## ## ## ######## ## ###### ## ## ## ## ## ## ## - // * ## ## ## #### ## ## ## ######### ## ## ## ######### ## ## ## ## ## ## ######### ## ######### - // * ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - // * ###### ## ## ## ###### ###### ## ## ## ## ######## ## ## ## ######## ######## ######## ######## ## ## ## ## ## - // */ - // /** - // * syncStateTableData - // * @param lowAddress - // * @param highAddress - // * @param startTime - // * @param endTime - // */ - // async syncStateTableData(lowAddress: string, highAddress: string, startTime: number, endTime: number) { - // let searchingForGoodData = true - - // if (this.stateManager.currentCycleShardData == null) { - // return - // } - - // let debugRange = ` ${utils.stringifyReduce(lowAddress)} - ${utils.stringifyReduce(highAddress)}` - - // /* prettier-ignore */ if (logFlags.console) console.log(`syncStateTableData startTime: ${startTime} endTime: ${endTime}` + ' time:' + Date.now()) - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateTableData startTime: ${startTime} endTime: ${endTime} low: ${lowAddress} high: ${highAddress} `) - // // todo m11: this loop will try three more random nodes, this is slightly different than described how to handle failure in the doc. this should be corrected but will take more code - // // should prossible break this into a state machine in its own class. - // while (searchingForGoodData) { - // // todo m11: this needs to be replaced - // // Sync the Account State Table First Pass - // // Use the /get_account_state_hash API to get the hash from 3 or more nodes until there is a match between 3 nodes. Ts_start should be 0, or beginning of time. 
The Ts_end value should be current time minus 10T (as configured) - // // Use the /get_account_state API to get the data from one of the 3 nodes - // // Take the hash of the data to ensure that it matches the expected hash value - // // If not try getting the data from another node - // // If the hash matches then update our Account State Table with the data - // // Repeat this for each address range or partition - // let currentTs = Date.now() - - // let safeTime = currentTs - this.stateManager.syncSettleTime - // if (endTime >= safeTime) { - // // need to idle for bit - // await utils.sleep(endTime - safeTime) - // } - // this.lastStateSyncEndtime = endTime + 1 // Adding +1 so that the next query will not overlap the time bounds. this saves us from a bunch of data tracking and filtering to remove duplicates when this function is called later - - // let firstHash - // let queryLow - // let queryHigh - - // queryLow = lowAddress - // queryHigh = highAddress - // let message = { accountStart: queryLow, accountEnd: queryHigh, tsStart: startTime, tsEnd: endTime } - - // let equalFn = (a: AccountStateHashResp, b: AccountStateHashResp) => { - // if (a.stateHash == null) { - // return false // fail cases will get skipped so that we try more nodes. - // } - // return a.stateHash === b.stateHash - // } - // let queryFn = async (node: Shardus.Node) => { - // // Node Precheck! - // if (this.stateManager.isNodeValidForInternalMessage(node.id, 'get_account_state_hash', true, true) === false) { - // return { ready: false, msg: `get_account_state_hash invalid node to ask: ${utils.stringifyReduce(node.id)}` } - // } - // let result = await this.p2p.ask(node, 'get_account_state_hash', message) - // if (result === false) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`ASK FAIL syncStateTableData result === false node:${utils.stringifyReduce(node.id)}`) - // } - // if (result == null) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`ASK FAIL syncStateTableData result == null node:${utils.stringifyReduce(node.id)}`) - // } - - // // TODO I dont know the best way to handle a non null network error here, below is an idea - - // // if (result.stateHash == null) { - // // if (logFlags.error) this.mainLogger.error('ASK FAIL syncStateTableData result.stateHash == null') - // // result = null //if we get something back that is not the right data type clear it to null - // // } - // if (result != null && result.stateHash == null) { - // result = { ready: false, msg: `invalid data format: ${Math.random()}` } - // //if (logFlags.error) this.mainLogger.error('ASK FAIL syncStateTableData result.stateHash == null') - // result = null //if we get something back that is not the right data type clear it to null - // } - - // if (result != null && result.ready === false) { - // result = { ready: false, msg: `nodeNotReady` } - // result = null - // } - // return result - // } - - // let centerNode = ShardFunctions.getCenterHomeNode(this.stateManager.currentCycleShardData.shardGlobals, this.stateManager.currentCycleShardData.parititionShardDataMap, lowAddress, highAddress) - // if (centerNode == null) { - // if (logFlags.debug) this.mainLogger.debug(`centerNode not found`) - // return - // } - - // let nodes: Shardus.Node[] = ShardFunctions.getNodesByProximity( - // this.stateManager.currentCycleShardData.shardGlobals, - // this.stateManager.currentCycleShardData.activeNodes, - // centerNode.ourNodeIndex, - // this.p2p.id, - // 40 - // ) - - // nodes = 
nodes.filter(this.removePotentiallyRemovedNodes) - - // let filteredNodes = [] - // for(let node of nodes){ - - // let nodeShardData = this.stateManager.currentCycleShardData.nodeShardDataMap.get(node.id) - // if(nodeShardData != null){ - - // if(ShardFunctions.testAddressInRange(queryLow, nodeShardData.consensusPartitions) === false){ - // continue - // } - // if(ShardFunctions.testAddressInRange(queryHigh, nodeShardData.consensusPartitions) === false){ - // continue - // } - // filteredNodes.push(node) - // } - // } - // nodes = filteredNodes - - // if (Array.isArray(nodes) === false) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`syncStateTableData: non array returned ${utils.stringifyReduce(nodes)}`) - // return // nothing to do - // } - - // // let nodes = this.getActiveNodesInRange(lowAddress, highAddress) // this.p2p.state.getActiveNodes(this.p2p.id) - // if (nodes.length === 0) { - // if (logFlags.debug) this.mainLogger.debug(`no nodes available`) - // return // nothing to do - // } - // if (logFlags.debug) - // this.mainLogger.debug(`DATASYNC: robustQuery get_account_state_hash from ${utils.stringifyReduce(nodes.map((node) => utils.makeShortHash(node.id) + ':' + node.externalPort))}`) - // let result - // let winners - // try { - // let robustQueryResult = await robustQuery(nodes, queryFn, equalFn, 3, false) - // result = robustQueryResult.topResult - // winners = robustQueryResult.winningNodes - - // let tries = 3 - // while(result && result.ready === false && tries > 0){ - // nestedCountersInstance.countEvent('sync','majority of nodes not ready, wait and retry') - // //too many nodes not ready - // await utils.sleep(30000) //wait 30 seconds and try again - // robustQueryResult = await robustQuery(nodes, queryFn, equalFn, 3, false) - // result = robustQueryResult.topResult - // winners = robustQueryResult.winningNodes - // tries-- - // } - - // if (robustQueryResult.isRobustResult == false) { - // if (logFlags.debug) this.mainLogger.debug('syncStateTableData: robustQuery ') - // this.statemanager_fatal(`syncStateTableData_nonRobust`, 'syncStateTableData: robustQuery ' + debugRange) - // throw new Error('FailAndRestartPartition_stateTable_A' + debugRange) - // } - - // } catch (ex) { - // // NOTE: no longer expecting an exception from robust query in cases where we do not have enough votes or respones! 
- // // but for now if isRobustResult == false then we local code wil throw an exception - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug('syncStateTableData: robustQuery ' + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // this.statemanager_fatal(`syncStateTableData_robustQ`, 'syncStateTableData: robustQuery ' + debugRange + ex.name + ': ' + ex.message + ' at ' + ex.stack) - // throw new Error('FailAndRestartPartition_stateTable_B' + debugRange) - // } - - // if (result && result.stateHash) { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: robustQuery returned result: ${result.stateHash}`) - // if (!winners || winners.length === 0) { - // if (logFlags.debug) this.mainLogger.debug(`DATASYNC: no winners, going to throw fail and restart`) - // this.statemanager_fatal(`syncStateTableData_noWin`, `DATASYNC: no winners, going to throw fail and restart` + debugRange) // todo: consider if this is just an error - // throw new Error('FailAndRestartPartition_stateTable_C' + debugRange) - // } - // this.dataSourceNode = winners[0] // Todo random index - // if (logFlags.debug) - // this.mainLogger.debug(`DATASYNC: got hash ${result.stateHash} from ${utils.stringifyReduce(winners.map((node: Shardus.Node) => utils.makeShortHash(node.id) + ':' + node.externalPort))}`) - // firstHash = result.stateHash - // } else { - // let resultStr = utils.stringifyReduce(result) - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: robustQuery get_account_state_hash failed ${result} ` + debugRange) - // throw new Error('FailAndRestartPartition_stateTable_D ' + result + debugRange) - // } - - // let moreDataRemaining = true - // this.combinedAccountStateData = [] - // let loopCount = 0 - - // let lowTimeQuery = startTime - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: hash: getting state table data from: ${utils.makeShortHash(this.dataSourceNode.id) + ':' + this.dataSourceNode.externalPort}`) - - // // this loop is required since after the first query we may have to adjust the address range and re-request to get the next N data entries. - // while (moreDataRemaining) { - // // Node Precheck! 
- // if (this.stateManager.isNodeValidForInternalMessage(this.dataSourceNode.id, 'syncStateTableData', true, true) === false) { - // if (this.tryNextDataSourceNode('syncStateTableData') == false) { - // break - // } - // continue - // } - - // let message = { accountStart: queryLow, accountEnd: queryHigh, tsStart: lowTimeQuery, tsEnd: endTime } - // let result = await this.p2p.ask(this.dataSourceNode, 'get_account_state', message) - - // if (result == null) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error('ASK FAIL syncStateTableData result == null') - // if (this.tryNextDataSourceNode('syncStateDataGlobals') == false) { - // break - // } - // continue - // } - // if (result.accountStates == null) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error('ASK FAIL syncStateTableData result.accountStates == null') - // if (this.tryNextDataSourceNode('syncStateDataGlobals') == false) { - // break - // } - // continue - // } - - // let accountStateData = result.accountStates - // // get the timestamp of the last account state received so we can use it as the low timestamp for our next query - // if (accountStateData.length > 0) { - // let lastAccount = accountStateData[accountStateData.length - 1] - // if (lastAccount.txTimestamp > lowTimeQuery) { - // lowTimeQuery = lastAccount.txTimestamp - // } - // } - - // // If this is a repeated query, clear out any dupes from the new list we just got. - // // There could be many rows that use the stame timestamp so we will search and remove them - // let dataDuplicated = true - // if (loopCount > 0) { - // while (accountStateData.length > 0 && dataDuplicated) { - // let stateData = accountStateData[0] - // dataDuplicated = false - // for (let i = this.combinedAccountStateData.length - 1; i >= 0; i--) { - // let existingStateData = this.combinedAccountStateData[i] - // if (existingStateData.txTimestamp === stateData.txTimestamp && existingStateData.accountId === stateData.accountId) { - // dataDuplicated = true - // break - // } - // // once we get to an older timestamp we can stop looking, the outer loop will be done also - // if (existingStateData.txTimestamp < stateData.txTimestamp) { - // break - // } - // } - // if (dataDuplicated) { - // accountStateData.shift() - // } - // } - // } - - // if (accountStateData.length === 0) { - // moreDataRemaining = false - // } else { - // if (logFlags.debug) - // this.mainLogger.debug( - // `DATASYNC: syncStateTableData got ${accountStateData.length} more records from ${utils.makeShortHash(this.dataSourceNode.id) + ':' + this.dataSourceNode.externalPort}` - // ) - // this.combinedAccountStateData = this.combinedAccountStateData.concat(accountStateData) - - // nestedCountersInstance.countEvent('sync', `statetable written`, accountStateData.length) - - // loopCount++ - // } - // } - - // let seenAccounts = new Set() - - // //only hash one account state per account. the most recent one! 
- // let filteredAccountStates = [] - // for(let i = this.combinedAccountStateData.length -1; i>=0; i--){ - // let accountState:Shardus.StateTableObject = this.combinedAccountStateData[i] - - // if(seenAccounts.has(accountState.accountId) === true){ - // continue - // } - // seenAccounts.add(accountState.accountId) - // filteredAccountStates.unshift(accountState) - // } - - // let recievedStateDataHash = this.crypto.hash(filteredAccountStates) - - // if (recievedStateDataHash === firstHash) { - // searchingForGoodData = false - // } else { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateTableData finished downloading the requested data but the hash does not match`) - // // Failed again back through loop! TODO ? record/eval/report blame? - // this.stateManager.recordPotentialBadnode() - // throw new Error('FailAndRestartPartition_stateTable_E' + debugRange) - // } - - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncStateTableData saving ${this.combinedAccountStateData.length} records to db`) - // // If the hash matches then update our Account State Table with the data - // await this.storage.addAccountStates(this.combinedAccountStateData) // keep in memory copy for faster processing... - // this.inMemoryStateTableData = this.inMemoryStateTableData.concat(this.combinedAccountStateData) - - // this.syncStatement.numSyncedState += this.combinedAccountStateData.length - // } - // } - - // /*** - // * ###### ## ## ## ## ###### ### ###### ###### ####### ## ## ## ## ######## ######## ### ######## ### - // * ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## - // * ## #### #### ## ## ## ## ## ## ## ## ## ## #### ## ## ## ## ## ## ## ## ## - // * ###### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - // * ## ## ## #### ## ######### ## ## ## ## ## ## ## #### ## ## ## ######### ## ######### - // * ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## - // * ###### ## ## ## ###### ## ## ###### ###### ####### ####### ## ## ## ######## ## ## ## ## ## - // */ - // /** - // * syncAccountData - // * @param lowAddress - // * @param highAddress - // */ - // async syncAccountData(lowAddress: string, highAddress: string) { - // // Sync the Account data - // // Use the /get_account_data API to get the data from the Account Table using any of the nodes that had a matching hash - // if (logFlags.console) console.log(`syncAccountData3` + ' time:' + Date.now()) - - // if (this.config.stateManager == null) { - // throw new Error('this.config.stateManager == null') - // } - - // let queryLow = lowAddress - // let queryHigh = highAddress - - // let moreDataRemaining = true - - // this.combinedAccountData = [] - // let loopCount = 0 - - // let startTime = 0 - // let lowTimeQuery = startTime - - // if(this.useStateTable === false){ - // this.dataSourceNode = null - // this.getDataSourceNode(lowAddress, highAddress) - // } - - // if(this.dataSourceNode == null){ - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`syncAccountData: dataSourceNode == null ${lowAddress} - ${highAddress}`) - // //if we see this then getDataSourceNode failed. - // // this is most likely because the ranges selected when we started sync are now invalid and too wide to be filled. 
- - // //throwing this specific error text will bubble us up to the main sync loop and cause re-init of all the non global sync ranges/trackers - // throw new Error('reset-sync-ranges') - // } - - // // This flag is kind of tricky. It tells us that the loop can go one more time after bumping up the min timestamp to check - // // If we still don't get account data then we will quit. - // // This is needed to solve a case where there are more than 2x account sync max accounts in the same timestamp - // let stopIfNextLoopHasNoResults = false - - // let offset = 0 - // // this loop is required since after the first query we may have to adjust the address range and re-request to get the next N data entries. - // while (moreDataRemaining) { - // // Node Precheck! - // if (this.stateManager.isNodeValidForInternalMessage(this.dataSourceNode.id, 'syncAccountData', true, true) === false) { - // if (this.tryNextDataSourceNode('syncAccountData') == false) { - // break - // } - // continue - // } - - // // max records artificially low to make testing coverage better. todo refactor: make it a config or calculate based on data size - // let message = { accountStart: queryLow, accountEnd: queryHigh, tsStart: startTime, maxRecords: this.config.stateManager.accountBucketSize, offset } - // let r: GetAccountData3Resp | boolean = await this.p2p.ask(this.dataSourceNode, 'get_account_data3', message) // need the repeatable form... possibly one that calls apply to allow for datasets larger than memory - - // // TSConversion need to consider better error handling here! - // let result: GetAccountData3Resp = r as GetAccountData3Resp - - // if (result == null) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`ASK FAIL syncAccountData result == null node:${this.dataSourceNode.id}`) - // if (this.tryNextDataSourceNode('syncAccountData') == false) { - // break - // } - // continue - // } - // if (result.data == null) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`ASK FAIL syncAccountData result.data == null node:${this.dataSourceNode.id}`) - // if (this.tryNextDataSourceNode('syncAccountData') == false) { - // break - // } - // continue - // } - // // accountData is in the form [{accountId, stateId, data}] for n accounts. - // let accountData = result.data.wrappedAccounts - // let lastUpdateNeeded = result.data.lastUpdateNeeded - - // let lastLowQuery = lowTimeQuery - // // get the timestamp of the last account data received so we can use it as the low timestamp for our next query - // if (accountData.length > 0) { - // let lastAccount = accountData[accountData.length - 1] - // if (lastAccount.timestamp > lowTimeQuery) { - // lowTimeQuery = lastAccount.timestamp - // startTime = lowTimeQuery - // } - // } - - // let sameAsStartTS = 0 - - // // If this is a repeated query, clear out any dupes from the new list we just got. 
- // // There could be many rows that use the stame timestamp so we will search and remove them - // let dataDuplicated = true - // if (loopCount > 0) { - // while (accountData.length > 0 && dataDuplicated) { - // let stateData = accountData[0] - // dataDuplicated = false - - // if(stateData.timestamp === lastLowQuery){ - // sameAsStartTS++ - // } - - // //todo get rid of this in next verision - // for (let i = this.combinedAccountData.length - 1; i >= 0; i--) { - // let existingStateData = this.combinedAccountData[i] - // if (existingStateData.timestamp === stateData.timestamp && existingStateData.accountId === stateData.accountId) { - // dataDuplicated = true - // break - // } - // // once we get to an older timestamp we can stop looking, the outer loop will be done also - // if (existingStateData.timestamp < stateData.timestamp) { - // break - // } - // } - // if (dataDuplicated) { - // accountData.shift() - // } - // } - // } - - // if(lastLowQuery === lowTimeQuery){ - // //update offset, so we can get next page of data - // //offset+= (result.data.wrappedAccounts.length + result.data.wrappedAccounts2.length) - // offset+=sameAsStartTS //conservative offset! - // } else { - // //clear offset - // offset=0 - // } - - // // if we have any accounts in wrappedAccounts2 - // let accountData2 = result.data.wrappedAccounts2 - // if (accountData2.length > 0) { - // while (accountData.length > 0 && dataDuplicated) { - // let stateData = accountData2[0] - // dataDuplicated = false - // for (let i = this.combinedAccountData.length - 1; i >= 0; i--) { - // let existingStateData = this.combinedAccountData[i] - // if (existingStateData.timestamp === stateData.timestamp && existingStateData.accountId === stateData.accountId) { - // dataDuplicated = true - // break - // } - // // once we get to an older timestamp we can stop looking, the outer loop will be done also - // if (existingStateData.timestamp < stateData.timestamp) { - // break - // } - // } - // if (dataDuplicated) { - // accountData2.shift() - // } - // } - // } - - // if (lastUpdateNeeded || (accountData2.length === 0 && accountData.length === 0)) { - // if(lastUpdateNeeded){ - // //we are done - // moreDataRemaining = false - // } else { - // if(stopIfNextLoopHasNoResults === true){ - // //we are done - // moreDataRemaining = false - // } else{ - // //bump start time and loop once more! - // //If we don't get anymore accounts on that loopl then we will quit for sure - // //If we do get more accounts then stopIfNextLoopHasNoResults will reset in a branch below - // startTime++ - // loopCount++ - // stopIfNextLoopHasNoResults = true - // } - // } - - // if (logFlags.debug) - // this.mainLogger.debug( - // `DATASYNC: syncAccountData3 got ${accountData.length} more records. last update: ${lastUpdateNeeded} extra records: ${result.data.wrappedAccounts2.length} tsStart: ${lastLowQuery} highestTS1: ${result.data.highestTs} delta:${result.data.delta} offset:${offset}` - // ) - // if (accountData.length > 0) { - // this.combinedAccountData = this.combinedAccountData.concat(accountData) - // } - // if (accountData2.length > 0) { - // this.combinedAccountData = this.combinedAccountData.concat(accountData2) - // } - // } else { - // //we got accounts this time so reset this flag to false - // stopIfNextLoopHasNoResults = false - // if (logFlags.debug) - // this.mainLogger.debug( - // `DATASYNC: syncAccountData3b got ${accountData.length} more records. 
last update: ${lastUpdateNeeded} extra records: ${result.data.wrappedAccounts2.length} tsStart: ${lastLowQuery} highestTS1: ${result.data.highestTs} delta:${result.data.delta} offset:${offset}` - // ) - // this.combinedAccountData = this.combinedAccountData.concat(accountData) - // loopCount++ - // // await utils.sleep(500) - // } - // await utils.sleep(200) - // } - // } - - // /** - // * processAccountData - // * // Process the Account data - // * // For each account in the Account data make sure the entry in the Account State Table has the same State_after value; if not remove the record from the Account data - // * // For each account in the Account State Table make sure the entry in Account data has the same State_after value; if not save the account id to be looked up later - // * // Use the App.set_account_data function with the Account data to save the data to the application Accounts Table; if any failed accounts are returned save the account id to be looked up later - // * // State data = {accountId, txId, txTimestamp, stateBefore, stateAfter} - // * // accountData is in the form [{accountId, stateId, data}] for n accounts. - // */ - // async processAccountData() : Promise { - - // if(this.useStateTable === false){ - // return await this.processAccountDataNoStateTable() - - // } - - // this.missingAccountData = [] - // this.mapAccountData = {} - // this.stateTableForMissingTXs = {} - // // create a fast lookup map for the accounts we have. Perf. will need to review if this fits into memory. May need a novel structure. - // let account - // for (let i = 0; i < this.combinedAccountData.length; i++) { - // account = this.combinedAccountData[i] - // this.mapAccountData[account.accountId] = account - // } - - // let accountKeys = Object.keys(this.mapAccountData) - // let uniqueAccounts = accountKeys.length - // let initialCombinedAccountLength = this.combinedAccountData.length - // if (uniqueAccounts < initialCombinedAccountLength) { - // // keep only the newest copies of each account: - // // we need this if using a time based datasync - // this.combinedAccountData = [] - // for (let accountID of accountKeys) { - // this.combinedAccountData.push(this.mapAccountData[accountID]) - // } - // } - - // let missingButOkAccounts = 0 - // let missingTXs = 0 - // let handledButOk = 0 - // let otherMissingCase = 0 - // let futureStateTableEntry = 0 - // let missingButOkAccountIDs: { [id: string]: boolean } = {} - - // let missingAccountIDs: { [id: string]: boolean } = {} - - // if (logFlags.debug) - // this.mainLogger.debug( - // `DATASYNC: processAccountData stateTableCount: ${this.inMemoryStateTableData.length} unique accounts: ${uniqueAccounts} initial combined len: ${initialCombinedAccountLength}` - // ) - // // For each account in the Account data make sure the entry in the Account State Table has the same State_after value; if not remove the record from the Account data - - // for (let stateData of this.inMemoryStateTableData) { - // account = this.mapAccountData[stateData.accountId] - // // does the state data table have a node and we don't have data for it? 
- // if (account == null) { - // // make sure we have a transaction that matches this in our queue - // // the state table data we are working with is sufficiently old, so that we should have seen a transaction in our queue by the time we could get here - - // // if the account is seend in state table data but this was state table data that was from after time lastStateSyncEndtime - // // then we wont care about missing this account. receipt repair should take care of it. - // // alternatively this could be fixed with more advance logic on the receipt repair side of things. - // let time = Number(stateData.txTimestamp) - // if (time > this.lastStateSyncEndtime) { - // futureStateTableEntry++ - // continue - // } - - // //acceptedTXByHash seems to always be empty so this forces missingTXs - // // let txRef = this.acceptedTXByHash[stateData.txId] - // // if (txRef == null) { - // // missingTXs++ - // // if (stateData.accountId != null) { - // // this.missingAccountData.push(stateData.accountId) - // // missingAccountIDs[stateData.accountId] = true - // // } - // // } else - // if (stateData.stateBefore === allZeroes64) { - // // this means we are at the start of a valid state table chain that starts with creating an account - // missingButOkAccountIDs[stateData.accountId] = true - // missingButOkAccounts++ - // } else if (missingButOkAccountIDs[stateData.accountId] === true) { - // // no action. we dont have account, but we know a different transaction will create it. - // handledButOk++ - // } else { - // // unhandled case. not expected. this would happen if the state table chain does not start with this account being created - // // this could be caused by a node trying to withold account data when syncing - // if (stateData.accountId != null) { - // this.missingAccountData.push(stateData.accountId) - // missingAccountIDs[stateData.accountId] = true - // } - // otherMissingCase++ - // } - // // should we check timestamp for the state table data? - // continue - // } - - // if (!account.syncData) { - // account.syncData = { timestamp: 0 } - // } - - // if (account.stateId === stateData.stateAfter) { - // // mark it good. - // account.syncData.uptodate = true - // account.syncData.anyMatch = true - // if (stateData.txTimestamp > account.syncData.timestamp) { - // account.syncData.missingTX = false // finding a good match can clear the old error. this relys on things being in order! - // account.syncData.timestamp = stateData.txTimestamp - - // //clear the missing reference if we have one - // delete this.stateTableForMissingTXs[stateData.accountId] - // } - // } else { - // // this state table data does not match up with what we have for the account - - // // if the state table TS is newer than our sync data that means the account has changed - // // and the data we have for it is not up to date. - // if (stateData.txTimestamp > account.syncData.timestamp) { - // account.syncData.uptodate = false - // // account.syncData.stateData = stateData - // // chceck if we are missing a tx to handle this. - // let txRef = this.acceptedTXByHash[stateData.txId] - // if (txRef == null) { - // // account.syncData.missingTX = true - // // if (stateData.txTimestamp > account.syncData.timestamp) { - // account.syncData.missingTX = true - // // account.syncData.timestamp = stateData.txTimestamp - // // } - // // should we try to un foul the missingTX flag here?? - // } - - // account.syncData.timestamp = stateData.txTimestamp - - // // record this because we may want to repair to it. 
- // this.stateTableForMissingTXs[stateData.accountId] = stateData - // } - // } - // } - - // if (missingButOkAccounts > 0) { - // // it is valid / normal flow to get to this point: - // if (logFlags.debug) - // this.mainLogger.debug( - // `DATASYNC: processAccountData accouts missing from accountData, but are ok, because we have transactions for them: missingButOKList: ${missingButOkAccounts}, handledbutOK: ${handledButOk}` - // ) - // } - // if (this.missingAccountData.length > 0) { - // // getting this indicates a non-typical problem that needs correcting - // if (logFlags.debug) - // this.mainLogger.debug( - // `DATASYNC: processAccountData accounts missing from accountData, but in the state table. This is an unexpected error and we will need to handle them as failed accounts: missingList: ${ - // this.missingAccountData.length - // }, missingTX count: ${missingTXs} missingUnique: ${Object.keys(missingAccountIDs).length}` - // ) - // } - - // // For each account in the Account State Table make sure the entry in Account data has the same State_after value; if not save the account id to be looked up later - // this.accountsWithStateConflict = [] - // let goodAccounts: Shardus.WrappedData[] = [] - // let noSyncData = 0 - // let noMatches = 0 - // let outOfDateNoTxs = 0 - // let unhandledCase = 0 - // let fix1Worked = 0 - // for (let account of this.combinedAccountData) { - // if (!account.syncData) { - // // this account was not found in state data - // this.accountsWithStateConflict.push(account) - // noSyncData++ - // //turning this case back off. - // } else if (account.syncData.anyMatch === true) { - // if (account.syncData.missingTX) { - // fix1Worked++ - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: processAccountData FIX WORKED. ${utils.stringifyReduce(account)} `) - // } - // //this is the positive case. We have a match so we can use this account - // delete account.syncData - // goodAccounts.push(account) - // } else if (!account.syncData.anyMatch) { - // // this account was in state data but none of the state table stateAfter matched our state - // this.accountsWithStateConflict.push(account) - // noMatches++ - // } else if (account.syncData.missingTX) { - // // - // this.accountsWithStateConflict.push(account) - // outOfDateNoTxs++ - // } else { - // // could be good but need to check if we got stamped with some older datas. - // // if (account.syncData.uptodate === false) { - // // // check for a missing transaction. - // // // need to check above so that a right cant clear a wrong. - // // let txRef = this.acceptedTXByHash[account.syncData.stateData.txId] - // // if (txRef == null) { - // // this.mainLogger.debug(`DATASYNC: processAccountData account not up to date ${utils.stringifyReduce(account)}`) - // // this.accountsWithStateConflict.push(account) - // // outOfDateNoTxs++ - // // continue - // // } - // // } - // unhandledCase++ - - // // delete account.syncData - // // goodAccounts.push(account) - // } - // } - - // if (logFlags.debug) - // this.mainLogger.debug( - // `DATASYNC: processAccountData saving ${goodAccounts.length} of ${this.combinedAccountData.length} records to db. 
noSyncData: ${noSyncData} noMatches: ${noMatches} missingTXs: ${missingTXs} handledButOk: ${handledButOk} otherMissingCase: ${otherMissingCase} outOfDateNoTxs: ${outOfDateNoTxs} futureStateTableEntry:${futureStateTableEntry} unhandledCase:${unhandledCase} fix1Worked:${fix1Worked}` - // ) - // // failedHashes is a list of accounts that failed to match the hash reported by the server - // let failedHashes = await this.stateManager.checkAndSetAccountData(goodAccounts, 'syncNonGlobals:processAccountData', true) // repeatable form may need to call this in batches - - // this.syncStatement.numAccounts += goodAccounts.length - - // if (failedHashes.length > 1000) { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: processAccountData failed hashes over 1000: ${failedHashes.length} restarting sync process`) - // // state -> try another node. TODO record/eval/report blame? - // this.stateManager.recordPotentialBadnode() - // throw new Error('FailAndRestartPartition_processAccountData_A') - // } - // if (failedHashes.length > 0) { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: processAccountData failed hashes: ${failedHashes.length} will have to download them again`) - // // TODO ? record/eval/report blame? - // this.stateManager.recordPotentialBadnode() - // this.failedAccounts = this.failedAccounts.concat(failedHashes) - // for (let accountId of failedHashes) { - // account = this.mapAccountData[accountId] - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`DATASYNC: processAccountData ${accountId} data: ${utils.stringifyReduce(account)}`) - - // if (account != null) { - // if (logFlags.verbose) this.mainLogger.debug(`DATASYNC: processAccountData adding account to list`) - // this.accountsWithStateConflict.push(account) - // } else { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`DATASYNC: processAccountData cant find data: ${accountId}`) - // if (accountId) { - // //this.accountsWithStateConflict.push({ address: accountId, }) //NOTE: fixed with refactor - // this.accountsWithStateConflict.push({ accountId: accountId, data: null, stateId: null, timestamp: 0 }) - // } - // } - // } - // } - - // let accountsSaved = await this.stateManager.writeCombinedAccountDataToBackups(goodAccounts, failedHashes) - - // nestedCountersInstance.countEvent('sync', `accounts written`, accountsSaved) - - // this.combinedAccountData = [] // we can clear this now. 
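After the good accounts are written, checkAndSetAccountData reports which accounts failed their hash check; the deprecated code above either queues a small number of failures for re-download or aborts the whole pass when there are too many. A minimal sketch of that decision, where the 1000 threshold comes from the code above and the callback is an illustrative placeholder:

```typescript
// Sketch only: small failure counts are retried, large counts restart the sync pass.
function handleFailedHashes(failedHashes: string[], queueForRedownload: (accountId: string) => void): void {
  if (failedHashes.length > 1000) {
    // too many mismatches: give up on this pass and restart against another node
    throw new Error('FailAndRestartPartition_processAccountData_A')
  }
  for (const accountId of failedHashes) {
    queueForRedownload(accountId) // will be requested again via get_account_data_by_list
  }
}
```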
- - // return accountsSaved - // } - - /*** - * ###### ## ## ## ## ###### ######## ### #### ## ######## ######## ### ###### ###### ###### ####### ## ## ## ## ######## ###### - * ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## - * ## #### #### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #### ## ## ## - * ###### ## ## ## ## ## ###### ## ## ## ## ###### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ###### - * ## ## ## #### ## ## ######### ## ## ## ## ## ######### ## ## ## ## ## ## ## ## #### ## ## - * ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## - * ###### ## ## ## ###### ## ## ## #### ######## ######## ######## ## ## ###### ###### ###### ####### ####### ## ## ## ###### - */ - /** - * syncFailedAcccounts - * // Sync the failed accounts - * // Log that some account failed - * // Use the /get_account_data_by_list API to get the data for the accounts that need to be looked up later from any of the nodes that had a matching hash but different from previously used nodes - * // Repeat the “Sync the Account State Table Second Pass” step - * // Repeat the “Process the Account data” step - * - * @param lowAddress - * @param highAddress - */ - // async syncFailedAcccounts(lowAddress: string, highAddress: string) { - // if (this.accountsWithStateConflict.length === 0 && this.missingAccountData.length === 0) { - // if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncFailedAcccounts no failed hashes to sync`) - // return - // } - - // nestedCountersInstance.countEvent('sync', 'syncFailedAcccounts') - // this.syncStatement.failedAccountLoops++ - - // if (logFlags.verbose) this.mainLogger.debug(`DATASYNC: syncFailedAcccounts start`) - // let addressList: string[] = [] - // for (let accountEntry of this.accountsWithStateConflict) { - // // //NOTE: fixed with refactor - // // if (accountEntry.data && accountEntry.data.address) { - // // addressList.push(accountEntry.data.address) - // if (accountEntry.accountId) { - // addressList.push(accountEntry.accountId) - // } else { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`DATASYNC: syncFailedAcccounts failed to add account ${accountEntry}`) - // } - // } - // // add the addresses of accounts that we got state table data for but not data for - // addressList = addressList.concat(this.missingAccountData) - // this.missingAccountData = [] - - // // TODO m11: should we pick different nodes to ask? (at the very least need to change the data source node!!!!!!) - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncFailedAcccounts requesting data for failed hashes ${utils.stringifyReduce(addressList)}`) - - // // Node Precheck! 
- // if (this.stateManager.isNodeValidForInternalMessage(this.dataSourceNode.id, 'syncStateDataGlobals', true, true) === false) { - // if (this.tryNextDataSourceNode('syncStateDataGlobals') == false) { - // return - // } - // //we picked a new node to ask so relaunch - // await this.syncFailedAcccounts(lowAddress, highAddress) - // return - // } - - // let message = { accountIds: addressList } - // let result = await this.p2p.ask(this.dataSourceNode, 'get_account_data_by_list', message) - - // nestedCountersInstance.countEvent('sync', 'syncFailedAcccounts accountsFailed', addressList.length) - // this.syncStatement.failedAccounts += addressList.length - - // if (result == null) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error('ASK FAIL syncFailedAcccounts result == null') - // if (this.tryNextDataSourceNode('syncStateDataGlobals') == false) { - // return - // } - // //we picked a new node to ask so relaunch - // await this.syncFailedAcccounts(lowAddress, highAddress) - // return - // } - // if (result.accountData == null) { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error('ASK FAIL syncFailedAcccounts result.accountData == null') - // if (this.tryNextDataSourceNode('syncStateDataGlobals') == false) { - // return - // } - // //we picked a new node to ask so relaunch - // await this.syncFailedAcccounts(lowAddress, highAddress) - // return - // } - - // this.combinedAccountData = this.combinedAccountData.concat(result.accountData) - - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: syncFailedAcccounts combinedAccountData: ${this.combinedAccountData.length} accountData: ${result.accountData.length}`) - // if(this.useStateTable === true){ - // //depricated - // //await this.syncStateTableData(lowAddress, highAddress, this.lastStateSyncEndtime, Date.now()) - // } - // // process the new accounts. 
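The commented-out syncFailedAcccounts above repeats one pattern three times: if the data source node is invalid or its reply is unusable, rotate to another node and relaunch the same step. A hedged sketch of that retry shape, where the function parameters are placeholders rather than real APIs from this codebase:

```typescript
// Sketch only: rotate the data source node and relaunch the step when a reply is unusable.
async function askWithNodeRotation<T>(
  ask: () => Promise<T | null>,
  tryNextDataSourceNode: () => boolean,
  relaunchStep: () => Promise<void>
): Promise<T | null> {
  const result = await ask()
  if (result != null) return result
  if (tryNextDataSourceNode() === false) {
    return null // no other node to ask, give up on this step
  }
  await relaunchStep() // re-run the same sync step against the newly chosen node
  return null
}
```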
- // // await this.processAccountData() //using state table is depricated - - // await this.processAccountDataNoStateTable() - // } - - /*** - * ######## ######## ######## ### #### ######## ## ## #### ###### ###### #### ## ## ###### ######## ## ## ###### - * ## ## ## ## ## ## ## ## ## ## ### ### ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## #### #### ## ## ## ## #### ## ## ## ## ## ## - * ######## ###### ######## ## ## ## ######## ## ### ## ## ###### ###### ## ## ## ## ## #### ## ### ###### - * ## ## ## ## ######### ## ## ## ## ## ## ## ## ## ## #### ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## - * ## ## ######## ## ## ## #### ## ## ## ## #### ###### ###### #### ## ## ###### ## ## ## ###### - */ - /** - * repairMissingTXs - * - */ - // async repairMissingTXs() { - // nestedCountersInstance.countEvent('sync', 'repairMissingTXs') - - // let keys = Object.keys(this.stateTableForMissingTXs) - - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: repairMissingTXs begin: ${keys.length} ${utils.stringifyReduce(keys)}`) - // for (let key of keys) { - // try { - // this.profiler.profileSectionStart('repairMissingTX') - // let stateTableData = this.stateTableForMissingTXs[key] - - // if (stateTableData == null) { - // nestedCountersInstance.countEvent('sync', 'repairMissingTXs stateTableData == null') - // continue - // } - // if (stateTableData.txId == null) { - // nestedCountersInstance.countEvent('sync', 'repairMissingTXs stateTableData.txId == null') - // continue - // } - - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: repairMissingTXs start: ${utils.stringifyReduce(stateTableData)}`) - // //get receipt for txID - // let result = await this.stateManager.getTxRepair().requestMissingReceipt(stateTableData.txId, Number(stateTableData.txTimestamp), stateTableData.accountId) - // if (result != null && result.success === true) { - // //@ts-ignore todo can axe this when we get rid of old receipts - // let repairOk = await this.stateManager.getTxRepair().repairToMatchReceiptWithoutQueueEntry(result.receipt, stateTableData.accountId) - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: repairMissingTXs finished: ok:${repairOk} ${utils.stringifyReduce(stateTableData)}`) - // } else { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: repairMissingTXs cant get receipt: ${utils.stringifyReduce(stateTableData)}`) - // this.statemanager_fatal(`repairMissingTXs_fail`, `repairMissingTXs_fail ${utils.stringifyReduce(stateTableData)} result:${utils.stringifyReduce(result)}`) - // } - // } catch (error) { - // this.statemanager_fatal(`repairMissingTXs_ex`, 'repairMissingTXs ex: ' + errorToStringFull(error)) - // } finally { - // this.profiler.profileSectionEnd('repairMissingTX') - // } - // } - // } - - /*** - * ######## ######## ####### ###### ######## ###### ###### ### ###### ###### ####### ## ## ## ## ######## ######## ### ######## ### - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #### ## ## ## ## ## ## ## ## ## - * ######## ######## ## ## ## ###### ###### ###### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ######### ## ## ## ## ## ## ## #### ## ## ## ######### ## ######### - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## 
## ## ## ## ## ## ## - * ## ## ## ####### ###### ######## ###### ###### ## ## ###### ###### ####### ####### ## ## ## ######## ## ## ## ## ## - */ - - // async processAccountDataNoStateTable() : Promise { - // this.missingAccountData = [] - // this.mapAccountData = {} - // this.stateTableForMissingTXs = {} - // // create a fast lookup map for the accounts we have. Perf. will need to review if this fits into memory. May need a novel structure. - // let account - // for (let i = 0; i < this.combinedAccountData.length; i++) { - // account = this.combinedAccountData[i] - // this.mapAccountData[account.accountId] = account - // } - - // let accountKeys = Object.keys(this.mapAccountData) - // let uniqueAccounts = accountKeys.length - // let initialCombinedAccountLength = this.combinedAccountData.length - // if (uniqueAccounts < initialCombinedAccountLength) { - // // keep only the newest copies of each account: - // // we need this if using a time based datasync - // this.combinedAccountData = [] - // for (let accountID of accountKeys) { - // this.combinedAccountData.push(this.mapAccountData[accountID]) - // } - // } - - // let missingButOkAccounts = 0 - // let missingTXs = 0 - // let handledButOk = 0 - // let otherMissingCase = 0 - // let futureStateTableEntry = 0 - // let missingButOkAccountIDs: { [id: string]: boolean } = {} - - // let missingAccountIDs: { [id: string]: boolean } = {} - - // if (logFlags.debug) - // this.mainLogger.debug( - // `DATASYNC: processAccountData stateTableCount: ${this.inMemoryStateTableData.length} unique accounts: ${uniqueAccounts} initial combined len: ${initialCombinedAccountLength}` - // ) - // // For each account in the Account data make sure the entry in the Account State Table has the same State_after value; if not remove the record from the Account data - - // // For each account in the Account State Table make sure the entry in Account data has the same State_after value; if not save the account id to be looked up later - // this.accountsWithStateConflict = [] - // let goodAccounts: Shardus.WrappedData[] = [] - // let noSyncData = 0 - // let noMatches = 0 - // let outOfDateNoTxs = 0 - // let unhandledCase = 0 - // let fix1Worked = 0 - // for (let account of this.combinedAccountData) { - // goodAccounts.push(account) - // } - - // if (logFlags.debug) - // this.mainLogger.debug( - // `DATASYNC: processAccountData saving ${goodAccounts.length} of ${this.combinedAccountData.length} records to db. noSyncData: ${noSyncData} noMatches: ${noMatches} missingTXs: ${missingTXs} handledButOk: ${handledButOk} otherMissingCase: ${otherMissingCase} outOfDateNoTxs: ${outOfDateNoTxs} futureStateTableEntry:${futureStateTableEntry} unhandledCase:${unhandledCase} fix1Worked:${fix1Worked}` - // ) - // // failedHashes is a list of accounts that failed to match the hash reported by the server - // let failedHashes = await this.stateManager.checkAndSetAccountData(goodAccounts, 'syncNonGlobals:processAccountDataNoStateTable', true) // repeatable form may need to call this in batches - - // this.syncStatement.numAccounts += goodAccounts.length - - // if (failedHashes.length > 1000) { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: processAccountData failed hashes over 1000: ${failedHashes.length} restarting sync process`) - // // state -> try another node. TODO record/eval/report blame? 
- // this.stateManager.recordPotentialBadnode() - // throw new Error('FailAndRestartPartition_processAccountData_A') - // } - // if (failedHashes.length > 0) { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`DATASYNC: processAccountData failed hashes: ${failedHashes.length} will have to download them again`) - // // TODO ? record/eval/report blame? - // this.stateManager.recordPotentialBadnode() - // this.failedAccounts = this.failedAccounts.concat(failedHashes) - // for (let accountId of failedHashes) { - // account = this.mapAccountData[accountId] - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`DATASYNC: processAccountData ${accountId} data: ${utils.stringifyReduce(account)}`) - - // if (account != null) { - // if (logFlags.verbose) this.mainLogger.debug(`DATASYNC: processAccountData adding account to list`) - // this.accountsWithStateConflict.push(account) - // } else { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`DATASYNC: processAccountData cant find data: ${accountId}`) - // if (accountId) { - // //this.accountsWithStateConflict.push({ address: accountId, }) //NOTE: fixed with refactor - // this.accountsWithStateConflict.push({ accountId: accountId, data: null, stateId: null, timestamp: 0 }) - // } - // } - // } - // } - - // let accountsSaved = await this.stateManager.writeCombinedAccountDataToBackups(goodAccounts, failedHashes) - - // nestedCountersInstance.countEvent('sync', `accounts written`, accountsSaved) - - // this.combinedAccountData = [] // we can clear this now. - - // return accountsSaved - // } - - //from:syncStateDataForRange2 - - // // Sync the failed accounts - // // Log that some account failed - // // Use the /get_account_data_by_list API to get the data for the accounts that need to be looked up later from any of the nodes that had a matching hash but different from previously used nodes - // // Repeat the “Sync the Account State Table Second Pass” step - // // Repeat the “Process the Account data” step - // await this.syncFailedAcccounts(lowAddress, highAddress) - - // if (this.failedAccountsRemain()) { - // if (logFlags.debug) - // this.mainLogger.debug( - // `DATASYNC: failedAccountsRemain, ${utils.stringifyReduce(lowAddress)} - ${utils.stringifyReduce(highAddress)} accountsWithStateConflict:${ - // this.accountsWithStateConflict.length - // } missingAccountData:${this.missingAccountData.length} stateTableForMissingTXs:${Object.keys(this.stateTableForMissingTXs).length}` - // ) - // } - - // let keysToRepair = Object.keys(this.stateTableForMissingTXs).length - // if (keysToRepair > 0) { - // // alternate repair. - // this.repairMissingTXs() - // } - - ///from get_account_data_by_hashes handler - // if(this.stateManager.accountSync.useStateTable === true){ - // if(accountsToGetStateTableDataFor.length > 0){ - // result.stateTableData = await this.stateManager.storage.queryAccountStateTableByListNewest(accountsToGetStateTableDataFor) - // } - // } - - // /** - // * failedAccountsRemain - // */ - // failedAccountsRemain(): boolean { - // // clean out account conflicts based on what TXs we we have in the queue that we can repair. - // // also mark tx for scheduled repair.. - - // // failed counts went way down after fixing liberdus end of things so putting this optimization on hold. 
- - // if (this.accountsWithStateConflict.length === 0 && this.missingAccountData.length === 0) { - // return false - // } - // return true - // } - - // this.p2p.registerInternal( - // 'repair_too_old_account_data', - // async ( - // payload: TooOldAccountUpdateRequest, - // respond: (arg0: boolean) => Promise, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('repair_too_old_account_data', false, msgSize) - // let { accountID, txId, appliedReceipt2, updatedAccountData } = payload - // const hash = updatedAccountData.stateId - // const accountData = updatedAccountData - - // // check if we cover this accountId - // const storageNodes = this.stateManager.transactionQueue.getStorageGroupForAccount(accountID) - // const isInStorageGroup = storageNodes.map((node) => node.id).includes(Self.id) - // if (!isInStorageGroup) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: not in storage group for account: ${accountID}`) - // await respond(false) - // return - // } - // // check if we have already repaired this account - // const accountHashCache = this.stateManager.accountCache.getAccountHash(accountID) - // if (accountHashCache != null && accountHashCache.h === hash) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: already repaired account: ${accountID}`) - // await respond(false) - // return - // } - // if (accountHashCache != null && accountHashCache.t > accountData.timestamp) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: we have newer account: ${accountID}`) - // await respond(false) - // return - // } - - // const archivedQueueEntry = this.stateManager.transactionQueue.getQueueEntryArchived(txId, 'repair_too_old_account_data') - - // if (archivedQueueEntry == null) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: no archivedQueueEntry for txId: ${txId}`) - // this.mainLogger.debug(`repair_too_old_account_data: no archivedQueueEntry for txId: ${txId}`) - // await respond(false) - // return - // } - - // // check the vote and confirmation status of the tx - // const bestMessage = appliedReceipt2.confirmOrChallenge - // const receivedBestVote = appliedReceipt2.appliedVote - // if (receivedBestVote != null) { - // // Check if vote is from eligible list of voters for this TX - // if(!archivedQueueEntry.eligibleNodeIdsToVote.has(receivedBestVote.node_id)) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: vote from ineligible node for txId: ${txId}`) - // return - // } - - // // Check signature of the vote - // if (!this.crypto.verify( - // receivedBestVote as SignedObject, - // archivedQueueEntry.executionGroupMap.get(receivedBestVote.node_id).publicKey - // )) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: vote signature invalid for txId: ${txId}`) - // return - // } - - // // Check transaction result from vote - // if (!receivedBestVote.transaction_result) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: vote result not true for txId ${txId}`) - // return - // } - - // // Check account hash. Calculate account hash of account given in instruction - // // and compare it with the account hash in the vote. 
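The commented-out repair handler above next recomputes the hash of the supplied account data and compares it with the after-state hash the winning vote recorded for that account. A minimal sketch of that comparison, with VoteLike as a simplified assumption for the vote's parallel arrays:

```typescript
// Sketch only: the vote carries parallel arrays of account ids and after-state hashes.
interface VoteLike {
  account_id: string[]
  account_state_hash_after: string[]
}

function accountHashMatchesVote(vote: VoteLike, accountID: string, calculatedAccountHash: string): boolean {
  const index = vote.account_id.indexOf(accountID)
  if (index === -1) return false // the vote does not cover this account
  return vote.account_state_hash_after[index] === calculatedAccountHash
}
```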
- // const calculatedAccountHash = this.app.calculateAccountHash(accountData.data) - // let accountHashMatch = false - // for (let i = 0; i < receivedBestVote.account_id.length; i++) { - // if (receivedBestVote.account_id[i] === accountID) { - // if (receivedBestVote.account_state_hash_after[i] !== calculatedAccountHash) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: account hash mismatch for txId: ${txId}`) - // accountHashMatch = false - // } else { - // accountHashMatch = true - // } - // break - // } - // } - // if (accountHashMatch === false) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: vote account hash mismatch for txId: ${txId}`) - // return - // } - // } else { - // // Skip this account apply as we were not able to get the best vote for this tx - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: no vote for txId: ${txId}`) - // return - // } - - // if (bestMessage != null) { - // // Skip if challenge receipt - // if (bestMessage.message === 'challenge') { - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: challenge for txId: ${txId}`) - // return - // } - - // // Check if mesasge is from eligible list of responders for this TX - // if(!archivedQueueEntry.eligibleNodeIdsToConfirm.has(bestMessage.nodeId)) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: confirmation from ineligible node for txId: ${txId}`) - // return - // } - - // // Check signature of the message - // if(!this.crypto.verify( - // bestMessage as SignedObject, - // archivedQueueEntry.executionGroupMap.get(bestMessage.nodeId).publicKey - // )) { - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: confirmation signature invalid for txId: ${txId}`) - // return - // } - // } else { - // // Skip this account apply as we were not able to get the best confirmation for this tx - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data: no confirmation for txId: ${txId}`) - // return - // } - - // // update the account data (and cache?) - // const updatedAccounts: string[] = [] - // //save the account data. 
note this will make sure account hashes match the wrappers and return failed - // // hashes that don't match - // const failedHashes = await this.stateManager.checkAndSetAccountData( - // [accountData], - // `repair_too_old_account_data:${txId}`, - // true, - // updatedAccounts - // ) - // if (logFlags.debug) this.mainLogger.debug(`repair_too_old_account_data: ${updatedAccounts.length} updated, ${failedHashes.length} failed`) - // nestedCountersInstance.countEvent('accountPatcher', `repair_too_old_account_data:${updatedAccounts.length} updated, accountId: ${utils.makeShortHash(accountID)}, cycle: ${this.stateManager.currentCycleShardData.cycleNumber}`) - // if (failedHashes.length > 0) nestedCountersInstance.countEvent('accountPatcher', `update_too_old_account_data:${failedHashes.length} failed`) - // let success = false - // if (updatedAccounts.length > 0 && failedHashes.length === 0) { - // success = true - // } - // await respond(success) - - // profilerInstance.scopedProfileSectionEnd('repair_too_old_account_data') - // } - // ) -} - -export default Deprecated diff --git a/src/state-manager/Endpoints.ts b/src/state-manager/Endpoints.ts new file mode 100644 index 000000000..39575984f --- /dev/null +++ b/src/state-manager/Endpoints.ts @@ -0,0 +1,790 @@ +import { EventEmitter } from 'events' +import { P2P as P2PTypes } from '@shardeum-foundation/lib-types' +import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { profilerInstance } from '../utils/profiler' +import { + RequestReceiptForTxReqSerialized, + deserializeRequestReceiptForTxReq +} from '../types/RequestReceiptForTxReq' +import { + RequestReceiptForTxRespSerialized, + serializeRequestReceiptForTxResp +} from '../types/RequestReceiptForTxResp' +import { TypeIdentifierEnum } from '../types/enum/TypeIdentifierEnum' +import { getStreamWithTypeCheck, requestErrorHandler } from '../types/Helpers' +import { InternalBinaryHandler } from '../types/Handler' +// Route type is defined inline with handlers +import { RequestErrorEnum } from '../types/enum/RequestErrorEnum' +import { logFlags } from '../logger' +import * as utils from '../utils' +import { Utils } from '@shardeum-foundation/lib-types' +import * as ShardusTypes from '../shardus/shardus-types' +import { ResponseError } from '../types/ResponseError' +import * as Comms from '../p2p/Comms' +import * as Context from '../p2p/Context' +import { activeByIdOrder } from '../p2p/NodeList' +import { VectorBufferStream } from '../utils/serialization/VectorBufferStream' +import { InternalError, serializeResponseError, BadRequest } from '../types/ResponseError' +import { + RequestStateForTxPostReq, + deserializeRequestStateForTxPostReq +} from '../types/RequestStateForTxPostReq' +import { + RequestStateForTxPostResp, + serializeRequestStateForTxPostResp +} from '../types/RequestStateForTxPostResp' +import { + GetTrieHashesRequest, + serializeGetTrieHashesReq +} from '../types/GetTrieHashesReq' +import { + GetTrieHashesResponse, + deserializeGetTrieHashesResp +} from '../types/GetTrieHashesResp' +import { + GetAccountDataByHashesReq, + serializeGetAccountDataByHashesReq +} from '../types/GetAccountDataByHashesReq' +import { + GetAccountDataByHashesResp, + deserializeGetAccountDataByHashesResp +} from '../types/GetAccountDataByHashesResp' +import { + RepairOOSAccountsReq, + serializeRepairOOSAccountsReq, + deserializeRepairOOSAccountsReq +} from '../types/RepairOOSAccountsReq' +// BinaryRequest and 
BinaryResponse types are not needed +import { isDebugModeMiddleware, isDebugModeMiddlewareLow } from '../network/debugMiddleware' +import { + GetAccountDataWithQueueHintsReqSerializable, + deserializeGetAccountDataWithQueueHintsReq +} from '../types/GetAccountDataWithQueueHintsReq' +import { + GetAccountDataWithQueueHintsRespSerializable, + serializeGetAccountDataWithQueueHintsResp +} from '../types/GetAccountDataWithQueueHintsResp' +import { + RequestTxAndStateReq, + deserializeRequestTxAndStateReq +} from '../types/RequestTxAndStateReq' +import { + RequestTxAndStateResp, + serializeRequestTxAndStateResp +} from '../types/RequestTxAndStateResp' +import { + GetAccountQueueCountReq, + deserializeGetAccountQueueCountReq +} from '../types/GetAccountQueueCountReq' +import { + GetAccountQueueCountResp, + serializeGetAccountQueueCountResp +} from '../types/GetAccountQueueCountResp' + +// Type for the callback +export type Callback = (...args: unknown[]) => void + +export const endpointMethods = { + registerEndpoints() { + // alternatively we would need to query for accepted tx. + + this.accountGlobals.setupHandlers() + + if (this.partitionObjects != null) { + this.partitionObjects.setupHandlers() + } + + this.transactionQueue.setupHandlers() + + this.accountSync.setupHandlers() + + this.transactionConsensus.setupHandlers() + + this.accountPatcher.setupHandlers() + + this.cachedAppDataManager.setupHandlers() + + this.partitionStats.setupHandlers() + + const requestReceiptForTxBinaryHandler: P2PTypes.P2PTypes.Route> = { + name: InternalRouteEnum.binary_request_receipt_for_tx, + handler: (payload, respond) => { + const route = InternalRouteEnum.binary_request_receipt_for_tx + profilerInstance.scopedProfileSectionStart(route, false, payload.length) + nestedCountersInstance.countEvent('stateManager', route) + + const response: RequestReceiptForTxRespSerialized = { receipt: null, note: '', success: false } + try { + const req = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cRequestReceiptForTxReq) + const deserialized = deserializeRequestReceiptForTxReq(req) + let queueEntry = this.transactionQueue.getQueueEntrySafe(deserialized.txid) + if (queueEntry == null) { + queueEntry = this.transactionQueue.getQueueEntryArchived(deserialized.txid, route) + } + + if (queueEntry == null) { + response.note = `failed to find queue entry: ${utils.stringifyReduce(deserialized.txid)} ${deserialized.timestamp + } dbg:${this.debugTXHistory[utils.stringifyReduce(deserialized.txid)]}` + respond(response, serializeRequestReceiptForTxResp) + return + } + + if (queueEntry.acceptedTx?.timestamp !== deserialized.timestamp) { + response.note = `requested timestamp does not match txid: ${utils.stringifyReduce(deserialized.txid)} + request: ${deserialized.timestamp} + queueuEntry timestamp: ${queueEntry.acceptedTx?.timestamp} + dbg:${this.debugTXHistory[utils.stringifyReduce(deserialized.txid)]}` + respond(response, serializeRequestReceiptForTxResp) + return + } + + response.receipt = this.getSignedReceipt(queueEntry) + if (response.receipt != null) { + response.success = true + } else { + response.note = `found queueEntry but no receipt: ${utils.stringifyReduce(deserialized.txid)} ${deserialized.txid + } ${deserialized.timestamp}` + } + respond(response, serializeRequestReceiptForTxResp) + } catch (e) { + this.mainLogger.error(`${route} error: ${e.message} stack: ${e.stack}`) + nestedCountersInstance.countEvent('internal', `${route}-exception`) + respond(response, serializeRequestReceiptForTxResp) + } finally { + 
profilerInstance.scopedProfileSectionEnd(route) + } + }, + } + + this.p2p.registerInternalBinary(requestReceiptForTxBinaryHandler.name, requestReceiptForTxBinaryHandler.handler) + + const requestStateForTxPostBinaryHandler: P2PTypes.P2PTypes.Route> = { + name: InternalRouteEnum.binary_request_state_for_tx_post, + handler: async (payload, respond, header, sign) => { + const route = InternalRouteEnum.binary_request_state_for_tx_post + profilerInstance.scopedProfileSectionStart(route, false, payload.length) + nestedCountersInstance.countEvent('internal', route) + const errorHandler = ( + errorType: RequestErrorEnum, + opts?: { customErrorLog?: string; customCounterSuffix?: string } + ): void => requestErrorHandler(route, errorType, header, opts) + + try { + const response: RequestStateForTxPostResp = { + stateList: [], + beforeHashes: {}, + note: '', + success: false, + } + + const txId = header.verification_data + let queueEntry = this.transactionQueue.getQueueEntrySafe(txId) + if (queueEntry == null) { + queueEntry = this.transactionQueue.getQueueEntryArchived(txId, route) + } + if (queueEntry == null) { + response.note = `failed to find queue entry: ${utils.stringifyReduce(txId)} dbg:${this.debugTXHistory[utils.stringifyReduce(txId)] + }` + /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', `${route} cant find queue entry`) + return respond(response, serializeRequestStateForTxPostResp) + } + + if (queueEntry.hasValidFinalData === false) { + response.note = `has queue entry but not final data: ${utils.stringifyReduce(txId)} dbg:${this.debugTXHistory[utils.stringifyReduce(txId)] + }` + + if (logFlags.error && logFlags.verbose) this.mainLogger.error(response.note) + /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', `${route} hasValidFinalData==false, tx state: ${queueEntry.state}`) + return respond(response, serializeRequestStateForTxPostResp) + } + + const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cRequestStateForTxPostReq) + if (!requestStream) { + errorHandler(RequestErrorEnum.InvalidRequest) + return respond(response, serializeRequestStateForTxPostResp) + } + + const req = deserializeRequestStateForTxPostReq(requestStream) + // app.getRelevantData(accountId, tx) -> wrappedAccountState for local accounts + let wrappedStates = this.useAccountWritesOnly ? {} : queueEntry.collectedData + const applyResponse = queueEntry?.preApplyTXResult.applyResponse + if (applyResponse != null && applyResponse.accountWrites != null && applyResponse.accountWrites.length > 0) { + const writtenAccountsMap: { [id: string]: unknown } = {} + for (const writtenAccount of applyResponse.accountWrites) { + writtenAccountsMap[writtenAccount.accountId] = writtenAccount.data + } + wrappedStates = writtenAccountsMap + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`request_state_for_tx_post applyResponse.accountWrites tx:${queueEntry.logID} ts:${queueEntry.acceptedTx.timestamp} accounts: ${utils.stringifyReduce(Object.keys(wrappedStates))}`) + } + + if (wrappedStates != null) { + for (const [key, accountData] of Object.entries(wrappedStates)) { + const typedAccountData = accountData as any + if (req.key !== typedAccountData.accountId) { + continue // Not this account. 
+ } + + if (typedAccountData.stateId != req.hash) { + response.note = `failed accountData.stateId != req.hash txid: ${utils.makeShortHash( + req.txid + )} hash:${utils.makeShortHash(typedAccountData.stateId)}` + /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', `${route} failed accountData.stateId != req.hash txid`) + return respond(response, serializeRequestStateForTxPostResp) + } + if (typedAccountData) { + response.beforeHashes[key] = queueEntry.beforeHashes[key] + response.stateList.push(typedAccountData) + } + } + } + nestedCountersInstance.countEvent('stateManager', `${route} success`) + response.success = true + return respond(response, serializeRequestStateForTxPostResp) + } catch (e) { + if (logFlags.error) this.mainLogger.error(`${route} error: ${utils.errorToStringFull(e)}`) + nestedCountersInstance.countEvent('internal', `${route}-exception`) + respond({ stateList: [], beforeHashes: {}, note: '', success: false }, serializeRequestStateForTxPostResp) + } finally { + profilerInstance.scopedProfileSectionEnd(route, payload.length) + } + }, + } + + this.p2p.registerInternalBinary(requestStateForTxPostBinaryHandler.name, requestStateForTxPostBinaryHandler.handler) + + const requestTxAndStateBinaryHandler: P2PTypes.P2PTypes.Route> = { + name: InternalRouteEnum.binary_request_tx_and_state, + // eslint-disable-next-line @typescript-eslint/no-unused-vars + handler: async (payload, respond, header, sign) => { + const route = InternalRouteEnum.binary_request_tx_and_state + nestedCountersInstance.countEvent('internal', route) + this.profiler.scopedProfileSectionStart(route, false, payload.length) + const errorHandler = ( + errorType: RequestErrorEnum, + opts?: { customErrorLog?: string; customCounterSuffix?: string } + ): void => requestErrorHandler(route, errorType, header, opts) + + let response: RequestTxAndStateResp = { + stateList: [], + account_state_hash_before: {}, + account_state_hash_after: {}, + note: '', + success: false, + appReceiptData: null, + } + try { + const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cRequestTxAndStateReq) + if (!requestStream) { + errorHandler(RequestErrorEnum.InvalidRequest) + respond(response, serializeRequestTxAndStateResp) + return + } + + const req: RequestTxAndStateReq = deserializeRequestTxAndStateReq(requestStream) + + const txid = req.txid + const requestedAccountIds = req.accountIds + + let queueEntry = this.transactionQueue.getQueueEntrySafe(txid) + if (queueEntry == null) { + queueEntry = this.transactionQueue.getQueueEntryArchived(txid, route) + } + + if (queueEntry == null) { + response.note = `failed to find queue entry: ${utils.stringifyReduce(txid)} dbg:${this.debugTXHistory[utils.stringifyReduce(txid)] + }` + + if (logFlags.error) this.mainLogger.error(`${route} ${response.note}`) + respond(response, serializeRequestTxAndStateResp) + return + } + + if (queueEntry.isInExecutionHome === false) { + response.note = `${route} not in execution group: ${utils.stringifyReduce(txid)}` + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(response.note) + respond(response, serializeRequestTxAndStateResp) + return + } + + let receipt2 = this.getSignedReceipt(queueEntry) + if (receipt2 == null) { + response.note = `${route} does not have valid receipt2: ${utils.stringifyReduce(txid)}` + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(response.note) + respond(response, serializeRequestTxAndStateResp) + return + } + + let wrappedStates = this.useAccountWritesOnly ? 
{} : queueEntry.collectedData + + // if we have applyResponse then use it. This is where and advanced apply() will put its transformed data + const writtenAccountsMap: { [id: string]: unknown } = {} + const applyResponse = queueEntry?.preApplyTXResult.applyResponse + if (applyResponse != null && applyResponse.accountWrites != null && applyResponse.accountWrites.length > 0) { + for (const writtenAccount of applyResponse.accountWrites) { + writtenAccountsMap[writtenAccount.accountId] = writtenAccount.data + } + wrappedStates = writtenAccountsMap + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`request_tx_and_state applyResponse.accountWrites tx:${queueEntry.logID} ts:${queueEntry.acceptedTx.timestamp} accounts: ${utils.stringifyReduce(Object.keys(wrappedStates))} `) + } + + //TODO figure out if we need to include collectedFinalData (after refactor/cleanup) + + if (wrappedStates != null) { + for (let i = 0; i < receipt2.proposal.accountIDs.length; i++) { + let key = receipt2.proposal.accountIDs[i] + let accountData = wrappedStates[key] + if (accountData && requestedAccountIds.includes(key)) { + // eslint-disable-next-line security/detect-object-injection + response.account_state_hash_before[key] = receipt2.proposal.beforeStateHashes[i] + response.account_state_hash_after[key] = receipt2.proposal.afterStateHashes[i] + response.stateList.push(accountData) + } + } + response.appReceiptData = queueEntry.preApplyTXResult?.applyResponse?.appReceiptData + } + response.success = true + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`request_tx_and_state success: ${queueEntry.logID} ${response.stateList.length} ${Utils.safeStringify(response)}`) + respond(response, serializeRequestTxAndStateResp) + } catch (e) { + nestedCountersInstance.countEvent('internal', `${route}-exception`) + /* prettier-ignore */ if (logFlags.error) Context.logger.getLogger('p2p').error(`${route}: Exception executing request: ${utils.errorToStringFull(e)}`) + respond(response, serializeRequestTxAndStateResp) + } finally { + this.profiler.scopedProfileSectionEnd(route) + } + }, + } + + const requestTxAndStateBeforeBinaryHandler: P2PTypes.P2PTypes.Route> = { + name: InternalRouteEnum.binary_request_tx_and_state_before, + // eslint-disable-next-line @typescript-eslint/no-unused-vars + handler: async (payload, respond, header, sign) => { + const route = InternalRouteEnum.binary_request_tx_and_state_before + nestedCountersInstance.countEvent('internal', route) + this.profiler.scopedProfileSectionStart(route, false, payload.length) + const errorHandler = ( + errorType: RequestErrorEnum, + opts?: { customErrorLog?: string; customCounterSuffix?: string } + ): void => requestErrorHandler(route, errorType, header, opts) + + let response: RequestTxAndStateResp = { + stateList: [], + account_state_hash_before: {}, + account_state_hash_after: {}, + note: '', + success: false, + } + try { + const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cRequestTxAndStateReq) + if (!requestStream) { + errorHandler(RequestErrorEnum.InvalidRequest) + respond(response, serializeRequestTxAndStateResp) + return + } + + const req: RequestTxAndStateReq = deserializeRequestTxAndStateReq(requestStream) + + const txid = req.txid + const requestedAccountIds = req.accountIds + + let queueEntry = this.transactionQueue.getQueueEntrySafe(txid) + if (queueEntry == null) { + queueEntry = this.transactionQueue.getQueueEntryArchived(txid, route) + } + + if (queueEntry == null) { + response.note = `failed to find 
queue entry: ${utils.stringifyReduce(txid)} dbg:${this.debugTXHistory[utils.stringifyReduce(txid)] + }` + + if (logFlags.error) this.mainLogger.error(`${route} ${response.note}`) + respond(response, serializeRequestTxAndStateResp) + return + } + + if (queueEntry.isInExecutionHome === false) { + response.note = `${route} not in execution group: ${utils.stringifyReduce(txid)}` + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(response.note) + respond(response, serializeRequestTxAndStateResp) + return + } + + let receipt2 = this.getSignedReceipt(queueEntry) + if (receipt2 == null) { + response.note = `${route} does not have valid receipt2: ${utils.stringifyReduce(txid)}` + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(response.note) + respond(response, serializeRequestTxAndStateResp) + return + } + + // we just need to send collected state + for (const accountId of requestedAccountIds) { + const beforeState = queueEntry.collectedData[accountId] + const index = receipt2.proposal.accountIDs.indexOf(accountId) + if (beforeState && beforeState.stateId === receipt2.proposal.beforeStateHashes[index]) { + response.stateList.push(queueEntry.collectedData[accountId]) + } else { + response.note = `has bad beforeStateAccount: ${utils.stringifyReduce(txid)} dbg:${this.debugTXHistory[utils.stringifyReduce(txid)] + }` + if (logFlags.error) this.mainLogger.error(`${route} ${response.note}`) + respond(response, serializeRequestTxAndStateResp) + return + } + } + response.success = true + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`request_tx_and_state_before success: ${queueEntry.logID} ${response.stateList.length} ${Utils.safeStringify(response)}`) + respond(response, serializeRequestTxAndStateResp) + } catch (e) { + nestedCountersInstance.countEvent('internal', `${route}-exception`) + /* prettier-ignore */ if (logFlags.error) Context.logger.getLogger('p2p').error(`${route}: Exception executing request: ${utils.errorToStringFull(e)}`) + respond(response, serializeRequestTxAndStateResp) + } finally { + this.profiler.scopedProfileSectionEnd(route) + } + }, + } + + this.p2p.registerInternalBinary(requestTxAndStateBinaryHandler.name, requestTxAndStateBinaryHandler.handler) + + this.p2p.registerInternalBinary( + requestTxAndStateBeforeBinaryHandler.name, + requestTxAndStateBeforeBinaryHandler.handler + ) + + const binaryGetAccDataWithQueueHintsHandler: P2PTypes.P2PTypes.Route> = { + name: InternalRouteEnum.binary_get_account_data_with_queue_hints, + handler: async (payload, respond, header, sign) => { + const route = InternalRouteEnum.binary_get_account_data_with_queue_hints + profilerInstance.scopedProfileSectionStart(route, false, payload.length) + nestedCountersInstance.countEvent('internal', route) + + try { + let accountData = null + const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cGetAccountDataWithQueueHintsReq) + if (!requestStream) { + // implement error handling + nestedCountersInstance.countEvent('internal', `${route}-invalid_request`) + return respond(BadRequest(`${route} invalid request`), serializeResponseError) + } + const req = deserializeGetAccountDataWithQueueHintsReq(requestStream) + const MAX_ACCOUNTS = this.config.stateManager.accountBucketSize + if (req.accountIds.length > MAX_ACCOUNTS) { + nestedCountersInstance.countEvent('internal', `${route}-too_many_accounts`) + return respond(BadRequest(`${route} too many accounts requested`), serializeResponseError) + } + if (utils.isValidShardusAddress(req.accountIds) 
=== false) { + nestedCountersInstance.countEvent('internal', `${route}-invalid_account_ids`) + return respond(BadRequest(`${route} invalid account_ids`), serializeResponseError) + } + let ourLockID = -1 + try { + ourLockID = await this.fifoLock('accountModification') + accountData = await this.app.getAccountDataByList(req.accountIds) + } finally { + this.fifoUnlock('accountModification', ourLockID) + } + if (accountData != null) { + for (const wrappedAccount of accountData) { + const wrappedAccountInQueueRef = wrappedAccount as ShardusTypes.WrappedDataFromQueue + wrappedAccountInQueueRef.seenInQueue = false + + if (this.lastSeenAccountsMap != null) { + const queueEntry = this.lastSeenAccountsMap[wrappedAccountInQueueRef.accountId] + if (queueEntry != null) { + wrappedAccountInQueueRef.seenInQueue = true + } + } + } + } + + const resp: GetAccountDataWithQueueHintsRespSerializable = { + accountData: accountData as ShardusTypes.WrappedDataFromQueue[] | null, + // this can still be null + } + respond(resp, serializeGetAccountDataWithQueueHintsResp) + } catch (e) { + if (logFlags.error || logFlags.getLocalOrRemote) + this.mainLogger.error(`${route} error: ${utils.errorToStringFull(e)}`) + nestedCountersInstance.countEvent('internal', `${route}-exception`) + nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `handler: ${e.message} `) + return respond(InternalError(`${route} exception executing request`), serializeResponseError) + } finally { + profilerInstance.scopedProfileSectionEnd(route, payload.length) + } + }, + } + + this.p2p.registerInternalBinary( + binaryGetAccDataWithQueueHintsHandler.name, + binaryGetAccDataWithQueueHintsHandler.handler + ) + + const binaryGetAccountQueueCountHandler: P2PTypes.P2PTypes.Route> = { + name: InternalRouteEnum.binary_get_account_queue_count, + handler: async (payload, respond, header, sign) => { + const route = InternalRouteEnum.binary_get_account_queue_count + profilerInstance.scopedProfileSectionStart(route, false, payload.length) + nestedCountersInstance.countEvent('internal', route) + try { + const requestStream = VectorBufferStream.fromBuffer(payload) + const requestType = requestStream.readUInt16() + if (requestType !== TypeIdentifierEnum.cGetAccountQueueCountReq) { + // implement error handling + respond(false, serializeGetAccountQueueCountResp) + return + } + const req = deserializeGetAccountQueueCountReq(requestStream) + // Limit the number of accounts to prevent abuse + const MAX_ACCOUNTS = this.config.stateManager.accountBucketSize // default 200 + if (req.accountIds.length > MAX_ACCOUNTS) { + nestedCountersInstance.countEvent('internal', `${route}-too_many_accounts`) + return respond(BadRequest(`${route} too many accounts requested`), serializeResponseError) + } + const result: GetAccountQueueCountResp = { + counts: [], + committingAppData: [], + accounts: [], + } + if (utils.isValidShardusAddress(req.accountIds) === false) { + nestedCountersInstance.countEvent('internal', `${route}-invalid_account_ids`) + respond(false, serializeGetAccountQueueCountResp) + return + } + for (const address of req.accountIds) { + const { count, committingAppData } = this.transactionQueue.getAccountQueueCount(address, true) + result.counts.push(count) + result.committingAppData.push(committingAppData) + if (this.config.stateManager.enableAccountFetchForQueueCounts) { + const currentAccountData = await this.getLocalOrRemoteAccount(address) + if (currentAccountData && currentAccountData.data) { + result.accounts.push(currentAccountData.data) + } + } + } + 
respond(result, serializeGetAccountQueueCountResp) + } catch (e) { + if (logFlags.error) this.mainLogger.error(`${route} error: ${e}`) + nestedCountersInstance.countEvent('internal', `${route}-exception`) + respond(false, serializeGetAccountQueueCountResp) + } finally { + profilerInstance.scopedProfileSectionEnd(route, payload.length) + } + }, + } + + this.p2p.registerInternalBinary(binaryGetAccountQueueCountHandler.name, binaryGetAccountQueueCountHandler.handler) + + Context.network.registerExternalGet('debug_stats', isDebugModeMiddleware, (_req, res) => { + const cycle = this.currentCycleShardData.cycleNumber - 1 + + let cycleShardValues = null + if (this.shardValuesByCycle.has(cycle)) { + cycleShardValues = this.shardValuesByCycle.get(cycle) + } + + const blob = this.partitionStats.dumpLogsForCycle(cycle, false, cycleShardValues) + res.json({ cycle, blob }) + }) + + Context.network.registerExternalGet('debug_stats2', isDebugModeMiddleware, (_req, res) => { + const cycle = this.currentCycleShardData.cycleNumber - 1 + + let blob = {} + let cycleShardValues = null + if (this.shardValuesByCycle.has(cycle)) { + cycleShardValues = this.shardValuesByCycle.get(cycle) + blob = this.partitionStats.buildStatsReport(cycleShardValues) + } + res.json({ cycle, blob }) + }) + + Context.network.registerExternalGet('clear_tx_debug', isDebugModeMiddlewareLow, (_req, res) => { + this.transactionQueue.clearTxDebugStatList() + res.json({ success: true }) + }) + + Context.network.registerExternalGet('print_tx_debug', isDebugModeMiddlewareLow, (_req, res) => { + const result = this.transactionQueue.printTxDebug() + res.write(result) + res.end() + }) + + Context.network.registerExternalGet('print_tx_debug_by_txid', isDebugModeMiddlewareLow, (_req, res) => { + const txId = _req.query.txId + if (txId == null) { + res.write('txId parameter required') + res.end() + return + } + if (typeof txId !== 'string') { + res.write('txId parameter must be a string') + res.end() + return + } + const result = this.transactionQueue.printTxDebugByTxId(txId) + res.write(result) + res.end() + }) + + Context.network.registerExternalGet('last_process_stats', isDebugModeMiddlewareLow, (_req, res) => { + const result = JSON.stringify(this.transactionQueue.lastProcessStats, null, 2) + res.write(result) + res.end() + }) + + //a debug nodelist so tools can map nodes to the shortIDs that we use + Context.network.registerExternalGet('nodelist_debug', isDebugModeMiddleware, (_req, res) => { + const debugNodeList = [] + for (const node of activeByIdOrder) { + const nodeEntry = { + id: utils.makeShortHash(node.id), + ip: node.externalIp, + port: node.externalPort, + } + debugNodeList.push(nodeEntry) + } + res.json(debugNodeList) + }) + + Context.network.registerExternalGet('debug-consensus-log', isDebugModeMiddleware, (req, res) => { + this.consensusLog = !this.consensusLog + res.write(`consensusLog: ${this.consensusLog}`) + res.end() + }) + + Context.network.registerExternalGet('debug-noncequeue-count', isDebugModeMiddleware, (req, res) => { + let result = this.transactionQueue.getPendingCountInNonceQueue() + res.json(result) + res.end() + }) + + Context.network.registerExternalGet('debug-queue-item-by-txid', isDebugModeMiddlewareLow, (_req, res) => { + const txId = _req.query.txId + if (txId == null || typeof txId !== 'string' || txId.length !== 64) { + res.write('invalid txId provided') + res.end() + return + } + const result = this.transactionQueue.getQueueItemById(txId) + res.write(Utils.safeStringify(result)) + res.end() + }) + + 
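Several of the binary handlers above resolve a transaction id the same way: check the live transaction queue first, then fall back to the archived entries once the tx has left the queue. A minimal sketch of that lookup, with parameter shapes simplified as assumptions:

```typescript
// Sketch only: live queue first, archived entries as the fallback.
interface QueueLookup<T> {
  getQueueEntrySafe: (txid: string) => T | null
  getQueueEntryArchived: (txid: string, route: string) => T | null
}

function findQueueEntry<T>(queue: QueueLookup<T>, txid: string, route: string): T | null {
  const live = queue.getQueueEntrySafe(txid)
  if (live != null) return live
  // the tx may have already finished processing and moved to the archive
  return queue.getQueueEntryArchived(txid, route)
}
```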
Context.network.registerExternalGet('debug-queue-items', isDebugModeMiddleware, (req, res) => { + let result = this.transactionQueue.getQueueItems() + res.write(Utils.safeStringify(result)) + res.end() + }) + + Context.network.registerExternalGet('debug-queue-clear', isDebugModeMiddleware, (req, res) => { + let minAge = req.query.minAge ? parseInt(req.query.minAge as string) : -1 + if (isNaN(minAge)) minAge = -1 + let result = this.transactionQueue.clearQueueItems(minAge) + res.write(Utils.safeStringify(result)) + res.end() + }) + + Context.network.registerExternalGet('debug-stuck-tx', isDebugModeMiddleware, (_req, res) => { + const opts = { + minAge: _req.query?.minAge || 0, + state: _req.query?.state, + nextStates: _req.query?.nextStates === 'false' ? false : true, + } + res.json(this.transactionQueue.getDebugStuckTxs(opts)) + }) + + Context.network.registerExternalGet('debug-stuck-processing', isDebugModeMiddleware, (_req, res) => { + res.json(this.transactionQueue.getDebugProccessingStatus()) + }) + + Context.network.registerExternalGet('debug-fix-stuck-processing', isDebugModeMiddleware, (req, res) => { + let response = 'not stuck' + + //initialize the variable clear with the value of the query parameter clear, the default is false + const clear = req.query.clear === 'true' || false + + const isStuck = this.transactionQueue.isStuckProcessing + if (isStuck) { + response = Utils.safeStringify(this.transactionQueue.getDebugProccessingStatus()) + this.transactionQueue.fixStuckProcessing(clear) + } + res.write(response) + res.end() + }) + + Context.network.registerExternalGet('debug-fifoLocks', isDebugModeMiddleware, (req, res) => { + const getAll = req.query.all === 'true' || false + let toPrint = this.fifoLocks + if (getAll === false) { + toPrint = this.getLockedFifoAccounts() + } + const response = JSON.stringify(toPrint, null, 2) + res.write(response) + res.end() + }) + Context.network.registerExternalGet('debug-fifoLocks-unlock', isDebugModeMiddleware, (_req, res) => { + const unlockCount = this.forceUnlockAllFifoLocks('debug-fifoLocks-unlock') + + const response = JSON.stringify({ unlockCount }, null, 2) + res.write(response) + res.end() + }) +}, + + _unregisterEndpoints() { + // this.p2p.unregisterInternal('get_account_data3') + // this.p2p.unregisterInternal('get_account_data_by_list') + + // new shard endpoints: + // this.p2p.unregisterInternal('request_state_for_tx') + // this.p2p.unregisterInternal('request_state_for_tx_post') + // this.p2p.unregisterInternal('request_tx_and_state') + + // this.p2p.unregisterInternal('request_receipt_for_tx') + // this.p2p.unregisterInternal('broadcast_state') + this.p2p.unregisterGossipHandler('spread_tx_to_group') + // this.p2p.unregisterInternal('get_account_data_with_queue_hints') + // this.p2p.unregisterInternal('get_globalaccountreport') + // this.p2p.unregisterInternal('spread_appliedVote') + this.p2p.unregisterGossipHandler('spread_appliedReceipt') + + // this.p2p.unregisterInternal('get_trie_hashes') + // this.p2p.unregisterInternal('sync_trie_hashes') + // this.p2p.unregisterInternal('get_trie_accountHashes') + // this.p2p.unregisterInternal('get_account_data_by_hashes') + + for (const binary_endpoint of Object.values(InternalRouteEnum)) { + this.p2p.unregisterInternal(binary_endpoint) + } +}, + + _registerListener(emitter: EventEmitter, event: string, callback: Callback) { + // eslint-disable-next-line security/detect-object-injection + if (this._listeners[event]) { + this.statemanager_fatal(`_registerListener_dupes`, 'State Manager 
can only register one listener per event!') + return + } + emitter.on(event, callback) + // eslint-disable-next-line security/detect-object-injection + this._listeners[event] = [emitter, callback] +}, + + _unregisterListener(event: string) { + /* eslint-disable security/detect-object-injection */ + if (!this._listeners[event]) { + this.mainLogger.warn(`This event listener doesn't exist! Event: \`${event}\` in StateManager`) + return + } + const entry = this._listeners[event] + const [emitter, callback] = entry + emitter.removeListener(event, callback) + delete this._listeners[event] + /* eslint-enable security/detect-object-injection */ +}, + + _cleanupListeners() { + for (const event of Object.keys(this._listeners)) { + this._unregisterListener(event) + } + } +} \ No newline at end of file diff --git a/src/state-manager/FIFO.ts b/src/state-manager/FIFO.ts new file mode 100644 index 000000000..0d65852eb --- /dev/null +++ b/src/state-manager/FIFO.ts @@ -0,0 +1,209 @@ +import { logFlags } from '../logger' +import { shardusGetTime } from '../network' +import { nestedCountersInstance } from '../utils/nestedCounters' +import * as utils from '../utils' +import { FifoLockObjectMap } from './state-manager-types' +// StringBoolObjectMap type defined inline +type StringBoolObjectMap = { [key: string]: boolean } + +export const fifoMethods = { + async fifoLock(fifoName: string): Promise { + if (this.config.stateManager.fifoUnlockFix3 === true) { + return + } + + const stack = '' // new Error().stack + if (logFlags.debug) this.mainLogger.debug(`fifoLock: ${fifoName} ${stack}`) + + // eslint-disable-next-line security/detect-object-injection + let thisFifo = this.fifoLocks[fifoName] + if (thisFifo == null) { + thisFifo = { + fifoName, + queueCounter: 0, + waitingList: [], + lastServed: 0, + queueLocked: false, + lockOwner: 1, + lastLock: shardusGetTime(), + } + // eslint-disable-next-line security/detect-object-injection + this.fifoLocks[fifoName] = thisFifo + } + thisFifo.queueCounter++ + const ourID = thisFifo.queueCounter + const entry = { id: ourID } + + if (fifoName === 'accountModification') { + nestedCountersInstance.countEvent('fifo-backup', `accountModification ${thisFifo.waitingList.length}`) + } + + if (thisFifo.waitingList.length > 0 || thisFifo.queueLocked) { + thisFifo.waitingList.push(entry) + // wait till we are at the front of the queue, and the queue is not locked + while ((thisFifo.waitingList.length > 0 && thisFifo.waitingList[0]?.id !== ourID) || thisFifo.queueLocked) { + // todo perf optimization to reduce the amount of times we have to sleep (attempt to come out of sleep at close to the right time) + let sleepEstimate = ourID - thisFifo.lastServed + if (sleepEstimate < 1) { + sleepEstimate = 1 + } + await utils.sleep(1 * sleepEstimate) + // await utils.sleep(2) + } + // remove our entry from the array + thisFifo.waitingList.shift() + } + + // lock things so that only our calling function can do work + thisFifo.queueLocked = true + thisFifo.lockOwner = ourID + thisFifo.lastServed = ourID + //this can be used to cleanup old fifo locks + thisFifo.lastLock = shardusGetTime() + return ourID + }, + + fifoUnlock(fifoName: string, id: number) { + if (this.config.stateManager.fifoUnlockFix3 === true) { + return + } + + const stack = '' // new Error().stack + if (logFlags.debug) this.mainLogger.debug(`fifoUnlock: ${fifoName} ${stack}`) + + // eslint-disable-next-line security/detect-object-injection + const thisFifo = this.fifoLocks[fifoName] + if (id === -1 || !thisFifo) { + return // 
nothing to do
+     }
+     if (thisFifo.lockOwner === id) {
+       thisFifo.queueLocked = false
+     } else if (id !== -1) {
+       // this should never happen as long as we are careful to use try/finally blocks
+       this.statemanager_fatal(`fifoUnlock`, `Failed to unlock the fifo ${thisFifo.fifoName}: ${id}`)
+     }
+   },
+
+   /**
+    * bulkFifoLockAccounts
+    * @param {string[]} accountIDs
+    */
+   async bulkFifoLockAccounts(accountIDs: string[]) {
+     if (this.config.stateManager.fifoUnlockFix3 === true) {
+       return []
+     }
+     // lock all the accounts we will modify
+     const wrapperLockId = await this.fifoLock('atomicWrapper')
+     const ourLocks = []
+     const seen: StringBoolObjectMap = {}
+     for (const accountKey of accountIDs) {
+       // eslint-disable-next-line security/detect-object-injection
+       if (seen[accountKey] === true) {
+         ourLocks.push(-1) //lock skipped, so add a placeholder
+         continue
+       }
+       // eslint-disable-next-line security/detect-object-injection
+       seen[accountKey] = true
+       const ourLockID = await this.fifoLock(accountKey)
+       ourLocks.push(ourLockID)
+     }
+     this.fifoUnlock('atomicWrapper', wrapperLockId)
+     return ourLocks
+   },
+
+   /**
+    * bulkFifoUnlockAccounts
+    * @param {string[]} accountIDs
+    * @param {number[]} ourLocks
+    */
+   bulkFifoUnlockAccounts(accountIDs: string[], ourLocks: number[]) {
+     if (this.config.stateManager.fifoUnlockFix3 === true) {
+       return
+     }
+     const seen: StringBoolObjectMap = {}
+
+     // unlock the accounts we locked
+     /* eslint-disable security/detect-object-injection */
+     for (let i = 0; i < ourLocks.length; i++) {
+       const accountID = accountIDs[i]
+       if (seen[accountID] === true) {
+         continue
+       }
+       seen[accountID] = true
+       const ourLockID = ourLocks[i]
+       if (ourLockID == -1) {
+         this.statemanager_fatal(
+           `bulkFifoUnlockAccounts_fail`,
+           `bulkFifoUnlockAccounts hit placeholder i:${i} ${utils.stringifyReduce({ accountIDs, ourLocks })} `
+         )
+       }
+
+       this.fifoUnlock(accountID, ourLockID)
+     }
+     /* eslint-enable security/detect-object-injection */
+   },
+
+   getLockedFifoAccounts(): FifoLockObjectMap {
+     const results = {}
+     if (this.fifoLocks != null) {
+       for (const [key, value] of Object.entries(this.fifoLocks)) {
+         const fifoLock = value as any
+         if (fifoLock.queueLocked) {
+           // eslint-disable-next-line security/detect-object-injection
+           results[key] = fifoLock
+         }
+       }
+     }
+     return results
+   },
+
+   /**
+    * This function will unlock all fifo locks that are currently locked.
+    * Ideally we should not be calling this, but it is currently needed
+    * as we try to transition to more stable fifo locks.
+    * @param tag
+    * @returns
+    */
+   forceUnlockAllFifoLocks(tag: string): number {
+     nestedCountersInstance.countEvent('processing', 'forceUnlockAllFifoLocks ' + tag)
+
+     const locked = this.getLockedFifoAccounts()
+     let clearCount = 0
+     for (const value of Object.values(locked)) {
+       const fifoLock = value as any
+       fifoLock.queueLocked = false
+       fifoLock.waitingList = []
+       //set this so we don't clean it up too soon.
+       fifoLock.lastLock = shardusGetTime()
+       //value.queueCounter
+       //do we need to fix up counters
+       clearCount++
+     }
+     return clearCount
+   },
+
+   /**
+    * Now that we have fixed a bug that was stomping fifo locks, we could have a problem
+    * where the memory grows forever. This function will clean up old locks that are no longer needed.
+ */ + clearStaleFifoLocks() { + try { + const time = shardusGetTime() - 1000 * 60 * 10 //10 minutes ago + const keysToDelete = [] + for (const [key, value] of Object.entries(this.fifoLocks)) { + const fifoLock = value as any + if (fifoLock.lastLock < time && fifoLock.queueLocked === false) { + keysToDelete.push(key) + } + } + + for (const key of keysToDelete) { + // eslint-disable-next-line security/detect-object-injection + delete this.fifoLocks[key] + } + nestedCountersInstance.countEvent('stateManager', 'clearStaleFifoLocks', keysToDelete.length) + } catch (err) { + this.mainLogger.error(`clearStaleFifoLocks: ${err}`) + } + } +} \ No newline at end of file diff --git a/src/state-manager/Receipt.ts b/src/state-manager/Receipt.ts new file mode 100644 index 000000000..f12db7d12 --- /dev/null +++ b/src/state-manager/Receipt.ts @@ -0,0 +1,185 @@ +import { + QueueEntry, + SignedReceipt, + Proposal, + AppliedReceipt, +} from './state-manager-types' +import { ReceiptMapResult } from '@shardeum-foundation/lib-types/build/src/state-manager/StateManagerTypes' +import { logFlags } from '../logger' +import * as utils from '../utils' +import { nestedCountersInstance } from '../utils/nestedCounters' +import * as ShardusTypes from '../shardus/shardus-types' +import * as StateManagerTypes from './state-manager-types' + +/*** + * ######## ######## ###### ######## #### ######## ######## ###### + * ## ## ## ## ## ## ## ## ## ## ## ## + * ## ## ## ## ## ## ## ## ## ## + * ######## ###### ## ###### ## ######## ## ###### + * ## ## ## ## ## ## ## ## ## + * ## ## ## ## ## ## ## ## ## ## ## + * ## ## ######## ###### ######## #### ## ## ###### + */ + +export const receiptMethods = { + getSignedReceipt(queueEntry: QueueEntry): SignedReceipt { + if (queueEntry.signedReceiptFinal != null) { + return queueEntry.signedReceiptFinal + } + let finalReceipt: SignedReceipt + if (queueEntry.signedReceipt && queueEntry.receivedSignedReceipt == null) { + finalReceipt = queueEntry.signedReceipt + } + if (queueEntry.signedReceipt == null && queueEntry.receivedSignedReceipt) { + // or see if we got one + finalReceipt = queueEntry.receivedSignedReceipt + } + // if we had to repair use that instead. 
this stomps the other ones
+     if (queueEntry.signedReceiptForRepair != null) {
+       finalReceipt = queueEntry.signedReceiptForRepair
+     }
+     queueEntry.signedReceiptFinal = finalReceipt
+     return finalReceipt
+   },
+
+   hasReceipt(queueEntry: QueueEntry) {
+     return this.getSignedReceipt(queueEntry) != null
+   },
+   getReceiptResult(queueEntry: QueueEntry) {
+     const receipt = this.getSignedReceipt(queueEntry)
+     if (receipt) {
+       return receipt.proposal.applied
+     }
+     return false
+   },
+
+   getReceiptProposal(queueEntry: QueueEntry): Proposal {
+     const receipt = this.getSignedReceipt(queueEntry)
+     if (receipt) {
+       return receipt.proposal
+     }
+   },
+
+   generateReceiptMapResults(lastCycle: ShardusTypes.Cycle): ReceiptMapResult[] {
+     const results: ReceiptMapResult[] = []
+
+     const cycleToSave = lastCycle.counter
+
+     //init results per partition
+     const receiptMapByPartition: Map<number, ReceiptMapResult> = new Map()
+     for (let i = 0; i < this.currentCycleShardData.shardGlobals.numPartitions; i++) {
+       const mapResult: ReceiptMapResult = {
+         cycle: cycleToSave,
+         partition: i,
+         receiptMap: {},
+         txCount: 0,
+         txsMap: {},
+         txsMapEVMReceipt: {},
+       }
+       receiptMapByPartition.set(i, mapResult)
+       // add to the list we will return
+       results.push(mapResult)
+     }
+
+     // todo add to ReceiptMapResult in shardus types
+     // txsMap: {[id:string]:WrappedResponse[]};
+     // txsMapEVMReceipt: {[id:string]:unknown[]};
+
+     const queueEntriesToSave: QueueEntry[] = []
+     for (const queueEntry of this.transactionQueue._transactionQueue) {
+       if (queueEntry.cycleToRecordOn === cycleToSave) {
+         // make sure we have a receipt
+         const receipt: SignedReceipt = this.getSignedReceipt(queueEntry)
+
+         if (receipt == null) {
+           //check && queueEntry.globalModification === false because global accounts will not get a receipt, should this change?
+           /* prettier-ignore */ if(logFlags.error && queueEntry.globalModification === false) this.mainLogger.error(`generateReceiptMapResults found entry in with no receipt in newAcceptedTxQueue. ${utils.stringifyReduce(queueEntry.acceptedTx)}`)
+         } else {
+           queueEntriesToSave.push(queueEntry)
+         }
+       }
+     }
+
+     // I am worried that archivedQueueEntries being capped to 5k could cause a receipt breakdown
+     // if cycle times are long enough to have more than 5000 txs on a node.
+     // I think we should maybe be working on these as we go rather than processing them in a batch.
+
+     for (const queueEntry of this.transactionQueue.archivedQueueEntries) {
+       if (queueEntry.cycleToRecordOn === cycleToSave) {
+         // make sure we have a receipt
+         const receipt: SignedReceipt = this.getSignedReceipt(queueEntry)
+
+         if (receipt == null) {
+           //check && queueEntry.globalModification === false
+           //we don't expect expired TXs to have a receipt. this should reduce log spam
+           if (queueEntry.state != 'expired') {
+             /* prettier-ignore */ if(logFlags.error && queueEntry.globalModification === false) this.mainLogger.error(`generateReceiptMapResults found entry in with no receipt in archivedQueueEntries. ${utils.stringifyReduce(queueEntry.acceptedTx)} state:${queueEntry.state}`)
+           }
+         } else {
+           queueEntriesToSave.push(queueEntry)
+         }
+       }
+     }
+
+     const netId = '123abc'
+     //go over the save list..
+ for (const queueEntry of queueEntriesToSave) { + const accountData: ShardusTypes.WrappedResponse[] = queueEntry?.preApplyTXResult?.applyResponse?.accountData + if (accountData == null) { + /* prettier-ignore */ nestedCountersInstance.countRareEvent('generateReceiptMapResults' , `accountData==null tests: ${queueEntry?.preApplyTXResult == null} ${queueEntry?.preApplyTXResult?.applyResponse == null} ${queueEntry?.preApplyTXResult?.applyResponse?.accountData == null}` ) + } + // delete the localCache + if (accountData != null) { + for (const account of accountData) { + delete account.localCache + } + } + // console.log('accountData accountData', accountData) + for (const partition of queueEntry.involvedPartitions) { + const receipt: SignedReceipt = this.getSignedReceipt(queueEntry) + + const status = receipt.proposal.applied === true ? 'applied' : 'rejected' + const txHash = queueEntry.acceptedTx.txId + const obj = { tx: queueEntry.acceptedTx.data, status, netId } + const txResultFullHash = this.crypto.hash(obj) + const txIdShort = utils.short(txHash) + const txResult = utils.short(txResultFullHash) + + /* eslint-disable security/detect-object-injection */ + if (receiptMapByPartition.has(partition)) { + const mapResult: ReceiptMapResult = receiptMapByPartition.get(partition) + //create an array if we have not seen this index yet + if (mapResult.receiptMap[txIdShort] == null) { + mapResult.receiptMap[txIdShort] = [] + } + + // TODO: too much data duplication to put accounts and receitps in mapResult + // They get duplicated per involved partition currently. + // They should be in a separate list I think.. + + let gotAppReceipt = false + //set receipt data. todo get evmReceiptForTX from receipt. + if (receipt.proposal.appReceiptDataHash != null && receipt.proposal.appReceiptDataHash != '') { + const applyResponse = queueEntry?.preApplyTXResult?.applyResponse + // we may not always have appReceiptData... especially in execute in local shard + if (applyResponse && applyResponse.appReceiptDataHash === receipt.proposal.appReceiptDataHash) { + mapResult.txsMapEVMReceipt[txIdShort] = applyResponse.appReceiptData + gotAppReceipt = true + } + } + + nestedCountersInstance.countEvent('stateManager', `gotAppReceipt:${gotAppReceipt}`) + + mapResult.txsMap[txIdShort] = accountData // For tx data to save in Explorer + + //push the result. note the order is not deterministic unless we were to sort at the end. 
+ mapResult.receiptMap[txIdShort].push(txResult) + mapResult.txCount++ + } + /* eslint-enable security/detect-object-injection */ + } + } + + return results + } +} diff --git a/src/state-manager/RemoteAccount.ts b/src/state-manager/RemoteAccount.ts new file mode 100644 index 000000000..eb8ea86f4 --- /dev/null +++ b/src/state-manager/RemoteAccount.ts @@ -0,0 +1,420 @@ +import { logFlags } from '../logger' +import { nestedCountersInstance } from '../utils/nestedCounters' +import * as utils from '../utils' +import * as Comms from '../p2p/Comms' +import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' +import { + GetAccountQueueCountReq, + serializeGetAccountQueueCountReq, +} from '../types/GetAccountQueueCountReq' +import { + deserializeGetAccountQueueCountResp, + GetAccountQueueCountResp, +} from '../types/GetAccountQueueCountResp' +import { + QueueCountsResult, + GetAccountDataWithQueueHintsResp, + WrappedResponses, +} from './state-manager-types' +import { + GetAccountDataWithQueueHintsReqSerializable, + serializeGetAccountDataWithQueueHintsReq, +} from '../types/GetAccountDataWithQueueHintsReq' +import { + deserializeGetAccountDataWithQueueHintsResp, + GetAccountDataWithQueueHintsRespSerializable, +} from '../types/GetAccountDataWithQueueHintsResp' +import * as NodeList from '../p2p/NodeList' +import * as ShardusTypes from '../shardus/shardus-types' +import { isServiceMode } from '../debug' +import * as ShardFunctions from '../state-manager/shardFunctions' +import { ResponseError } from '../types/ResponseError' +import { RequestAccountQueueCounts, QueueCountsResponse } from './state-manager-types' +import { P2P as P2PTypes } from '@shardeum-foundation/lib-types' + +export const remoteAccountMethods = { + async getLocalOrRemoteAccountQueueCount(address: string): Promise { + let count: number = -1 + let committingAppData: unknown = undefined + let account: unknown = undefined + if (this.currentCycleShardData == null) { + await this.waitForShardData() + } + if (this.currentCycleShardData == null) { + throw new Error('getLocalOrRemoteAccount: network not ready') + } + let forceLocalGlobalLookup = false + if (this.accountGlobals.isGlobalAccount(address)) { + forceLocalGlobalLookup = true + } + + let accountIsRemote = this.transactionQueue.isAccountRemote(address) + if (forceLocalGlobalLookup) { + accountIsRemote = false + } + + if (accountIsRemote) { + const maxRetry = 3 + let success = false + let retryCount = 0 + const triedConsensusNodeIds: string[] = [] + + while (success === false && retryCount < maxRetry) { + retryCount += 1 + const randomConsensusNode = this.transactionQueue.getRandomConsensusNodeForAccount( + address, + triedConsensusNodeIds + ) + if (randomConsensusNode == null) { + this.statemanager_fatal( + 'getLocalOrRemoteAccountQueueCount', + `No consensus node found for account ${address}, retry ${retryCount}` + ) + continue // will retry another node if counts permit + } + // record already tried consensus node + triedConsensusNodeIds.push(randomConsensusNode.id) + + // Node Precheck! 
+ if ( + this.isNodeValidForInternalMessage( + randomConsensusNode.id, + 'getLocalOrRemoteAccountQueueCount', + true, + true, + true, + true + ) === false + ) { + /* prettier-ignore */ if (logFlags.verbose) this.getAccountFailDump(address, `getLocalOrRemoteAccountQueueCount: isNodeValidForInternalMessage failed, retry ${retryCount}`) + continue // will retry another node if counts permit + } + + const message: RequestAccountQueueCounts = { accountIds: [address] } + let r: QueueCountsResponse | false + + try { + // if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.getAccountQueueCountBinary) { + const serialized_res = await this.p2p.askBinary( + randomConsensusNode, + InternalRouteEnum.binary_get_account_queue_count, + message, + serializeGetAccountQueueCountReq, + deserializeGetAccountQueueCountResp, + {} + ) + r = serialized_res as QueueCountsResponse + // } else { + // r = await this.p2p.ask(randomConsensusNode, 'get_account_queue_count', message) + // } + } catch (error) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`ASK FAIL getLocalOrRemoteAccountQueueCount: askBinary ex: ${error.message}`) + r = null + } + + if (!r) { + if (logFlags.error) this.mainLogger.error('ASK FAIL getLocalOrRemoteAccountQueueCount r === false') + } + + const result = r as QueueCountsResponse + if (result != null && result.counts != null && result.counts.length > 0) { + count = result.counts[0] + committingAppData = result.committingAppData[0] + if (this.config.stateManager.enableAccountFetchForQueueCounts) { + account = result.accounts[0] + } + success = true + /* prettier-ignore */ if (logFlags.verbose) console.log(`queue counts response: ${count} address:${utils.stringifyReduce(address)}`) + } else { + if (result == null) { + /* prettier-ignore */ if (logFlags.verbose) this.getAccountFailDump(address, 'remote request missing data 2: result == null') + } else if (result.counts == null) { + /* prettier-ignore */ if (logFlags.verbose) this.getAccountFailDump(address, 'remote request missing data 2: result.counts == null ' + utils.stringifyReduce(result)) + } else if (result.counts.length <= 0) { + /* prettier-ignore */ if (logFlags.verbose) this.getAccountFailDump(address, 'remote request missing data 2: result.counts.length <= 0 ' + utils.stringifyReduce(result)) + } + /* prettier-ignore */ if (logFlags.verbose) console.log(`queue counts failed: ${utils.stringifyReduce(result)} address:${utils.stringifyReduce(address)}`) + } + } + } else { + // we are local! + const queueCountResult = this.transactionQueue.getAccountQueueCount(address) + count = queueCountResult.count + committingAppData = queueCountResult.committingAppData + if (this.config.stateManager.enableAccountFetchForQueueCounts) { + const currentAccountData = await this.getLocalOrRemoteAccount(address) + if (currentAccountData) { + account = currentAccountData.data + } + } + /* prettier-ignore */ if (logFlags.verbose) console.log(`queue counts local: ${count} address:${utils.stringifyReduce(address)}`) + } + + return { count, committingAppData, account } + }, + + // todo support metadata so we can serve up only a portion of the account + // todo 2? communicate directly back to client... could have security issue. + // todo 3? require a relatively stout client proof of work + async getLocalOrRemoteAccount( + address: string, + opts: { + useRICache: boolean // enables the RI cache. 
enable only for immutable data + canThrowException?: boolean + } = { useRICache: false, canThrowException: false } + ): Promise { + let wrappedAccount: ShardusTypes.WrappedDataFromQueue | null = null + if (!isServiceMode()) { + if (this.currentCycleShardData == null) { + await this.waitForShardData() + } + // TSConversion since this should never happen due to the above function should we assert that the value is non null?. Still need to figure out the best practice. + if (this.currentCycleShardData == null) { + throw new Error('getLocalOrRemoteAccount: network not ready') + } + } + + // If enabled, check the RI cache first + if (opts.useRICache) { + const riCacheResult = await this.app.getCachedRIAccountData([address]) + if (riCacheResult != null) { + if (riCacheResult.length > 0) { + nestedCountersInstance.countEvent('stateManager', 'getLocalOrRemoteAccount: RI cache hit') + if (logFlags.verbose) this.mainLogger.debug(`getLocalOrRemoteAccount: RI cache hit for ${address}`) + wrappedAccount = riCacheResult[0] as ShardusTypes.WrappedDataFromQueue + return wrappedAccount + } + } else { + nestedCountersInstance.countEvent('stateManager', 'getLocalOrRemoteAccount: RI cache miss') + } + } + + let forceLocalGlobalLookup = false + + if (this.accountGlobals.isGlobalAccount(address) || isServiceMode()) { + forceLocalGlobalLookup = true + } + + //it seems backwards that isServiceMode would treat the account as always remote, as it has access to all data locally + let accountIsRemote = isServiceMode() ? true : this.transactionQueue.isAccountRemote(address) + + // hack to say we have all the data + if (!isServiceMode()) { + if (this.currentCycleShardData.nodes.length <= this.currentCycleShardData.shardGlobals.consensusRadius) { + accountIsRemote = false + } + } + if (forceLocalGlobalLookup) { + accountIsRemote = false + } + + if (accountIsRemote) { + let randomConsensusNode: P2PTypes.NodeListTypes.Node + const preCheckLimit = 5 + for (let i = 0; i < preCheckLimit; i++) { + randomConsensusNode = this.transactionQueue.getRandomConsensusNodeForAccount(address) + if (randomConsensusNode == null) { + nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `precheck: no consensus node found`) + throw new Error(`getLocalOrRemoteAccount: no consensus node found`) + } + // Node Precheck!. this check our internal records to find a good node to talk to. + // it is worth it to look through the list if needed. + if ( + this.isNodeValidForInternalMessage( + randomConsensusNode.id, + 'getLocalOrRemoteAccount', + true, + true, + true, + true + ) === false + ) { + //we got to the end of our tries? 
+ if (i >= preCheckLimit - 1) { + /* prettier-ignore */ if (logFlags.verbose || logFlags.getLocalOrRemote) this.getAccountFailDump(address, 'getLocalOrRemoteAccount: isNodeValidForInternalMessage failed, no retry') + //return null ....better to throw an error + if (opts.canThrowException) { + nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `precheck: out of nodes to try`) + throw new Error(`getLocalOrRemoteAccount: no consensus nodes worth asking`) + } else return null + } + } else { + break + } + } + + const message = { accountIds: [address] } + + let r: GetAccountDataWithQueueHintsResp + + // if ( + // this.config.p2p.useBinarySerializedEndpoints && + // this.config.p2p.getAccountDataWithQueueHintsBinary + // ) { + try { + const serialized_res = await this.p2p.askBinary( + randomConsensusNode, + InternalRouteEnum.binary_get_account_data_with_queue_hints, + message, + serializeGetAccountDataWithQueueHintsReq, + deserializeGetAccountDataWithQueueHintsResp, + {} + ) + r = serialized_res as GetAccountDataWithQueueHintsResp + } catch (er) { + if (er instanceof ResponseError && logFlags.error) { + this.mainLogger.error( + `ASK FAIL getLocalOrRemoteAccount exception: ResponseError encountered. Code: ${er.Code}, AppCode: ${er.AppCode}, Message: ${er.Message}` + ) + } + if (logFlags.verbose || logFlags.getLocalOrRemote) this.mainLogger.error('askBinary', er) + if (opts.canThrowException) { + throw er + } else { + nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `askBinary ex: ${er?.message}`) + } + } + // } else { + // r = await this.p2p.ask(randomConsensusNode, 'get_account_data_with_queue_hints', message) + // } + + if (!r) { + if (logFlags.error || logFlags.getLocalOrRemote) + this.mainLogger.error('ASK FAIL getLocalOrRemoteAccount r === false') + if (opts.canThrowException) throw new Error(`getLocalOrRemoteAccount: remote node had an exception`) + } + + const result = r as GetAccountDataWithQueueHintsResp + if (result != null && result.accountData != null && result.accountData.length > 0) { + wrappedAccount = result.accountData[0] + if (wrappedAccount == null) { + if (logFlags.verbose || logFlags.getLocalOrRemote) + this.getAccountFailDump(address, 'remote result.accountData[0] == null') + nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `remote result.accountData[0] == null`) + } + return wrappedAccount + } else { + //these cases probably should throw an error to, but dont wont to over prescribe the format yet + //if the remote node has a major breakdown it should return false + if (result == null) { + /* prettier-ignore */ if (logFlags.verbose || logFlags.getLocalOrRemote) this.getAccountFailDump(address, 'remote request missing data: result == null') + nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `remote else.. result == null`) + } else if (result.accountData == null) { + /* prettier-ignore */ if (logFlags.verbose || logFlags.getLocalOrRemote) this.getAccountFailDump(address, 'remote request missing data: result.accountData == null ' + utils.stringifyReduce(result)) + nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `remote else.. result.accountData == null`) + } else if (result.accountData.length <= 0) { + /* prettier-ignore */ if (logFlags.verbose || logFlags.getLocalOrRemote) this.getAccountFailDump(address, 'remote request missing data: result.accountData.length <= 0 ' + utils.stringifyReduce(result)) + nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `remote else.. 
result.accountData.length <= 0 `) + } + } + } else { + // we are local! + const accountData = await this.app.getAccountDataByList([address]) + if (accountData != null) { + for (const wrappedAccountEntry of accountData) { + // We are going to add in new data here, which upgrades the account wrapper to a new type. + const expandedRef = wrappedAccountEntry as ShardusTypes.WrappedDataFromQueue + expandedRef.seenInQueue = false + + if (this.lastSeenAccountsMap != null) { + const queueEntry = this.lastSeenAccountsMap[expandedRef.accountId] + if (queueEntry != null) { + expandedRef.seenInQueue = true + } + } + wrappedAccount = expandedRef + } + } else { + //this should probably throw as we expect a [] for the real empty case + //avoiding too many changes + if (logFlags.verbose || logFlags.getLocalOrRemote) + this.getAccountFailDump(address, 'getAccountDataByList() returned null') + nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `localload: getAccountDataByList() returned null`) + return null + } + // there must have been an issue in the past, but for some reason we are checking the first element in the array now. + if (accountData[0] == null) { + if (logFlags.verbose || logFlags.getLocalOrRemote) this.getAccountFailDump(address, 'accountData[0] == null') + nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `localload: accountData[0] == null`) + } + if (accountData.length > 1 || accountData.length == 0) { + /* prettier-ignore */ if (logFlags.verbose || logFlags.getLocalOrRemote) this.getAccountFailDump(address, `getAccountDataByList() returned wrong element count: ${accountData}`) + nestedCountersInstance.countEvent( + 'getLocalOrRemoteAccount', + `localload: getAccountDataByList() returned wrong element count` + ) + } + return wrappedAccount + } + return null + }, + + getAccountFailDump(address: string, message: string) { + // this.currentCycleShardData + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('getAccountFailDump', ` `, `${utils.makeShortHash(address)} ${message} `) + }, + + // HOMENODEMATHS is this used by any apps? it is not used by shardus + async getRemoteAccount(address: string) { + let wrappedAccount: unknown + + await this.waitForShardData() + // TSConversion since this should never happen due to the above function should we assert that the value is non null?. Still need to figure out the best practice. + if (this.currentCycleShardData == null) { + throw new Error('getRemoteAccount: network not ready') + } + + const homeNode = ShardFunctions.findHomeNode( + this.currentCycleShardData.shardGlobals, + address, + this.currentCycleShardData.parititionShardDataMap + ) + if (homeNode == null) { + throw new Error(`getRemoteAccount: no home node found`) + } + + // Node Precheck! 
TODO implement retry + if (this.isNodeValidForInternalMessage(homeNode.node.id, 'getRemoteAccount', true, true) === false) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('getRemoteAccount: isNodeValidForInternalMessage failed, no retry yet') + return null + } + + const message = { accountIds: [address] } + let result: GetAccountDataWithQueueHintsResp + // if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.getAccountDataWithQueueHintsBinary) { + try { + const serialized_res = await this.p2p.askBinary( + homeNode.node, + InternalRouteEnum.binary_get_account_data_with_queue_hints, + message, + serializeGetAccountDataWithQueueHintsReq, + deserializeGetAccountDataWithQueueHintsResp, + {} + ) + result = serialized_res as GetAccountDataWithQueueHintsResp + } catch (er) { + if (er instanceof ResponseError && logFlags.error) { + this.mainLogger.error( + `ASK FAIL getRemoteAccount exception: ResponseError encountered. Code: ${er.Code}, AppCode: ${er.AppCode}, Message: ${er.Message}` + ) + } else if (logFlags.verbose) this.mainLogger.error('ASK FAIL getRemoteAccount exception:', er) + return null + } + // } else { + // result = await this.p2p.ask(homeNode.node, 'get_account_data_with_queue_hints', message) + // } + + if (!result) { + if (logFlags.error) this.mainLogger.error('ASK FAIL getRemoteAccount result === false') + } + if (result === null) { + if (logFlags.error) this.mainLogger.error('ASK FAIL getRemoteAccount result === null') + } + if (result != null && result.accountData != null && result.accountData.length > 0) { + wrappedAccount = result.accountData[0] + return wrappedAccount + } + + return null + } +} \ No newline at end of file diff --git a/src/state-manager/Shard.ts b/src/state-manager/Shard.ts new file mode 100644 index 000000000..47b09c610 --- /dev/null +++ b/src/state-manager/Shard.ts @@ -0,0 +1,312 @@ + +import { P2P as P2PTypes } from '@shardeum-foundation/lib-types' +import { logFlags } from '../logger' +import { shardusGetTime } from '../network' +import { nestedCountersInstance } from '../utils/nestedCounters' +import * as utils from '../utils' +import * as NodeList from '../p2p/NodeList' +import * as CycleChain from '../p2p/CycleChain' +import ShardFunctions from './shardFunctions' +import { CycleShardData } from './state-manager-types' +import * as ShardusTypes from '../shardus/shardus-types' +import * as StateManagerTypes from './state-manager-types' + +export const shardMethods = { + updateShardValues(cycleNumber: number, mode: P2PTypes.ModesTypes.Record['mode']) { + if (this.currentCycleShardData == null) { + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_firstCycle', `${cycleNumber}`, ` first init `) + } + + const cycleShardData = {} as CycleShardData + + // lets make sure shard calculation are happening at a consistent interval + const calculationTime = shardusGetTime() + if (this.lastShardCalculationTS > 0) { + const delay = calculationTime - this.lastShardCalculationTS - this.config.p2p.cycleDuration * 1000 + + if (delay > 5000) { + this.statemanager_fatal( + `updateShardValues-delay > 5s ${delay / 1000}`, + `updateShardValues-delay ${delay / 1000}` + ) + } else if (delay > 4000) { + nestedCountersInstance.countEvent('stateManager', 'updateShardValues delay > 4s') + } else if (delay > 3000) { + nestedCountersInstance.countEvent('stateManager', 'updateShardValues delay > 3s') + } else if (delay > 2000) { + nestedCountersInstance.countEvent('stateManager', 'updateShardValues delay > 2s') + } + + 
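+       // `delay` above is how much later than exactly one full cycle (cycleDuration * 1000 ms) this shard
+       // calculation ran compared to the previous one: past 5s it is logged as a fatal, while the
+       // 2s/3s/4s buckets only bump nested counters so timing drift can be watched without raising fatals.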
cycleShardData.calculationTime = calculationTime + } + this.lastShardCalculationTS = calculationTime + + // todo get current cycle.. store this by cycle? + cycleShardData.nodeShardDataMap = new Map() + cycleShardData.parititionShardDataMap = new Map() + cycleShardData.nodes = this.getNodesForCycleShard(mode) + cycleShardData.activeFoundationNodes = cycleShardData.nodes.filter((node) => node.foundationNode) + cycleShardData.cycleNumber = cycleNumber + cycleShardData.partitionsToSkip = new Map() + cycleShardData.hasCompleteData = false + + if (this.lastActiveCount === -1) { + this.lastActiveCount = cycleShardData.nodes.length + } else { + const change = cycleShardData.nodes.length - this.lastActiveCount + if (change != 0) { + /* prettier-ignore */ nestedCountersInstance.countEvent('networkSize', `cyc:${cycleNumber} active:${cycleShardData.nodes.length} change:${change}`) + } + this.lastActiveCount = cycleShardData.nodes.length + } + + try { + // cycleShardData.ourNode = NodeList.nodes.get(Self.id) + cycleShardData.ourNode = NodeList.nodes.get(this.p2p.getNodeId()) + } catch (ex) { + if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_notactive', `${cycleNumber}`, ` `) + return + } + + if (cycleShardData.nodes.length === 0) { + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_noNodeListAvailable', `${cycleNumber}`, ` `) + return // no active nodes so stop calculating values + } + + if (this.config === null || this.config.sharding === null) { + throw new Error('this.config.sharding === null') + } + + const cycle = this.p2p.state.getLastCycle() + if (cycle !== null && cycle !== undefined) { + cycleShardData.timestamp = cycle.start * 1000 + cycleShardData.timestampEndCycle = (cycle.start + cycle.duration) * 1000 + } + + const edgeNodes = this.config.sharding.nodesPerEdge as number + + // save this per cycle? + cycleShardData.shardGlobals = ShardFunctions.calculateShardGlobals( + cycleShardData.nodes.length, + this.config.sharding.nodesPerConsensusGroup as number, + edgeNodes + ) + + this.profiler.profileSectionStart('updateShardValues_computePartitionShardDataMap1') //13ms, #:60 + // partition shard data + ShardFunctions.computePartitionShardDataMap( + cycleShardData.shardGlobals, + cycleShardData.parititionShardDataMap, + 0, + cycleShardData.shardGlobals.numPartitions + ) + this.profiler.profileSectionEnd('updateShardValues_computePartitionShardDataMap1') + + this.profiler.profileSectionStart('updateShardValues_computePartitionShardDataMap2') //37ms, #:60 + // generate limited data for all nodes data for all nodes. 
+ ShardFunctions.computeNodePartitionDataMap( + cycleShardData.shardGlobals, + cycleShardData.nodeShardDataMap, + cycleShardData.nodes, + cycleShardData.parititionShardDataMap, + cycleShardData.nodes, + false + ) + this.profiler.profileSectionEnd('updateShardValues_computePartitionShardDataMap2') + + this.profiler.profileSectionStart('updateShardValues_computeNodePartitionData') //22ms, #:60 + // get extended data for our node + cycleShardData.nodeShardData = ShardFunctions.computeNodePartitionData( + cycleShardData.shardGlobals, + cycleShardData.ourNode, + cycleShardData.nodeShardDataMap, + cycleShardData.parititionShardDataMap, + cycleShardData.nodes, + true + ) + this.profiler.profileSectionEnd('updateShardValues_computeNodePartitionData') + + // This is currently redudnant if we move to lazy init of extended data we should turn it back on + // this.profiler.profileSectionStart('updateShardValues_computeNodePartitionDataMap1') // 4ms, #:60 + // TODO perf scalability need to generate this as needed in very large networks with millions of nodes. + // generate full data for nodes that store our home partition + // + // ShardFunctions.computeNodePartitionDataMap(cycleShardData.shardGlobals, cycleShardData.nodeShardDataMap, cycleShardData.nodeShardData.nodeThatStoreOurParitionFull, cycleShardData.parititionShardDataMap, cycleShardData.nodes, true, false) + // this.profiler.profileSectionEnd('updateShardValues_computeNodePartitionDataMap1') + + // cycleShardData.nodeShardData = cycleShardData.nodeShardDataMap.get(cycleShardData.ourNode.id) + + this.profiler.profileSectionStart('updateShardValues_computeNodePartitionDataMap2') //232ms, #:60 + // generate lightweight data for all active nodes (note that last parameter is false to specify the lightweight data) + const fullDataForDebug = true // Set this to false for performance reasons!!! setting it to true saves us from having to recalculate stuff when we dump logs. + ShardFunctions.computeNodePartitionDataMap( + cycleShardData.shardGlobals, + cycleShardData.nodeShardDataMap, + cycleShardData.nodes, + cycleShardData.parititionShardDataMap, + cycleShardData.nodes, + fullDataForDebug + ) + this.profiler.profileSectionEnd('updateShardValues_computeNodePartitionDataMap2') + + // TODO if fullDataForDebug gets turned false we will update the guts of this calculation + // ShardFunctions.computeNodePartitionDataMapExt(cycleShardData.shardGlobals, cycleShardData.nodeShardDataMap, cycleShardData.nodes, cycleShardData.parititionShardDataMap, cycleShardData.nodes) + + this.currentCycleShardData = cycleShardData + this.shardValuesByCycle.set(cycleNumber, cycleShardData) + + // calculate nodes that would just now start syncing edge data because the network shrank. + if (cycleShardData.ourNode.status === 'active') { + this.profiler.profileSectionStart('updateShardValues_getOrderedSyncingNeighbors') //0 + // calculate if there are any nearby nodes that are syncing right now. 
+ if (logFlags.verbose) this.mainLogger.debug(`updateShardValues: getOrderedSyncingNeighbors`) + cycleShardData.syncingNeighbors = this.p2p.state.getOrderedSyncingNeighbors(cycleShardData.ourNode) + this.profiler.profileSectionEnd('updateShardValues_getOrderedSyncingNeighbors') + + if (cycleShardData.syncingNeighbors.length > 0) { + //old: add all syncing nodes + cycleShardData.syncingNeighborsTxGroup = [...cycleShardData.syncingNeighbors] + //TODO filter syncingNeighborsTxGroup to nodes that would care..(cover our data) + // for(let node in cycleShardData.syncingNeighbors){ + + // ShardFunctions. + // } + cycleShardData.syncingNeighborsTxGroup.push(cycleShardData.ourNode) + cycleShardData.hasSyncingNeighbors = true + + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_neighbors', `${cycleShardData.cycleNumber}`, ` neighbors: ${utils.stringifyReduce(cycleShardData.syncingNeighbors.map((node) => utils.makeShortHash(node.id) + ':' + node.externalPort))}`) + } else { + cycleShardData.hasSyncingNeighbors = false + } + + if (logFlags.console) console.log(`updateShardValues cycle:${cycleShardData.cycleNumber} `) + + // if (this.preTXQueue.length > 0) { + // for (let tx of this.preTXQueue) { + // /* prettier-ignore */ if (logFlags.playback ) this.logger.playbackLogNote('shrd_sync_preTX', ` `, ` ${utils.stringifyReduce(tx)} `) + // this.transactionQueue.routeAndQueueAcceptedTransaction(tx, false, null) + // } + // this.preTXQueue = [] + // } + this.profiler.profileSectionStart('updateShardValues_updateRuntimeSyncTrackers') //0 + this.accountSync.updateRuntimeSyncTrackers() + this.profiler.profileSectionEnd('updateShardValues_updateRuntimeSyncTrackers') + // this.calculateChangeInCoverage() + } + + this.profiler.profileSectionStart('updateShardValues_getPartitionLists') // 0 + // calculate our consensus partitions for use by data repair: + // cycleShardData.ourConsensusPartitions = [] + const partitions = ShardFunctions.getConsenusPartitionList( + cycleShardData.shardGlobals, + cycleShardData.nodeShardData + ) + cycleShardData.ourConsensusPartitions = partitions + + const partitions2 = ShardFunctions.getStoredPartitionList(cycleShardData.shardGlobals, cycleShardData.nodeShardData) + cycleShardData.ourStoredPartitions = partitions2 + + this.profiler.profileSectionEnd('updateShardValues_getPartitionLists') + + // this will be a huge log. + // Temp disable for log size + // /* prettier-ignore */ if (logFlags.playback ) this.logger.playbackLogNote('shrd_sync_cycleData', `${cycleNumber}`, ` cycleShardData: cycle:${cycleNumber} data: ${utils.stringifyReduce(cycleShardData)}`) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_cycleData', `${cycleNumber}`, ` cycleShardData: cycle:${this.currentCycleShardData.cycleNumber} `) + + this.lastActiveNodeCount = cycleShardData.nodes.length + + cycleShardData.hasCompleteData = true + }, + + calculateChangeInCoverage(): void { + // maybe this should be a shard function so we can run unit tests on it for expanding or shrinking networks! 
+ const newSharddata = this.currentCycleShardData + + if (newSharddata == null || this.currentCycleShardData == null) { + return + } + + let cycleToCompareTo = newSharddata.cycleNumber - 1 + + //if this is our first time to sync we should attempt to compare to an older cycle + if (this.firstTimeToRuntimeSync === true) { + this.firstTimeToRuntimeSync = false + + //make sure the cycle started is an older one + if (this.accountSync.syncStatement.cycleStarted < cycleToCompareTo) { + cycleToCompareTo = this.accountSync.syncStatement.cycleStarted + } else { + //in theory we could just return but I dont want to change that side of the branch yet. + } + } + + const oldShardData = this.shardValuesByCycle.get(cycleToCompareTo) + + if (oldShardData == null) { + // log ? + return + } + const cycle = this.currentCycleShardData.cycleNumber + // oldShardData.shardGlobals, newSharddata.shardGlobals + const coverageChanges = ShardFunctions.computeCoverageChanges( + oldShardData.nodeShardData, + newSharddata.nodeShardData + ) + + this.coverageChangesCopy = coverageChanges + + for (const change of coverageChanges) { + // log info about the change. + // ${utils.stringifyReduce(change)} + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_change', `${oldShardData.cycleNumber}->${newSharddata.cycleNumber}`, ` ${ShardFunctions.leadZeros8(change.start.toString(16))}->${ShardFunctions.leadZeros8(change.end.toString(16))} `) + + // create a range object from our coverage change. + + const range = { + startAddr: 0, + endAddr: 0, + low: '', + high: '', + } as any // BasicAddressRange type + range.startAddr = change.start + range.endAddr = change.end + range.low = ShardFunctions.leadZeros8(range.startAddr.toString(16)) + '0'.repeat(56) + range.high = ShardFunctions.leadZeros8(range.endAddr.toString(16)) + 'f'.repeat(56) + // create sync trackers + this.accountSync.createSyncTrackerByRange(range, cycle) + } + + if (coverageChanges.length > 0) { + this.accountSync.syncRuntimeTrackers() + } + // launch sync trackers + // coverage changes... should have a list of changes + // should note if the changes are an increase or reduction in covered area. + // log the changes. 
+ // next would be to create some syncTrackers based to cover increases + }, + + getCurrentCycleShardData(): CycleShardData | null { + if (this.currentCycleShardData === null) { + const cycle = this.p2p.state.getLastCycle() + if (cycle === null || cycle === undefined) { + return null + } + this.updateShardValues(cycle.counter, cycle.mode) + } + + return this.currentCycleShardData + }, + + hasCycleShardData() { + return this.currentCycleShardData != null + }, + + async waitForShardCalcs() { + while (this.currentCycleShardData == null) { + this.getCurrentCycleShardData() + await utils.sleep(1000) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_waitForShardData_firstNode', ``, ` ${utils.stringifyReduce(this.currentCycleShardData)} `) + } + } +} diff --git a/src/state-manager/TransactionConsensus.handlers.ts b/src/state-manager/TransactionConsensus.handlers.ts new file mode 100644 index 000000000..a919d65a5 --- /dev/null +++ b/src/state-manager/TransactionConsensus.handlers.ts @@ -0,0 +1,676 @@ +import * as Context from '../p2p/Context' +import * as Comms from '../p2p/Comms' +import * as NodeList from '../p2p/NodeList' +import * as Self from '../p2p/Self' +import * as CycleChain from '../p2p/CycleChain' +import { isDebugModeMiddleware, isDebugModeMiddlewareLow } from '../network/debugMiddleware' +import { randomUUID } from 'crypto' +import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' +import { getTxTimestampReq, deserializeGetTxTimestampReq, serializeGetTxTimestampReq } from '../types/GetTxTimestampReq' +import { getTxTimestampResp, deserializeGetTxTimestampResp, serializeGetTxTimestampResp } from '../types/GetTxTimestampResp' +import { getStreamWithTypeCheck, requestErrorHandler } from '../types/Helpers' +import { TypeIdentifierEnum } from '../types/enum/TypeIdentifierEnum' +import { RequestErrorEnum } from '../types/enum/RequestErrorEnum' +import { InternalBinaryHandler } from '../types/Handler' +import { Route } from '@shardeum-foundation/lib-types/build/src/p2p/P2PTypes' +import { BadRequest, InternalError, serializeResponseError } from '../types/ResponseError' +import { deserializeGetConfirmOrChallengeReq, GetConfirmOrChallengeReq, serializeGetConfirmOrChallengeReq } from '../types/GetConfirmOrChallengeReq' +import { deserializeGetConfirmOrChallengeResp, GetConfirmOrChallengeResp, serializeGetConfirmOrChallengeResp } from '../types/GetConfirmOrChallengeResp' +import { SignedReceipt } from './state-manager-types' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { Utils } from '@shardeum-foundation/lib-types' +import { logFlags } from '../logger' +import { Request, Response } from 'express-serve-static-core' +import { errorToStringFull } from '../utils' +import * as utils from '../utils' +import { Node } from '../shardus/shardus-types' +import { profilerInstance } from '../utils/profiler' +import * as Shardus from '../shardus/shardus-types' +import { AppliedVoteHash } from './state-manager-types' +import { + PoqoDataAndReceiptReq, + serializePoqoDataAndReceiptReq, + deserializePoqoDataAndReceiptResp, +} from '../types/PoqoDataAndReceiptReq' +import { + deserializePoqoSendReceiptReq, + PoqoSendReceiptReq, + serializePoqoSendReceiptReq, +} from '../types/PoqoSendReceiptReq' +import { + deserializePoqoSendVoteReq, + serializePoqoSendVoteReq, +} from '../types/PoqoSendVoteReq' + +export const handlerMethods = { + setupHandlers(): void { + Context.network.registerExternalGet('debug-poqo-fail', isDebugModeMiddleware, 
(req, res) => { + try { + const newChance = req.query.newChance + if (typeof newChance !== 'string' || !newChance) { + res.write(`debug-poqo-fail: missing param newChance ${this.debugFailPOQo}\n`) + res.end() + return + } + const newChanceInt = parseFloat(newChance) + if (newChanceInt >= 1) { + res.write(`debug-poqo-fail: newChance not a float: ${this.debugFailPOQo}\n`) + res.end() + return + } + this.debugFailPOQo = newChanceInt + res.write(`debug-poqo-fail: set: ${this.debugFailPOQo}\n`) + } catch (e) { + res.write(`debug-poqo-fail: error: ${this.debugFailPOQo}\n`) + } + res.end() + }) + + Context.network.registerExternalGet('debug-poq-switch', isDebugModeMiddleware, (_req, res) => { + try { + this.stateManager.transactionQueue.useNewPOQ = !this.stateManager.transactionQueue.useNewPOQ + res.write(`this.useNewPOQ: ${this.stateManager.transactionQueue.useNewPOQ}\n`) + } catch (e) { + res.write(`${e}\n`) + } + res.end() + }) + + Context.network.registerExternalGet('debug-poq-wait-before-confirm', isDebugModeMiddleware, (_req, res) => { + try { + const waitTimeBeforeConfirm = _req.query.waitTimeBeforeConfirm as string + if (waitTimeBeforeConfirm && !isNaN(parseInt(waitTimeBeforeConfirm))) + this.config.stateManager.waitTimeBeforeConfirm = parseInt(waitTimeBeforeConfirm) + res.write(`stateManager.waitTimeBeforeConfirm: ${this.config.stateManager.waitTimeBeforeConfirm}\n`) + } catch (e) { + res.write(`${e}\n`) + } + res.end() + }) + + Context.network.registerExternalGet('debug-poq-wait-limit-confirm', isDebugModeMiddleware, (_req, res) => { + try { + const waitLimitAfterFirstVote = _req.query.waitLimitAfterFirstVote as string + if (waitLimitAfterFirstVote && !isNaN(parseInt(waitLimitAfterFirstVote))) + this.config.stateManager.waitLimitAfterFirstVote = parseInt(waitLimitAfterFirstVote) + res.write(`stateManager.waitLimitAfterFirstVote: ${this.config.stateManager.waitLimitAfterFirstVote}\n`) + } catch (e) { + res.write(`${e}\n`) + } + res.end() + }) + + Context.network.registerExternalGet('debug-poq-wait-before-receipt', isDebugModeMiddleware, (_req, res) => { + try { + const waitTimeBeforeReceipt = _req.query.waitTimeBeforeReceipt as string + if (waitTimeBeforeReceipt && !isNaN(parseInt(waitTimeBeforeReceipt))) + this.config.stateManager.waitTimeBeforeReceipt = parseInt(waitTimeBeforeReceipt) + res.write(`stateManager.waitTimeBeforeReceipt: ${this.config.stateManager.waitTimeBeforeReceipt}\n`) + } catch (e) { + res.write(`${e}\n`) + } + res.end() + }) + + Context.network.registerExternalGet('debug-poq-wait-limit-receipt', isDebugModeMiddleware, (_req, res) => { + try { + const waitLimitAfterFirstMessage = _req.query.waitLimitAfterFirstMessage as string + if (waitLimitAfterFirstMessage && !isNaN(parseInt(waitLimitAfterFirstMessage))) + this.config.stateManager.waitLimitAfterFirstMessage = parseInt(waitLimitAfterFirstMessage) + res.write(`stateManager.waitLimitAfterFirstVote: ${this.config.stateManager.waitLimitAfterFirstMessage}\n`) + } catch (e) { + res.write(`${e}\n`) + } + res.end() + }) + + Context.network.registerExternalGet('debug-produceBadVote', isDebugModeMiddleware, (req, res) => { + this.produceBadVote = !this.produceBadVote + res.json({ status: 'ok', produceBadVote: this.produceBadVote }) + }) + + Context.network.registerExternalGet('debug-produceBadChallenge', isDebugModeMiddleware, (req, res) => { + this.produceBadChallenge = !this.produceBadChallenge + res.json({ status: 'ok', produceBadChallenge: this.produceBadChallenge }) + }) + + Context.network.registerExternalGet( + 
'debug-profile-tx-timestamp-endpoint', + isDebugModeMiddleware, + async (req, res) => { + try { + const { offset } = req.query + + res.write('Profiling tx timestamp endpoint of all network nodes\n') + + const randomTxId = Context.crypto.hash(randomUUID()) + const cycleMarker = CycleChain.getCurrentCycleMarker() + const cycleCounter = CycleChain.newest.counter + + const stats = new Map() + const failed = new Map() + + const p2pPromises = Array.from(NodeList.nodes.values()) + .filter((node) => node.id !== Self.id) + .map((node) => { + const start = Date.now() + return (this.p2p as any) + .askBinary( + node, + InternalRouteEnum.binary_get_tx_timestamp, + { + cycleMarker, + cycleCounter, + txId: randomTxId, + }, + serializeGetTxTimestampReq, + deserializeGetTxTimestampResp, + {}, + '', + false, + offset ? parseInt(`${offset}`) : 30 * 1000 + ) + .then(() => { + const end = Date.now() + stats.set(`${node.externalIp}:${node.externalPort}`, end - start) + }) + .catch(() => { + const end = Date.now() + failed.set(`${node.externalIp}:${node.externalPort}`, end - start) + }) + }) + + // Wait for all requests to finish + await Promise.allSettled(p2pPromises) + + // Compute statistics + const responseTimes = Array.from(stats.values()).sort((a, b) => a - b) + const medianResponseTime = responseTimes[Math.floor(responseTimes.length / 2)] || 0 + const averageResponseTime = responseTimes.reduce((a, b) => a + b, 0) / (responseTimes.length || 1) + const failedNodes = Array.from(failed.keys()) + + console.log('Profiling results:', { + medianResponseTime, + averageResponseTime, + failedNodes, + stats, + }) + res.write('Profiling results:\n') + res.write(`Median response time: ${medianResponseTime}ms\n`) + res.write(`Average response time: ${averageResponseTime}ms\n`) + res.write(`Failed nodes: ${failedNodes.join(', ')}\n`) + res.write('Detailed stats:\n') + res.write(`Node,Response Time\n`) + stats.forEach((responseTime, node) => { + res.write(`${node},${responseTime}\n`) + }) + res.end() + } catch (error) { + console.error('Unexpected error:', error) + res.write(`\n{"error":"Internal Server Error","details":"${error.message}"}`) + res.end() + } + } + ) + + const getTxTimestampBinary: Route> = { + name: InternalRouteEnum.binary_get_tx_timestamp, + handler: async (payload, respond, header) => { + const route = InternalRouteEnum.binary_get_tx_timestamp + this.profiler.scopedProfileSectionStart(route) + nestedCountersInstance.countEvent('internal', route) + profilerInstance.scopedProfileSectionStart(route, true, payload.length) + + const errorHandler = ( + errorType: RequestErrorEnum, + opts?: { customErrorLog?: string; customCounterSuffix?: string } + ): void => requestErrorHandler(route, errorType, header, opts) + + let tsReceipt: Shardus.TimestampReceipt + try { + const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cGetTxTimestampReq) + if (!requestStream) { + errorHandler(RequestErrorEnum.InvalidRequest) + return respond(tsReceipt, serializeGetTxTimestampResp) + } + + const readableReq = deserializeGetTxTimestampReq(requestStream) + // handle rare race condition where we have seen the txId but not the timestamp + if ( + Context.config.p2p.timestampCacheFix && + this.seenTimestampRequests.has(readableReq.txId) && + !this.txTimestampCacheByTxId.has(readableReq.txId) + ) { + nestedCountersInstance.countEvent('consensus', 'get_tx_timestamp seen txId but found no timestamp') + return respond(BadRequest('get_tx_timestamp seen txId but found no timestamp'), serializeResponseError) + } + 
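+         // The txId is recorded as seen below, so a later request for the same txId that still finds no
+         // cached timestamp can be rejected by the timestampCacheFix race check above.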
this.seenTimestampRequests.add(readableReq.txId) + tsReceipt = this.getOrGenerateTimestampReceiptFromCache( + readableReq.txId, + readableReq.cycleMarker, + readableReq.cycleCounter + ) + return respond(tsReceipt, serializeGetTxTimestampResp) + } catch (e) { + nestedCountersInstance.countEvent('internal', `${route}-exception`) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route}: Exception executing request: ${utils.errorToStringFull(e)}`) + respond(tsReceipt, serializeGetTxTimestampResp) + } finally { + profilerInstance.scopedProfileSectionEnd(route, payload.length) + } + }, + } + + this.p2p.registerInternalBinary(getTxTimestampBinary.name, getTxTimestampBinary.handler) + + const getConfirmOrChallengeBinaryHandler: Route> = { + name: InternalRouteEnum.binary_get_confirm_or_challenge, + // eslint-disable-next-line @typescript-eslint/no-unused-vars + handler: async (payload, respond, header, sign) => { + const route = InternalRouteEnum.binary_get_confirm_or_challenge + nestedCountersInstance.countEvent('internal', route) + this.profiler.scopedProfileSectionStart(route, true, payload.length) + const confirmOrChallengeResult: GetConfirmOrChallengeResp = { + txId: '', + appliedVoteHash: '', + result: null, + uniqueCount: 0, + } + try { + const reqStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cGetConfirmOrChallengeReq) + if (!reqStream) { + nestedCountersInstance.countEvent('internal', `${route}-invalid_request`) + respond(confirmOrChallengeResult, serializeGetConfirmOrChallengeResp) + return + } + const request = deserializeGetConfirmOrChallengeReq(reqStream) + const { txId } = request + let queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(txId) + if (queueEntry == null) { + // It is ok to search the archive for this. 
+            // Not checking this was possibly breaking the gossip chain before
+            queueEntry = this.stateManager.transactionQueue.getQueueEntryArchived(txId, route)
+          }
+          if (queueEntry == null) {
+            /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`get_confirm_or_challenge no queue entry for ${txId} dbg:${this.stateManager.debugTXHistory[utils.stringifyReduce(txId)]}`)
+            respond(confirmOrChallengeResult, serializeGetConfirmOrChallengeResp)
+            return
+          }
+          if (queueEntry.receivedBestConfirmation == null && queueEntry.receivedBestChallenge == null) {
+            nestedCountersInstance.countEvent('consensus', 'get_confirm_or_challenge no confirmation or challenge')
+            /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`get_confirm_or_challenge no confirmation or challenge for ${queueEntry.logID}, bestVote: ${Utils.safeStringify(queueEntry.receivedBestVote)}, bestConfirmation: ${Utils.safeStringify(queueEntry.receivedBestConfirmation)}`)
+            respond(confirmOrChallengeResult, serializeGetConfirmOrChallengeResp)
+            return
+          }
+
+          // refine the result and unique count
+          const { receivedBestChallenge, receivedBestConfirmation, uniqueChallengesCount } = queueEntry
+          if (receivedBestChallenge && uniqueChallengesCount >= this.config.stateManager.minRequiredChallenges) {
+            confirmOrChallengeResult.result = receivedBestChallenge
+            confirmOrChallengeResult.uniqueCount = uniqueChallengesCount
+          } else {
+            confirmOrChallengeResult.result = receivedBestConfirmation
+            confirmOrChallengeResult.uniqueCount = 1
+          }
+          respond(confirmOrChallengeResult, serializeGetConfirmOrChallengeResp)
+        } catch (e) {
+          // Error handling
+          if (logFlags.error) this.mainLogger.error(`get_confirm_or_challenge error ${e.message}`)
+          respond(confirmOrChallengeResult, serializeGetConfirmOrChallengeResp)
+        } finally {
+          this.profiler.scopedProfileSectionEnd(route)
+        }
+      },
+    }
+
+    this.p2p.registerInternalBinary(getConfirmOrChallengeBinaryHandler.name, getConfirmOrChallengeBinaryHandler.handler)
+
+    Comms.registerGossipHandler('poqo-receipt-gossip', (payload: SignedReceipt & { txGroupCycle: number }) => {
+      profilerInstance.scopedProfileSectionStart('poqo-receipt-gossip')
+      try {
+        const queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(payload.proposal.txid)
+        if (queueEntry == null) {
+          nestedCountersInstance.countEvent('poqo', 'error: gossip skipped: no queue entry')
+          /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-receipt-gossip no queue entry for ${payload.proposal.txid}`)
+          return
+        }
+        if (payload.txGroupCycle) {
+          if (queueEntry.txGroupCycle !== payload.txGroupCycle) {
+            /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-receipt-gossip mismatch txGroupCycle for txid: ${payload.proposal.txid}, sender's txGroupCycle: ${payload.txGroupCycle}, our txGroupCycle: ${queueEntry.txGroupCycle}`)
+            nestedCountersInstance.countEvent(
+              'poqo',
+              'poqo-receipt-gossip: mismatch txGroupCycle for txid ' + payload.proposal.txid
+            )
+          }
+          delete payload.txGroupCycle
+        }
+
+        if (queueEntry.hasSentFinalReceipt === true) {
+          // We've already sent this receipt, no need to send it again
+          return
+        }
+
+        if (logFlags.verbose)
+          this.mainLogger.debug(`POQo: received receipt from gossip for ${queueEntry.logID} forwarding gossip`)
+
+        const executionGroupNodes = new Set(queueEntry.executionGroup.map((node) => node.publicKey))
+        const hasTwoThirdsMajority = this.verifyAppliedReceipt(payload, executionGroupNodes)
+        if (!hasTwoThirdsMajority) {
+          /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`Receipt does not have the required majority for txid: ${payload.proposal.txid}`)
+          nestedCountersInstance.countEvent('poqo', 'poqo-receipt-gossip: Rejecting receipt because no majority')
+          return
+        }
+
+        queueEntry.signedReceipt = { ...payload }
+        payload.txGroupCycle = queueEntry.txGroupCycle
+        Comms.sendGossip(
+          'poqo-receipt-gossip',
+          payload,
+          null,
+          null,
+          queueEntry.transactionGroup,
+          false,
+          4,
+          payload.proposal.txid,
+          '',
+          true
+        )
+
+        queueEntry.hasSentFinalReceipt = true
+
+        // If the queue entry does not have the valid final data then request that
+        if (!queueEntry.hasValidFinalData) {
+          setTimeout(async () => {
+            // Check if we have final data
+            if (queueEntry.hasValidFinalData) {
+              return
+            }
+            if (logFlags.verbose)
+              this.mainLogger.debug(`poqo-receipt-gossip: requesting final data for ${queueEntry.logID}`)
+            nestedCountersInstance.countEvent('request-final-data', 'final data timeout, making explicit request')
+
+            const nodesToAskKeys = payload.signaturePack?.map((signature) => signature.owner)
+
+            await this.stateManager.transactionQueue.requestFinalData(
+              queueEntry,
+              payload.proposal.accountIDs,
+              nodesToAskKeys
+            )
+
+            nestedCountersInstance.countEvent('request-final-data', 'final data received')
+          }, this.config.stateManager.nonExWaitForData)
+        }
+      } finally {
+        profilerInstance.scopedProfileSectionEnd('poqo-receipt-gossip')
+      }
+    })
+
+    const poqoDataAndReceiptBinaryHandler: Route> = {
+      name: InternalRouteEnum.binary_poqo_data_and_receipt,
+      handler: async (payload, respond, header, sign) => {
+        const route = InternalRouteEnum.binary_poqo_data_and_receipt
+        this.profiler.scopedProfileSectionStart(route, false)
+        try {
+          const _sender = header.sender_id
+          const reqStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cPoqoDataAndReceiptReq)
+          if (!reqStream) {
+            nestedCountersInstance.countEvent('internal', `${route}-invalid_request`)
+            return
+          }
+          const readableReq = deserializePoqoDataAndReceiptResp(reqStream)
+          // make sure we have it
+          const queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(readableReq.finalState.txid) // , payload.timestamp)
+          //It is okay to ignore this transaction if the txId is not found in the queue.
+          if (queueEntry == null) {
+            //In the past we would enqueue the TX, especially if syncing but that has been removed.
+            //The normal mechanism of sharing TXs is good enough.
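+            // Unknown txId: count it under 'broadcast_finalstate_noQueueEntry' and drop the message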
+ nestedCountersInstance.countEvent('processing', 'broadcast_finalstate_noQueueEntry') + return + } + + // validate corresponding tell sender + if (_sender == null) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-data-and-receipt invalid sender for txid: ${readableReq.finalState.txid}, sender: ${_sender}`) + return + } + + if (readableReq.txGroupCycle) { + if (queueEntry.txGroupCycle !== readableReq.txGroupCycle) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`binary_poqo_data_and_receipt mismatch txGroupCycle for txid: ${readableReq.finalState.txid}, sender's txGroupCycle: ${readableReq.txGroupCycle}, our txGroupCycle: ${queueEntry.txGroupCycle}`) + nestedCountersInstance.countEvent( + 'poqo', + 'binary_poqo_data_and_receipt: mismatch txGroupCycle for txid ' + readableReq.finalState.txid + ) + } + delete readableReq.txGroupCycle + } + + const isValidFinalDataSender = + this.stateManager.transactionQueue.factValidateCorrespondingTellFinalDataSender(queueEntry, _sender) + if (isValidFinalDataSender === false) { + nestedCountersInstance.countEvent( + 'poqo', + 'poqo-data-and-receipt: Rejecting receipt: isValidFinalDataSender === false' + ) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-data-and-receipt invalid: sender ${_sender} for data: ${queueEntry.acceptedTx.txId}`) + return + } + + if (readableReq.receipt == null) { + nestedCountersInstance.countEvent( + 'poqo', + 'poqo-data-and-receipt: Rejecting receipt: readableReq.receipt == null' + ) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-data-and-receipt invalid: readableReq.receipt == null sender ${_sender}`) + return + } + if (readableReq.finalState.txid != readableReq.receipt.proposal.txid) { + nestedCountersInstance.countEvent( + 'poqo', + 'poqo-data-and-receipt: Rejecting receipt: readableReq.finalState.txid != readableReq.receipt.txid' + ) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-data-and-receipt invalid: readableReq.finalState.txid != readableReq.receipt.txid sender ${_sender} ${readableReq.finalState.txid} != ${readableReq.receipt.proposal.txid}`) + return + } + + if (!queueEntry.hasSentFinalReceipt) { + const executionGroupNodes = new Set(queueEntry.executionGroup.map((node) => node.publicKey)) + const hasTwoThirdsMajority = this.verifyAppliedReceipt(readableReq.receipt, executionGroupNodes) + if (!hasTwoThirdsMajority) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`Receipt does not have the required majority for txid: ${readableReq.receipt.proposal.txid}`) + nestedCountersInstance.countEvent('poqo', 'poqo-data-and-receipt: Rejecting receipt because no majority') + return + } + if (logFlags.verbose) + this.mainLogger.debug(`POQo: received data & receipt for ${queueEntry.logID} starting receipt gossip`) + queueEntry.signedReceipt = readableReq.receipt + const receiptToGossip = { ...readableReq.receipt, txGroupCycle: queueEntry.txGroupCycle } + Comms.sendGossip( + 'poqo-receipt-gossip', + receiptToGossip, + null, + null, + queueEntry.transactionGroup, + false, + 4, + readableReq.finalState.txid, + '', + true + ) + queueEntry.hasSentFinalReceipt = true + } + + if (logFlags.debug) + this.mainLogger.debug( + `poqo-data-and-receipt ${queueEntry.logID}, ${Utils.safeStringify(readableReq.finalState.stateList)}` + ) + // add the data in + const savedAccountIds: Set = new Set() + for (const data of readableReq.finalState.stateList) { + //let wrappedResponse = data as Shardus.WrappedResponse + 
//this.queueEntryAddData(queueEntry, data) + if (data == null) { + /* prettier-ignore */ if (logFlags.error && logFlags.verbose) this.mainLogger.error(`poqo-data-and-receipt data == null`) + continue + } + + if (queueEntry.collectedFinalData[data.accountId] == null) { + queueEntry.collectedFinalData[data.accountId] = data + savedAccountIds.add(data.accountId) + /* prettier-ignore */ if (logFlags.playback && logFlags.verbose) this.logger.playbackLogNote('poqo-data-and-receipt', `${queueEntry.logID}`, `poqo-data-and-receipt addFinalData qId: ${queueEntry.entryID} data:${utils.makeShortHash(data.accountId)} collected keys: ${utils.stringifyReduce(Object.keys(queueEntry.collectedFinalData))}`) + } + + // if (queueEntry.state === 'syncing') { + // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_gotBroadcastfinalstate', `${queueEntry.acceptedTx.txId}`, ` qId: ${queueEntry.entryID} data:${data.accountId}`) + // } + } + + } catch (e) { + /* prettier-ignore */ if (logFlags.error) console.error(`Error processing poqoDataAndReceipt Binary handler: ${e}`) + nestedCountersInstance.countEvent('internal', `${route}-exception`) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route}: Exception executing request: ${utils.errorToStringFull(e)}`) + } finally { + profilerInstance.scopedProfileSectionEnd(route) + } + }, + } + Comms.registerInternalBinary(poqoDataAndReceiptBinaryHandler.name, poqoDataAndReceiptBinaryHandler.handler) + + const poqoSendReceiptBinary: Route> = { + name: InternalRouteEnum.binary_poqo_send_receipt, + handler: async (payload, respond, header) => { + const route = InternalRouteEnum.binary_poqo_send_receipt + this.profiler.scopedProfileSectionStart(route) + nestedCountersInstance.countEvent('internal', route) + profilerInstance.scopedProfileSectionStart(route, false, payload.length) + + const errorHandler = ( + errorType: RequestErrorEnum, + opts?: { customErrorLog?: string; customCounterSuffix?: string } + ): void => requestErrorHandler(route, errorType, header, opts) + + try { + const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cPoqoSendReceiptReq) + if (!requestStream) { + return errorHandler(RequestErrorEnum.InvalidRequest) + } + + const readableReq = deserializePoqoSendReceiptReq(requestStream) + + const queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(readableReq.proposal.txid) + if (queueEntry == null) { + /* prettier-ignore */ nestedCountersInstance.countEvent('poqo', 'binary/poqo_send_receipt: no queue entry found') + return + } + if (readableReq.txGroupCycle) { + if (queueEntry.txGroupCycle !== readableReq.txGroupCycle) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`binary_poqo_send_receipt mismatch txGroupCycle for txid: ${readableReq.proposal.txid}, sender's txGroupCycle: ${readableReq.txGroupCycle}, our txGroupCycle: ${queueEntry.txGroupCycle}`) + nestedCountersInstance.countEvent( + 'poqo', + 'binary_poqo_send_receipt: mismatch txGroupCycle for tx ' + readableReq.proposal.txid + ) + } + delete readableReq.txGroupCycle + } + + if (queueEntry.signedReceipt) { + // We've already handled this + return + } + + const executionGroupNodes = new Set(queueEntry.executionGroup.map((node) => node.publicKey)) + const hasTwoThirdsMajority = this.verifyAppliedReceipt(readableReq, executionGroupNodes) + if (!hasTwoThirdsMajority) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`Receipt does not have the required majority for txid: 
${readableReq.proposal.txid}`) + nestedCountersInstance.countEvent('poqo', 'poqo-send-receipt: Rejecting receipt because no majority') + return + } + + if (logFlags.verbose) + this.mainLogger.debug( + `POQo: Received receipt from aggregator for ${queueEntry.logID} starting CT2 for data & receipt` + ) + const receivedReceipt = readableReq as SignedReceipt + queueEntry.signedReceipt = receivedReceipt + queueEntry.hasSentFinalReceipt = true + const receiptToGossip = { ...readableReq, txGroupCycle: queueEntry.txGroupCycle } + Comms.sendGossip( + 'poqo-receipt-gossip', + receiptToGossip, + null, + null, + queueEntry.transactionGroup, + false, + 4, + readableReq.proposal.txid, + '', + true + ) + + // Only forward data if we have a valid matching preApply + if (queueEntry.ourVoteHash === readableReq.proposalHash) { + // We are a winning node + nestedCountersInstance.countEvent('poqo', 'poqo-send-receipt: forwarding data') + this.stateManager.transactionQueue.factTellCorrespondingNodesFinalData(queueEntry) + } else { + nestedCountersInstance.countEvent('poqo', "poqo-send-receipt: no matching data. Can't forward") + } + } catch (e) { + /* prettier-ignore */ if (logFlags.error) console.error(`Error processing poqoSendReceiptBinary handler: ${e}`) + nestedCountersInstance.countEvent('internal', `${route}-exception`) + /* prettier-ignore */ if (logFlags.error)this.mainLogger.error(`${route}: Exception executing request: ${utils.errorToStringFull(e)}`) + } finally { + profilerInstance.scopedProfileSectionEnd(route) + } + }, + } + + Comms.registerInternalBinary(poqoSendReceiptBinary.name, poqoSendReceiptBinary.handler) + + const poqoSendVoteBinaryHandler: Route> = { + name: InternalRouteEnum.binary_poqo_send_vote, + handler: (payload, respond, header, sign) => { + const route = InternalRouteEnum.binary_poqo_send_vote + profilerInstance.scopedProfileSectionStart(route, false) + try { + const stream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cPoqoSendVoteReq) + if (!payload) { + nestedCountersInstance.countEvent('internal', `${route}-invalid_request`) + return + } + const readableReq = deserializePoqoSendVoteReq(stream) + const queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(readableReq.txid) + if (queueEntry == null) { + /* prettier-ignore */ nestedCountersInstance.countEvent('poqo', 'poqo-send-vote: no queue entry found') + return + } + // if (readableReq.txGroupCycle) { + // if (queueEntry.txGroupCycle !== readableReq.txGroupCycle) { + // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`binary_poqo_send_vote mismatch txGroupCycle for txid: ${readableReq.txid}, sender's txGroupCycle: ${readableReq.txGroupCycle}, our txGroupCycle: ${queueEntry.txGroupCycle}`) + // nestedCountersInstance.countEvent( + // 'poqo', + // 'binary_poqo_send_vote: mismatch txGroupCycle for tx ' + readableReq.txid + // ) + // } + // delete readableReq.txGroupCycle + // } + const collectedVoteHash = readableReq as AppliedVoteHash + + // Check if vote hash has a sign + if (!collectedVoteHash.sign) { + /* prettier-ignore */ nestedCountersInstance.countEvent('poqo', 'poqo-send-vote: no sign found') + return + } + // We can reuse the same function for POQo + this.tryAppendVoteHash(queueEntry, collectedVoteHash) + } catch (e) { + /* prettier-ignore */ if (logFlags.error) console.error(`Error processing poqoSendVoteBinary handler: ${e}`) + nestedCountersInstance.countEvent('internal', `${route}-exception`) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route}: Exception 
executing request: ${utils.errorToStringFull(e)}`) + } finally { + profilerInstance.scopedProfileSectionEnd(route) + } + }, + } + Comms.registerInternalBinary(poqoSendVoteBinaryHandler.name, poqoSendVoteBinaryHandler.handler) + } +} \ No newline at end of file diff --git a/src/state-manager/TransactionConsensus.ts b/src/state-manager/TransactionConsensus.ts index fc66df448..0e0022cbf 100644 --- a/src/state-manager/TransactionConsensus.ts +++ b/src/state-manager/TransactionConsensus.ts @@ -84,6 +84,7 @@ import { deserializePoqoSendVoteReq, serializePoqoSendVoteReq } from '../types/P import { RequestReceiptForTxReqSerialized, serializeRequestReceiptForTxReq } from '../types/RequestReceiptForTxReq' import { RequestReceiptForTxRespSerialized, deserializeRequestReceiptForTxResp } from '../types/RequestReceiptForTxResp' import { removeDuplicateSignatures } from '../utils/functions/signs' +import { handlerMethods } from './TransactionConsensus.handlers' class TransactionConsenus { app: Shardus.App @@ -111,6 +112,9 @@ class TransactionConsenus { produceBadChallenge: boolean debugFailPOQo: number + // Methods from split files + setupHandlers: () => void + constructor( stateManager: StateManager, profiler: Profiler, @@ -143,1609 +147,9 @@ class TransactionConsenus { this.produceBadVote = this.config.debug.produceBadVote this.produceBadChallenge = this.config.debug.produceBadChallenge this.debugFailPOQo = 0 - } - - /*** - * ######## ## ## ######## ######## ####### #### ## ## ######## ###### - * ## ### ## ## ## ## ## ## ## ## ### ## ## ## ## - * ## #### ## ## ## ## ## ## ## ## #### ## ## ## - * ###### ## ## ## ## ## ######## ## ## ## ## ## ## ## ###### - * ## ## #### ## ## ## ## ## ## ## #### ## ## - * ## ## ### ## ## ## ## ## ## ## ### ## ## ## - * ######## ## ## ######## ## ####### #### ## ## ## ###### - */ - - setupHandlers(): void { - Context.network.registerExternalGet('debug-poqo-fail', isDebugModeMiddleware, (req, res) => { - try { - const newChance = req.query.newChance - if (typeof newChance !== 'string' || !newChance) { - res.write(`debug-poqo-fail: missing param newChance ${this.debugFailPOQo}\n`) - res.end() - return - } - const newChanceInt = parseFloat(newChance) - if (newChanceInt >= 1) { - res.write(`debug-poqo-fail: newChance not a float: ${this.debugFailPOQo}\n`) - res.end() - return - } - this.debugFailPOQo = newChanceInt - res.write(`debug-poqo-fail: set: ${this.debugFailPOQo}\n`) - } catch (e) { - res.write(`debug-poqo-fail: error: ${this.debugFailPOQo}\n`) - } - res.end() - }) - - // todo need to sort out a cleaner way to allow local override of debug config values. 
should solve this once - // Context.network.registerExternalGet('debug-ignore-data-tell', isDebugModeMiddleware, (req, res) => { - // try { - // const newChance = req.query.newChance - // const currentValue = this.config.debug.ignoreDataTellChance - // const configName = "ignore-data-tell" - // if (typeof newChance !== 'string' || !newChance) { - // res.write(`${configName}: missing param newChance ${this.debugFailPOQo}\n`) - // res.end() - // return - // } - // const newChanceInt = parseFloat(newChance) - // if (newChanceInt >= 1) { - // res.write(`${configName}: newChance not a float: ${this.debugFailPOQo}\n`) - // res.end() - // return - // } - // //todo need and intermediate value because it is not safe to one off change this - // this.config.debug.ignoreDataTellChance = newChanceInt - // res.write(`${configName}: set: ${this.debugFailPOQo}\n`) - // } catch (e) { - // res.write(`${configName}: error: ${this.debugFailPOQo}\n`) - // } - // res.end() - // }) - - Context.network.registerExternalGet('debug-poq-switch', isDebugModeMiddleware, (_req, res) => { - try { - this.stateManager.transactionQueue.useNewPOQ = !this.stateManager.transactionQueue.useNewPOQ - res.write(`this.useNewPOQ: ${this.stateManager.transactionQueue.useNewPOQ}\n`) - } catch (e) { - res.write(`${e}\n`) - } - res.end() - }) - - Context.network.registerExternalGet('debug-poq-wait-before-confirm', isDebugModeMiddleware, (_req, res) => { - try { - const waitTimeBeforeConfirm = _req.query.waitTimeBeforeConfirm as string - if (waitTimeBeforeConfirm && !isNaN(parseInt(waitTimeBeforeConfirm))) - this.config.stateManager.waitTimeBeforeConfirm = parseInt(waitTimeBeforeConfirm) - res.write(`stateManager.waitTimeBeforeConfirm: ${this.config.stateManager.waitTimeBeforeConfirm}\n`) - } catch (e) { - res.write(`${e}\n`) - } - res.end() - }) - - Context.network.registerExternalGet('debug-poq-wait-limit-confirm', isDebugModeMiddleware, (_req, res) => { - try { - const waitLimitAfterFirstVote = _req.query.waitLimitAfterFirstVote as string - if (waitLimitAfterFirstVote && !isNaN(parseInt(waitLimitAfterFirstVote))) - this.config.stateManager.waitLimitAfterFirstVote = parseInt(waitLimitAfterFirstVote) - res.write(`stateManager.waitLimitAfterFirstVote: ${this.config.stateManager.waitLimitAfterFirstVote}\n`) - } catch (e) { - res.write(`${e}\n`) - } - res.end() - }) - - Context.network.registerExternalGet('debug-poq-wait-before-receipt', isDebugModeMiddleware, (_req, res) => { - try { - const waitTimeBeforeReceipt = _req.query.waitTimeBeforeReceipt as string - if (waitTimeBeforeReceipt && !isNaN(parseInt(waitTimeBeforeReceipt))) - this.config.stateManager.waitTimeBeforeReceipt = parseInt(waitTimeBeforeReceipt) - res.write(`stateManager.waitTimeBeforeReceipt: ${this.config.stateManager.waitTimeBeforeReceipt}\n`) - } catch (e) { - res.write(`${e}\n`) - } - res.end() - }) - - Context.network.registerExternalGet('debug-poq-wait-limit-receipt', isDebugModeMiddleware, (_req, res) => { - try { - const waitLimitAfterFirstMessage = _req.query.waitLimitAfterFirstMessage as string - if (waitLimitAfterFirstMessage && !isNaN(parseInt(waitLimitAfterFirstMessage))) - this.config.stateManager.waitLimitAfterFirstMessage = parseInt(waitLimitAfterFirstMessage) - res.write(`stateManager.waitLimitAfterFirstVote: ${this.config.stateManager.waitLimitAfterFirstMessage}\n`) - } catch (e) { - res.write(`${e}\n`) - } - res.end() - }) - - Context.network.registerExternalGet('debug-produceBadVote', isDebugModeMiddleware, (req, res) => { - this.produceBadVote = 
!this.produceBadVote - res.json({ status: 'ok', produceBadVote: this.produceBadVote }) - }) - - Context.network.registerExternalGet('debug-produceBadChallenge', isDebugModeMiddleware, (req, res) => { - this.produceBadChallenge = !this.produceBadChallenge - res.json({ status: 'ok', produceBadChallenge: this.produceBadChallenge }) - }) - - Context.network.registerExternalGet( - 'debug-profile-tx-timestamp-endpoint', - isDebugModeMiddleware, - async (req, res) => { - try { - const { offset } = req.query - - res.write('Profiling tx timestamp endpoint of all network nodes\n') - - const randomTxId = Context.crypto.hash(randomUUID()) - const cycleMarker = CycleChain.getCurrentCycleMarker() - const cycleCounter = CycleChain.newest.counter - - const stats = new Map() - const failed = new Map() - - const p2pPromises = Array.from(NodeList.nodes.values()) - .filter((node) => node.id !== Self.id) - .map((node) => { - const start = Date.now() - return this.p2p - .askBinary( - node, - InternalRouteEnum.binary_get_tx_timestamp, - { - cycleMarker, - cycleCounter, - txId: randomTxId, - }, - serializeGetTxTimestampReq, - deserializeGetTxTimestampResp, - {}, - '', - false, - offset ? parseInt(`${offset}`) : 30 * 1000 - ) - .then(() => { - const end = Date.now() - stats.set(`${node.externalIp}:${node.externalPort}`, end - start) - }) - .catch(() => { - const end = Date.now() - failed.set(`${node.externalIp}:${node.externalPort}`, end - start) - }) - }) - - // Wait for all requests to finish - await Promise.allSettled(p2pPromises) - - // Compute statistics - const responseTimes = Array.from(stats.values()).sort((a, b) => a - b) - const medianResponseTime = responseTimes[Math.floor(responseTimes.length / 2)] || 0 - const averageResponseTime = responseTimes.reduce((a, b) => a + b, 0) / (responseTimes.length || 1) - const failedNodes = Array.from(failed.keys()) - - console.log('Profiling results:', { - medianResponseTime, - averageResponseTime, - failedNodes, - stats, - }) - res.write('Profiling results:\n') - res.write(`Median response time: ${medianResponseTime}ms\n`) - res.write(`Average response time: ${averageResponseTime}ms\n`) - res.write(`Failed nodes: ${failedNodes.join(', ')}\n`) - res.write('Detailed stats:\n') - res.write(`Node,Response Time\n`) - stats.forEach((responseTime, node) => { - res.write(`${node},${responseTime}\n`) - }) - res.end() - } catch (error) { - console.error('Unexpected error:', error) - res.write(`\n{"error":"Internal Server Error","details":"${error.message}"}`) - res.end() - } - } - ) - // this.p2p.registerInternal( - // 'get_tx_timestamp', - // async ( - // payload: { txId: string; cycleCounter: number; cycleMarker: string }, - // respond: (arg0: Shardus.TimestampReceipt) => unknown - // ) => { - // const { txId, cycleCounter, cycleMarker } = payload - // - // if (this.txTimestampCache.has(cycleCounter) && this.txTimestampCache.get(cycleCounter).has(txId)) { - // await respond(this.txTimestampCache.get(cycleCounter).get(txId)) - // } else { - // const tsReceipt: Shardus.TimestampReceipt = this.generateTimestampReceipt( - // txId, - // cycleMarker, - // cycleCounter - // ) - // await respond(tsReceipt) - // } - // - // } - // ) - - // this.p2p.registerInternal( - // 'remove_timestamp_cache', - // async ( - // payload: TimestampRemoveRequest, - // respond: (result: boolean) => unknown - // ) => { - // const { txId, receipt2, cycleCounter } = payload - // /* eslint-disable security/detect-object-injection */ - // if (this.txTimestampCache[cycleCounter] && 
this.txTimestampCache[cycleCounter][txId]) { - // // remove the timestamp from the cache - // delete this.txTimestampCache[cycleCounter][txId] - // this.txTimestampCache[cycleCounter][txId] = null - // /* prettier-ignore */ this.mainLogger.debug(`Removed timestamp cache for txId: ${txId}, timestamp: ${Utils.safeStringify(this.txTimestampCache[cycleCounter][txId])}`) - // nestedCountersInstance.countEvent('consensus', 'remove_timestamp_cache') - // } - // await respond(true) - // } - // ) - - const getTxTimestampBinary: Route> = { - name: InternalRouteEnum.binary_get_tx_timestamp, - handler: async (payload, respond, header) => { - const route = InternalRouteEnum.binary_get_tx_timestamp - this.profiler.scopedProfileSectionStart(route) - nestedCountersInstance.countEvent('internal', route) - profilerInstance.scopedProfileSectionStart(route, true, payload.length) - - const errorHandler = ( - errorType: RequestErrorEnum, - opts?: { customErrorLog?: string; customCounterSuffix?: string } - ): void => requestErrorHandler(route, errorType, header, opts) - - let tsReceipt: Shardus.TimestampReceipt - try { - const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cGetTxTimestampReq) - if (!requestStream) { - errorHandler(RequestErrorEnum.InvalidRequest) - return respond(tsReceipt, serializeGetTxTimestampResp) - } - - const readableReq = deserializeGetTxTimestampReq(requestStream) - // handle rare race condition where we have seen the txId but not the timestamp - if ( - Context.config.p2p.timestampCacheFix && - this.seenTimestampRequests.has(readableReq.txId) && - !this.txTimestampCacheByTxId.has(readableReq.txId) - ) { - nestedCountersInstance.countEvent('consensus', 'get_tx_timestamp seen txId but found no timestamp') - return respond(BadRequest('get_tx_timestamp seen txId but found no timestamp'), serializeResponseError) - } - this.seenTimestampRequests.add(readableReq.txId) - tsReceipt = this.getOrGenerateTimestampReceiptFromCache( - readableReq.txId, - readableReq.cycleMarker, - readableReq.cycleCounter - ) - return respond(tsReceipt, serializeGetTxTimestampResp) - } catch (e) { - nestedCountersInstance.countEvent('internal', `${route}-exception`) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route}: Exception executing request: ${utils.errorToStringFull(e)}`) - respond(tsReceipt, serializeGetTxTimestampResp) - } finally { - profilerInstance.scopedProfileSectionEnd(route, payload.length) - } - }, - } - - this.p2p.registerInternalBinary(getTxTimestampBinary.name, getTxTimestampBinary.handler) - - // this.p2p.registerInternal( - // 'get_confirm_or_challenge', - // async (payload: AppliedVoteQuery, respond: (arg0: ConfirmOrChallengeQuery) => unknown) => { - // nestedCountersInstance.countEvent('consensus', 'get_confirm_or_challenge') - // this.profiler.scopedProfileSectionStart('get_confirm_or_challenge handler', true) - // const confirmOrChallengeResult: ConfirmOrChallengeQueryResponse = { - // txId: '', - // appliedVoteHash: '', - // result: null, - // uniqueCount: 0, - // } - // try { - // const { txId } = payload - // let queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(txId) - // if (queueEntry == null) { - // // It is ok to search the archive for this. 
Not checking this was possibly breaking the gossip chain before - // queueEntry = this.stateManager.transactionQueue.getQueueEntryArchived( - // txId, - // 'get_confirm_or_challenge' - // ) - // } - - // if (queueEntry == null) { - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug('get_confirm_or_challenge: queueEntry not found in getQueueEntrySafe or getQueueEntryArchived for txId: ', txId) - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`get_confirm_or_challenge no queue entry for ${payload.txId} dbg:${this.stateManager.debugTXHistory[utils.stringifyReduce(payload.txId)]}`) - // await respond(confirmOrChallengeResult) - // } - // if (queueEntry.receivedBestConfirmation == null && queueEntry.receivedBestChallenge == null) { - // nestedCountersInstance.countEvent( - // 'consensus', - // 'get_confirm_or_challenge no confirmation or challenge' - // ) - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`get_confirm_or_challenge no confirmation or challenge for ${queueEntry.logID}, bestVote: ${Utils.safeStringify(queueEntry.receivedBestVote)}, bestConfirmation: ${Utils.safeStringify(queueEntry.receivedBestConfirmation)}`) - // await respond(confirmOrChallengeResult) - // } - - // // refine the result and unique count - // const { receivedBestChallenge, receivedBestConfirmation, uniqueChallengesCount } = queueEntry; - // if (receivedBestChallenge && uniqueChallengesCount >= this.config.stateManager.minRequiredChallenges) { - // confirmOrChallengeResult.result = receivedBestChallenge; - // confirmOrChallengeResult.uniqueCount = uniqueChallengesCount; - // } else { - // confirmOrChallengeResult.result = receivedBestConfirmation; - // confirmOrChallengeResult.uniqueCount = 1; - // } - // await respond(confirmOrChallengeResult) - // } catch (e) { - // if (logFlags.error) this.mainLogger.error(`get_confirm_or_challenge error ${e.message}`) - // } finally { - // this.profiler.scopedProfileSectionEnd('get_confirm_or_challenge handler') - // } - // } - // ) - - const getChallengeOrConfirmBinaryHandler: Route> = { - name: InternalRouteEnum.binary_get_confirm_or_challenge, - // eslint-disable-next-line @typescript-eslint/no-unused-vars - handler: async (payload, respond, header, sign) => { - const route = InternalRouteEnum.binary_get_confirm_or_challenge - nestedCountersInstance.countEvent('internal', route) - this.profiler.scopedProfileSectionStart(route, true, payload.length) - const confirmOrChallengeResult: GetConfirmOrChallengeResp = { - txId: '', - appliedVoteHash: '', - result: null, - uniqueCount: 0, - } - try { - const reqStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cGetConfirmOrChallengeReq) - if (!reqStream) { - nestedCountersInstance.countEvent('internal', `${route}-invalid_request`) - respond(confirmOrChallengeResult, serializeGetConfirmOrChallengeResp) - return - } - const request = deserializeGetConfirmOrChallengeReq(reqStream) - const { txId } = request - let queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(txId) - if (queueEntry == null) { - // It is ok to search the archive for this. 
Not checking this was possibly breaking the gossip chain before - queueEntry = this.stateManager.transactionQueue.getQueueEntryArchived(txId, route) - } - if (queueEntry == null) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`get_confirm_or_challenge no queue entry for ${txId} dbg:${this.stateManager.debugTXHistory[utils.stringifyReduce(txId)]}`) - respond(confirmOrChallengeResult, serializeGetConfirmOrChallengeResp) - return - } - if (queueEntry.receivedBestConfirmation == null && queueEntry.receivedBestChallenge == null) { - nestedCountersInstance.countEvent('consensus', 'get_confirm_or_challenge no confirmation or challenge') - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`get_confirm_or_challenge no confirmation or challenge for ${queueEntry.logID}, bestVote: ${Utils.safeStringify(queueEntry.receivedBestVote)}, bestConfirmation: ${Utils.safeStringify(queueEntry.receivedBestConfirmation)}`) - respond(confirmOrChallengeResult, serializeGetConfirmOrChallengeResp) - return - } - - // refine the result and unique count - const { receivedBestChallenge, receivedBestConfirmation, uniqueChallengesCount } = queueEntry - if (receivedBestChallenge && uniqueChallengesCount >= this.config.stateManager.minRequiredChallenges) { - confirmOrChallengeResult.result = receivedBestChallenge - confirmOrChallengeResult.uniqueCount = uniqueChallengesCount - } else { - confirmOrChallengeResult.result = receivedBestConfirmation - confirmOrChallengeResult.uniqueCount = 1 - } - respond(confirmOrChallengeResult, serializeGetConfirmOrChallengeResp) - } catch (e) { - // Error handling - if (logFlags.error) this.mainLogger.error(`get_confirm_or_challenge error ${e.message}`) - respond(confirmOrChallengeResult, serializeGetConfirmOrChallengeResp) - } finally { - this.profiler.scopedProfileSectionEnd(route) - } - }, - } - - this.p2p.registerInternalBinary(getChallengeOrConfirmBinaryHandler.name, getChallengeOrConfirmBinaryHandler.handler) - - // this.p2p.registerInternal( - // 'get_applied_vote', - // async (payload: AppliedVoteQuery, respond: (arg0: AppliedVoteQueryResponse) => unknown) => { - // nestedCountersInstance.countEvent('consensus', 'get_applied_vote') - // const { txId } = payload - // let queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(txId) - // if (queueEntry == null) { - // // It is ok to search the archive for this. Not checking this was possibly breaking the gossip chain before - // queueEntry = this.stateManager.transactionQueue.getQueueEntryArchived(txId, 'get_applied_vote') - // } - - // if (queueEntry == null) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`get_applied_vote no queue entry for ${payload.txId} dbg:${this.stateManager.debugTXHistory[utils.stringifyReduce(payload.txId)]}`) - // return - // } - // if (queueEntry.receivedBestVote == null) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`get_applied_vote no receivedBestVote for ${payload.txId} dbg:${this.stateManager.debugTXHistory[utils.stringifyReduce(payload.txId)]}`) - // return - // } - // const appliedVote: AppliedVoteQueryResponse = { - // txId, - // appliedVote: queueEntry.receivedBestVote, - // appliedVoteHash: queueEntry.receivedBestVoteHash - // ? 
queueEntry.receivedBestVoteHash - // : this.calculateVoteHash(queueEntry.receivedBestVote), - // } - // await respond(appliedVote) - // } - // ) - - // const GetAppliedVoteBinaryHandler: Route> = { - // name: InternalRouteEnum.binary_get_applied_vote, - // // eslint-disable-next-line @typescript-eslint/no-unused-vars - // handler: async (payload, respond, header, sign) => { - // const route = InternalRouteEnum.binary_get_applied_vote - // nestedCountersInstance.countEvent('internal', route) - // this.profiler.scopedProfileSectionStart(route, false, payload.length) - // const errorHandler = ( - // errorType: RequestErrorEnum, - // opts?: { customErrorLog?: string; customCounterSuffix?: string } - // ): void => requestErrorHandler(route, errorType, header, opts) - - // try { - // const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cGetAppliedVoteReq) - // if (!requestStream) { - // errorHandler(RequestErrorEnum.InvalidRequestType) - // return respond(BadRequest('invalid request stream'), serializeResponseError) - // } - - // // verification data checks - // if (header.verification_data == null) { - // errorHandler(RequestErrorEnum.MissingVerificationData) - // return respond(BadRequest('missing verification data'), serializeResponseError) - // } - - // const txId = header.verification_data - // let queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(txId) - // if (queueEntry == null) { - // // check the archived queue entries - // queueEntry = this.stateManager.transactionQueue.getQueueEntryArchived(txId, route) - // } - - // if (queueEntry == null) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route} no queue entry for ${txId} dbg:${this.stateManager.debugTXHistory[utils.stringifyReduce(txId)]}`) - // errorHandler(RequestErrorEnum.InvalidRequest) - // return respond(NotFound('queue entry not found'), serializeResponseError) - // } - - // const req = deserializeGetAppliedVoteReq(requestStream) - // if (req.txId !== txId) { - // errorHandler(RequestErrorEnum.InvalidPayload, { customErrorLog: 'txId mismatch' }) - // return respond(BadRequest('txId mismatch'), serializeResponseError) - // } - - // if (queueEntry.receivedBestVote == null) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route} no receivedBestVote for ${req.txId} dbg:${this.stateManager.debugTXHistory[utils.stringifyReduce(req.txId)]}`) - // return respond(NotFound('receivedBestVote not found'), serializeResponseError) - // } - // const appliedVote: GetAppliedVoteResp = { - // txId, - // appliedVote: queueEntry.receivedBestVote, - // appliedVoteHash: queueEntry.receivedBestVoteHash - // ? 
queueEntry.receivedBestVoteHash - // : this.calculateVoteHash(queueEntry.receivedBestVote), - // } - // respond(appliedVote, serializeGetAppliedVoteResp) - // } catch (e) { - // nestedCountersInstance.countEvent('internal', `${route}-exception`) - // this.mainLogger.error(`${route}: Exception executing request: ${utils.errorToStringFull(e)}`) - // return respond(InternalError('exception executing request'), serializeResponseError) - // } finally { - // this.profiler.scopedProfileSectionEnd(route) - // } - // }, - // } - - // Comms.registerInternalBinary(GetAppliedVoteBinaryHandler.name, GetAppliedVoteBinaryHandler.handler) - - // Comms.registerGossipHandler( - // 'gossip-applied-vote', - // async (payload: AppliedVote, sender: string, tracker: string) => { - // nestedCountersInstance.countEvent('consensus', 'gossip-applied-vote') - // profilerInstance.scopedProfileSectionStart('gossip-applied-vote handler', true) - // try { - // const queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(payload.txid) // , payload.timestamp) - // if (queueEntry == null) { - // /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455103 ${shardusGetTime()} tx:${payload.txid} Note over ${NodeList.activeIdToPartition.get(Self.id)}: gossipHandlerAV:noTX`) - // return - // } - // const newVote = payload as AppliedVote - // const appendSuccessful = this.stateManager.transactionConsensus.tryAppendVote(queueEntry, newVote) - - // if (appendSuccessful) { - // /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455103 ${shardusGetTime()} tx:${payload.txid} Note over ${NodeList.activeIdToPartition.get(Self.id)}: gossipHandlerAV:appended`) - // const gossipGroup = this.stateManager.transactionQueue.queueEntryGetTransactionGroup(queueEntry) - // if (gossipGroup.length > 1) { - // // should consider only forwarding in some cases? - // this.stateManager.debugNodeGroup( - // queueEntry.acceptedTx.txId, - // queueEntry.acceptedTx.timestamp, - // `share appliedVote to consensus nodes`, - // gossipGroup - // ) - // Comms.sendGossip( - // 'gossip-applied-vote', - // newVote, - // tracker, - // null, - // queueEntry.transactionGroup, - // false, - // -1, - // queueEntry.acceptedTx.txId, - // `${NodeList.activeIdToPartition.get(newVote.node_id)}` - // ) - // } - // } - // } finally { - // profilerInstance.scopedProfileSectionEnd('gossip-applied-vote handler') - // } - // } - // ) - - // this.p2p.registerGossipHandler( - // 'spread_appliedReceipt', - // async ( - // payload: { - // txid: string - // result?: boolean - // appliedVotes?: AppliedVote[] - // app_data_hash?: string - // }, - // tracker: string, - // msgSize: number - // ) => { - // nestedCountersInstance.countEvent('consensus', 'spread_appliedReceipt') - // profilerInstance.scopedProfileSectionStart('spread_appliedReceipt', false, msgSize) - // let respondSize = cUninitializedSize - // try { - // const appliedReceipt = payload as AppliedReceipt - // let queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(appliedReceipt.txid) // , payload.timestamp) - // if (queueEntry == null) { - // if (queueEntry == null) { - // // It is ok to search the archive for this. Not checking this was possibly breaking the gossip chain before - // queueEntry = this.stateManager.transactionQueue.getQueueEntryArchived( - // payload.txid as string, - // 'spread_appliedReceipt' - // ) // , payload.timestamp) - // if (queueEntry != null) { - // // TODO : PERF on a faster version we may just bail if this lives in the arcive list. 
- // // would need to make sure we send gossip though. - // } - // } - // if (queueEntry == null) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`spread_appliedReceipt no queue entry for ${appliedReceipt.txid} dbg:${this.stateManager.debugTXHistory[utils.stringifyReduce(payload.txid)]}`) - // // NEW start repair process that will find the TX then apply repairs - // // this.stateManager.transactionRepair.repairToMatchReceiptWithoutQueueEntry(appliedReceipt) - // return - // } - // } - - // if ( - // this.stateManager.testFailChance( - // this.stateManager.ignoreRecieptChance, - // 'spread_appliedReceipt', - // utils.stringifyReduce(appliedReceipt.txid), - // '', - // logFlags.verbose - // ) === true - // ) { - // return - // } - - // // TODO STATESHARDING4 ENDPOINTS check payload format - // // TODO STATESHARDING4 ENDPOINTS that this message is from a valid sender (may need to check docs) - - // const receiptNotNull = appliedReceipt != null - - // if (queueEntry.gossipedReceipt === false) { - // queueEntry.gossipedReceipt = true - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`spread_appliedReceipt update ${queueEntry.logID} receiptNotNull:${receiptNotNull}`) - - // if (queueEntry.archived === false) { - // queueEntry.recievedAppliedReceipt = appliedReceipt - // } - - // // I think we handle the negative cases later by checking queueEntry.recievedAppliedReceipt vs queueEntry.appliedReceipt - - // // share the appliedReceipt. - // const sender = null - // const gossipGroup = this.stateManager.transactionQueue.queueEntryGetTransactionGroup(queueEntry) - // if (gossipGroup.length > 1) { - // // should consider only forwarding in some cases? - // this.stateManager.debugNodeGroup( - // queueEntry.acceptedTx.txId, - // queueEntry.acceptedTx.timestamp, - // `share appliedReceipt to neighbors`, - // gossipGroup - // ) - // //no await so we cant get the message out size in a reasonable way - // respondSize = await this.p2p.sendGossipIn( - // 'spread_appliedReceipt', - // appliedReceipt, - // tracker, - // sender, - // gossipGroup, - // false, - // -1, - // queueEntry.acceptedTx.txId - // ) - // } - // } else { - // // we get here if the receipt has already been shared - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`spread_appliedReceipt skipped ${queueEntry.logID} receiptNotNull:${receiptNotNull} Already Shared`) - // } - // } finally { - // profilerInstance.scopedProfileSectionEnd('spread_appliedReceipt', respondSize) - // } - // } - // ) - - // DEPRECATED AFTER POQO - // this.p2p.registerGossipHandler( - // 'spread_appliedReceipt2', - // async ( - // payload: any, - // tracker: string, - // msgSize: number - // ) => { - // nestedCountersInstance.countEvent('consensus', 'spread_appliedReceipt2 handler') - // profilerInstance.scopedProfileSectionStart('spread_appliedReceipt2', false, msgSize) - // const respondSize = cUninitializedSize - - // // ignore the message for debugging purpose - // if ( - // this.stateManager.testFailChance( - // this.stateManager.ignoreRecieptChance, - // 'spread_appliedReceipt2', - // utils.stringifyReduce(payload.txid), - // '', - // logFlags.verbose - // ) === true - // ) { - // return - // } - - // try { - // // extract txId - // let txId: string - // let receivedSignedReceipt: SignedReceipt - // if (Context.config.stateManager.attachDataToReceipt) { - // txId = payload.receipt?.txid - // receivedSignedReceipt = payload.receipt as SignedReceipt - // } else { - // receivedAppliedReceipt2 = payload as 
AppliedReceipt2 - // txId = receivedAppliedReceipt2.txid - // } - // if (receivedAppliedReceipt2 == null) { - // /* prettier-ignore */ this.mainLogger.error(`spread_appliedReceipt2 ${txId} received null receipt`) - // nestedCountersInstance.countEvent(`consensus`, `spread_appliedReceipt received null receipt`) - // return - // } - - // // we need confirmation in new POQ - // if (this.stateManager.transactionQueue.useNewPOQ && receivedAppliedReceipt2.confirmOrChallenge == null) { - // /* prettier-ignore */ this.mainLogger.error(`spread_appliedReceipt2 ${txId} received null receipt`) - // nestedCountersInstance.countEvent(`consensus`, `spread_appliedReceipt received null confirm message`) - // return - // } - - // // check if we have the queue entry - // let queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(txId) // , payload.timestamp) - // if (queueEntry == null) { - // if (queueEntry == null) { - // // It is ok to search the archive for this. Not checking this was possibly breaking the gossip chain before - // queueEntry = this.stateManager.transactionQueue.getQueueEntryArchived( - // txId, - // 'spread_appliedReceipt2' - // ) // , payload.timestamp) - // if (queueEntry != null) { - // // TODO : PERF on a faster version we may just bail if this lives in the arcive list. - // // would need to make sure we send gossip though. - // } - // } - // if (queueEntry == null) { - // /* prettier-ignore */ - // if (logFlags.error || this.stateManager.consensusLog) - // this.mainLogger.error( - // `spread_appliedReceipt no queue entry for ${txId} txId:${txId}` - // ) - // // NEW start repair process that will find the TX then apply repairs - // // this.stateManager.transactionRepair.repairToMatchReceiptWithoutQueueEntry(receivedAppliedReceipt2) - // return - // } - // } - // if (queueEntry.hasValidFinalData || queueEntry.accountDataSet) { - // /* prettier-ignore */ - // if (logFlags.debug || this.stateManager.consensusLog) - // this.mainLogger.debug(`spread_appliedReceipt2 skipped ${queueEntry.logID} Already Shared`) - // nestedCountersInstance.countEvent(`consensus`, `spread_appliedReceipt2 skipped Already Shared`) - // return - // } - - // // for debugging and testing purpose - // if ( - // this.stateManager.testFailChance( - // this.stateManager.ignoreRecieptChance, - // 'spread_appliedReceipt2', - // utils.stringifyReduce(txId), - // '', - // logFlags.verbose - // ) === true - // ) { - // return - // } - - // // todo: STATESHARDING4 ENDPOINTS check payload format - // // todo: STATESHARDING4 ENDPOINTS that this message is from a valid sender (may need to check docs) - - // const receiptNotNull = receivedAppliedReceipt2 != null - - // // repair only if data is not attached to the receipt - // if (Context.config.stateManager.attachDataToReceipt === false && (queueEntry.state === 'expired' || queueEntry.state === 'almostExpired')) { - // //have we tried to repair this yet? - // const startRepair = queueEntry.repairStarted === false - // /* prettier-ignore */ - // if (logFlags.debug || this.stateManager.consensusLog) this.mainLogger.debug(`spread_appliedReceipt2. tx expired. start repair:${startRepair}. update ${queueEntry.logID} receiptNotNull:${receiptNotNull}`); - // if (queueEntry.repairStarted === false) { - // nestedCountersInstance.countEvent('repair1', 'got receipt for expiredTX start repair') - // queueEntry.appliedReceiptForRepair2 = receivedAppliedReceipt2 - // //todo any limits to how many repairs at once to allow? 
- // this.stateManager.getTxRepair().repairToMatchReceipt(queueEntry) - // } - // //x - dont forward gossip, it is probably too late? - // //do forward gossip so we dont miss on sharing a receipt! - // //return - // } - - // // decide whether we should store and forward the receipt - // let shouldStore = false - // let shouldForward = false - // if (Context.config.stateManager.stuckTxQueueFix) { - // // queueEntry.gossipedReceipt will decide the actual forwarding - // shouldForward = true - // } - // if (this.config.stateManager.useNewPOQ === false) { - // shouldStore = queueEntry.gossipedReceipt === false - // } else { - // const localAppliedReceipt2 = queueEntry.appliedReceipt2 - // if (localAppliedReceipt2) { - // const localReceiptConfirmNode = localAppliedReceipt2.confirmOrChallenge.nodeId - // const receivedReceiptConfirmNode = receivedAppliedReceipt2.confirmOrChallenge.nodeId - // if (localReceiptConfirmNode === receivedReceiptConfirmNode) { - // if (Context.config.stateManager.stuckTxQueueFix) { - // // we should not care about the rank for receipt2+data gossips - // shouldForward = true - // } else { - // shouldForward = false - // } - // if (logFlags.debug) - // this.mainLogger.debug( - // `spread_appliedReceipt2 ${queueEntry.logID} we have the same receipt. We do not need to store but we will forward` - // ) - // } else { - // if (logFlags.debug) - // this.mainLogger.debug( - // `spread_appliedReceipt2 ${queueEntry.logID} we have different receipt ${ - // queueEntry.logID - // }. localReceipt: ${utils.stringifyReduce( - // localAppliedReceipt2 - // )}, receivedReceipt: ${utils.stringifyReduce(receivedAppliedReceipt2)}` - // ) - // const localReceiptRank = this.stateManager.transactionQueue.computeNodeRank( - // localReceiptConfirmNode, - // queueEntry.acceptedTx.txId, - // queueEntry.acceptedTx.timestamp - // ) - // const receivedReceiptRank = this.stateManager.transactionQueue.computeNodeRank( - // receivedReceiptConfirmNode, - // queueEntry.acceptedTx.txId, - // queueEntry.acceptedTx.timestamp - // ) - // if (receivedReceiptRank < localReceiptRank) { - // shouldStore = true - // shouldForward = true - // this.mainLogger.debug( - // `spread_appliedReceipt2 ${queueEntry.logID} received receipt is better. we will store and forward` - // ) - // } - // } - // } else { - // shouldStore = true - // shouldForward = true - // if (logFlags.debug) - // this.mainLogger.debug( - // `spread_appliedReceipt2 ${queueEntry.logID} we do not have a local or received receipt generated. will store and forward` - // ) - // } - // } - // // if we are tx group node and haven't got data yet, we should store and forward the receipt - // if (queueEntry.isInExecutionHome === false) { - // if (queueEntry.accountDataSet === false || Object.keys(queueEntry.collectedFinalData).length === 0) { - // shouldStore = true - // shouldForward = true - // if (logFlags.debug) - // this.mainLogger.debug( - // `spread_appliedReceipt2 ${queueEntry.logID} we are tx group node and do not have receipt2 yet. 
will store and forward` - // ) - // } - // } - // this.mainLogger.debug(`spread_appliedReceipt2 ${queueEntry.logID} shouldStore:${shouldStore}, shouldForward:${shouldForward} isInExecutionHome:${queueEntry.isInExecutionHome}, accountDataSet:${queueEntry.accountDataSet}, collectedFinalData:${Object.keys(queueEntry.collectedFinalData).length}`) - - // // process, store and forward the receipt - // if (shouldStore === true && queueEntry.gossipedReceipt === false) { - // /* prettier-ignore */ - // if (logFlags.debug || this.stateManager.consensusLog) - // this.mainLogger.debug( - // `spread_appliedReceipt2 update ${queueEntry.logID} receiptNotNull:${receiptNotNull}, appliedReceipt2: ${utils.stringifyReduce(receivedAppliedReceipt2)}` - // ) - - // if (queueEntry.archived === false) { - // queueEntry.recievedAppliedReceipt2 = receivedAppliedReceipt2 - // queueEntry.appliedReceipt2 = receivedAppliedReceipt2 // is this necessary? - // } else { - // this.mainLogger.error(`spread_appliedReceipt2 queueEntry.archived === true`) - // } - - // // commit the accounts if the receipt is valid and has data attached - // if (Context.config.stateManager.attachDataToReceipt && receivedAppliedReceipt2.result) { - // const wrappedStates = payload.wrappedStates as { [key: string]: Shardus.WrappedResponse} - // if (wrappedStates == null) { - // nestedCountersInstance.countEvent(`consensus`, `spread_appliedReceipt2 no wrappedStates`) - // this.mainLogger.error(`spread_appliedReceipt2 no wrappedStates for ${txId}`) - // } else { - // const filteredStates = {} - // const nodeShardData: StateManagerTypes.shardFunctionTypes.NodeShardData = - // this.stateManager.currentCycleShardData.nodeShardData - // for (const accountId in wrappedStates) { - // const isLocal = ShardFunctions.testAddressInRange( - // accountId, - // nodeShardData.storedPartitions - // ) - // if (isLocal) { - // filteredStates[accountId] = 1 - // } - // } - // const accountRecords = [] - // for (const accountId in wrappedStates) { - // if (filteredStates[accountId] == null) continue - // const wrappedState = wrappedStates[accountId] as Shardus.WrappedResponse - // const indexOfAccountIdInVote = receivedAppliedReceipt2.appliedVote.account_id.indexOf(accountId) - // if (indexOfAccountIdInVote === -1) { - // this.mainLogger.error(`spread_appliedReceipt2 accountId ${accountId} not found in appliedVote`) - // continue - // } - // const afterStateHash = receivedAppliedReceipt2.appliedVote.account_state_hash_after[indexOfAccountIdInVote] - // if (wrappedState.stateId !== afterStateHash) { - // this.mainLogger.error(`spread_appliedReceipt2 accountId ${accountId} state hash mismatch with appliedVote`) - // continue - // } - // queueEntry.collectedFinalData[accountId] = wrappedState // processTx() will do actual commit - // accountRecords.push(wrappedState) - // nestedCountersInstance.countEvent(`consensus`, `spread_appliedReceipt2 add to final data`, accountRecords.length) - // } - // if (logFlags.debug) this.mainLogger.debug(`Use final data from appliedReceipt2 ${queueEntry.logID}`, queueEntry.collectedFinalData); - // } - // } - - // // I think we handle the negative cases later by checking queueEntry.recievedAppliedReceipt vs queueEntry.receivedAppliedReceipt2 - - // // share the receivedAppliedReceipt2. - // const sender = null - // const gossipGroup = this.stateManager.transactionQueue.queueEntryGetTransactionGroup(queueEntry) - // if (gossipGroup.length > 1) { - // // should consider only forwarding in some cases? 
- // this.stateManager.debugNodeGroup( - // queueEntry.acceptedTx.txId, - // queueEntry.acceptedTx.timestamp, - // `share appliedReceipt to neighbors`, - // gossipGroup - // ) - // //no await so we cant get the message out size in a reasonable way - // this.p2p.sendGossipIn( - // 'spread_appliedReceipt2', - // payload, - // tracker, - // sender, - // gossipGroup, - // false, - // -1, - // queueEntry.acceptedTx.txId - // ) - // queueEntry.gossipedReceipt = true - // nestedCountersInstance.countEvent('consensus', 'spread_appliedReceipt2 gossip forwarded') - // } - // } else { - // // we get here if the receipt has already been shared - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`spread_appliedReceipt2 skipped ${queueEntry.logID} receiptNotNull:${receiptNotNull} Already Shared or shouldStoreAndForward:${shouldStore}`) - // } - // } catch (ex) { - // this.statemanager_fatal( - // `spread_appliedReceipt2_ex`, - // 'spread_appliedReceipt2 endpoint failed: ' + ex.name + ': ' + ex.message + ' at ' + ex.stack - // ) - // } finally { - // profilerInstance.scopedProfileSectionEnd('spread_appliedReceipt2') - // } - // } - // ) - - // DEPRECATED AFTER POQO - // Comms.registerGossipHandler( - // 'spread_confirmOrChallenge', - // (payload: ConfirmOrChallengeMessage, msgSize: number) => { - // nestedCountersInstance.countEvent('consensus', 'spread_confirmOrChallenge handler') - // profilerInstance.scopedProfileSectionStart('spread_confirmOrChallenge', false, msgSize) - // try { - // const queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(payload.appliedVote?.txid) // , payload.timestamp) - // if (queueEntry == null) { - // if (logFlags.error) { - // this.mainLogger.error( - // `spread_confirmOrChallenge no queue entry for ${payload.appliedVote?.txid} dbg:${ - // this.stateManager.debugTXHistory[utils.stringifyReduce(payload.appliedVote?.txid)] - // }` - // ) - // } - // return - // } - // if (queueEntry.acceptConfirmOrChallenge === false) { - // if (logFlags.debug) - // this.mainLogger.debug(`spread_confirmOrChallenge ${queueEntry.logID} not accepting anymore`) - // return - // } - - // const appendSuccessful = this.tryAppendMessage(queueEntry, payload) - - // if (logFlags.debug) - // this.mainLogger.debug( - // `spread_confirmOrChallenge ${queueEntry.logID} appendSuccessful:${appendSuccessful}` - // ) - - // if (appendSuccessful) { - // // Gossip further - // const sender = null - // const gossipGroup = this.stateManager.transactionQueue.queueEntryGetTransactionGroup(queueEntry) - // Comms.sendGossip('spread_confirmOrChallenge', payload, '', sender, gossipGroup, false, 10, queueEntry.acceptedTx.txId, `handler_${NodeList.activeIdToPartition.get(payload.appliedVote?.node_id)}`) - // queueEntry.gossipedConfirmOrChallenge = true - // } - // } catch (e) { - // this.mainLogger.error(`Error in spread_confirmOrChallenge handler: ${e.message}`) - // } finally { - // profilerInstance.scopedProfileSectionEnd('spread_confirmOrChallenge', msgSize) - // } - // } - // ) - - Comms.registerGossipHandler('poqo-receipt-gossip', (payload: SignedReceipt & { txGroupCycle: number }) => { - profilerInstance.scopedProfileSectionStart('poqo-receipt-gossip') - try { - const queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(payload.proposal.txid) - if (queueEntry == null) { - nestedCountersInstance.countEvent('poqo', 'error: gossip skipped: no queue entry') - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-receipt-gossip no queue entry for 
${payload.proposal.txid}`) - return - } - if (payload.txGroupCycle) { - if (queueEntry.txGroupCycle !== payload.txGroupCycle) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-receipt-gossip mismatch txGroupCycle for txid: ${payload.proposal.txid}, sender's txGroupCycle: ${payload.txGroupCycle}, our txGroupCycle: ${queueEntry.txGroupCycle}`) - nestedCountersInstance.countEvent( - 'poqo', - 'poqo-receipt-gossip: mismatch txGroupCycle for txid ' + payload.proposal.txid - ) - } - delete payload.txGroupCycle - } - - if (queueEntry.hasSentFinalReceipt === true) { - // We've already send this receipt, no need to send it again - return - } - - if (logFlags.verbose) - this.mainLogger.debug(`POQo: received receipt from gossip for ${queueEntry.logID} forwarding gossip`) - - const executionGroupNodes = new Set(queueEntry.executionGroup.map((node) => node.publicKey)) - const hasTwoThirdsMajority = this.verifyAppliedReceipt(payload, executionGroupNodes) - if (!hasTwoThirdsMajority) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`Receipt does not have the required majority for txid: ${payload.proposal.txid}`) - nestedCountersInstance.countEvent('poqo', 'poqo-receipt-gossip: Rejecting receipt because no majority') - return - } - - queueEntry.signedReceipt = { ...payload } - payload.txGroupCycle = queueEntry.txGroupCycle - Comms.sendGossip( - 'poqo-receipt-gossip', - payload, - null, - null, - queueEntry.transactionGroup, - false, - 4, - payload.proposal.txid, - '', - true - ) - - queueEntry.hasSentFinalReceipt = true - - // If the queue entry does not have the valid final data then request that - if (!queueEntry.hasValidFinalData) { - setTimeout(async () => { - // Check if we have final data - if (queueEntry.hasValidFinalData) { - return - } - if (logFlags.verbose) - this.mainLogger.debug(`poqo-receipt-gossip: requesting final data for ${queueEntry.logID}`) - nestedCountersInstance.countEvent('request-final-data', 'final data timeout, making explicit request') - - const nodesToAskKeys = payload.signaturePack?.map((signature) => signature.owner) - - await this.stateManager.transactionQueue.requestFinalData( - queueEntry, - payload.proposal.accountIDs, - nodesToAskKeys - ) - - nestedCountersInstance.countEvent('request-final-data', 'final data received') - }, this.config.stateManager.nonExWaitForData) - } - } finally { - profilerInstance.scopedProfileSectionEnd('poqo-receipt-gossip') - } - }) - - const poqoDataAndReceiptBinaryHandler: Route> = { - name: InternalRouteEnum.binary_poqo_data_and_receipt, - handler: async (payload, respond, header, sign) => { - const route = InternalRouteEnum.binary_poqo_data_and_receipt - this.profiler.scopedProfileSectionStart(route, false) - try { - const _sender = header.sender_id - const reqStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cPoqoDataAndReceiptReq) - if (!reqStream) { - nestedCountersInstance.countEvent('internal', `${route}-invalid_request`) - return - } - const readableReq = deserializePoqoDataAndReceiptResp(reqStream) - // make sure we have it - const queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(readableReq.finalState.txid) // , payload.timestamp) - //It is okay to ignore this transaction if the txId is not found in the queue. - if (queueEntry == null) { - //In the past we would enqueue the TX, expecially if syncing but that has been removed. - //The normal mechanism of sharing TXs is good enough. 
- nestedCountersInstance.countEvent('processing', 'broadcast_finalstate_noQueueEntry') - return - } - - // validate corresponding tell sender - if (_sender == null) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-data-and-receipt invalid sender for txid: ${readableReq.finalState.txid}, sender: ${_sender}`) - return - } - - if (readableReq.txGroupCycle) { - if (queueEntry.txGroupCycle !== readableReq.txGroupCycle) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`binary_poqo_data_and_receipt mismatch txGroupCycle for txid: ${readableReq.finalState.txid}, sender's txGroupCycle: ${readableReq.txGroupCycle}, our txGroupCycle: ${queueEntry.txGroupCycle}`) - nestedCountersInstance.countEvent( - 'poqo', - 'binary_poqo_data_and_receipt: mismatch txGroupCycle for txid ' + readableReq.finalState.txid - ) - } - delete readableReq.txGroupCycle - } - - const isValidFinalDataSender = - this.stateManager.transactionQueue.factValidateCorrespondingTellFinalDataSender(queueEntry, _sender) - if (isValidFinalDataSender === false) { - nestedCountersInstance.countEvent( - 'poqo', - 'poqo-data-and-receipt: Rejecting receipt: isValidFinalDataSender === false' - ) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-data-and-receipt invalid: sender ${_sender} for data: ${queueEntry.acceptedTx.txId}`) - return - } - - if (readableReq.receipt == null) { - nestedCountersInstance.countEvent( - 'poqo', - 'poqo-data-and-receipt: Rejecting receipt: readableReq.receipt == null' - ) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-data-and-receipt invalid: readableReq.receipt == null sender ${_sender}`) - return - } - if (readableReq.finalState.txid != readableReq.receipt.proposal.txid) { - nestedCountersInstance.countEvent( - 'poqo', - 'poqo-data-and-receipt: Rejecting receipt: readableReq.finalState.txid != readableReq.receipt.txid' - ) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-data-and-receipt invalid: readableReq.finalState.txid != readableReq.receipt.txid sender ${_sender} ${readableReq.finalState.txid} != ${readableReq.receipt.proposal.txid}`) - return - } - - if (!queueEntry.hasSentFinalReceipt) { - const executionGroupNodes = new Set(queueEntry.executionGroup.map((node) => node.publicKey)) - const hasTwoThirdsMajority = this.verifyAppliedReceipt(readableReq.receipt, executionGroupNodes) - if (!hasTwoThirdsMajority) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`Receipt does not have the required majority for txid: ${readableReq.receipt.proposal.txid}`) - nestedCountersInstance.countEvent('poqo', 'poqo-data-and-receipt: Rejecting receipt because no majority') - return - } - if (logFlags.verbose) - this.mainLogger.debug(`POQo: received data & receipt for ${queueEntry.logID} starting receipt gossip`) - queueEntry.signedReceipt = readableReq.receipt - const receiptToGossip = { ...readableReq.receipt, txGroupCycle: queueEntry.txGroupCycle } - Comms.sendGossip( - 'poqo-receipt-gossip', - receiptToGossip, - null, - null, - queueEntry.transactionGroup, - false, - 4, - readableReq.finalState.txid, - '', - true - ) - queueEntry.hasSentFinalReceipt = true - } - - if (logFlags.debug) - this.mainLogger.debug( - `poqo-data-and-receipt ${queueEntry.logID}, ${Utils.safeStringify(readableReq.finalState.stateList)}` - ) - // add the data in - const savedAccountIds: Set = new Set() - for (const data of readableReq.finalState.stateList) { - //let wrappedResponse = data as Shardus.WrappedResponse - 
//this.queueEntryAddData(queueEntry, data) - if (data == null) { - /* prettier-ignore */ if (logFlags.error && logFlags.verbose) this.mainLogger.error(`poqo-data-and-receipt data == null`) - continue - } - - if (queueEntry.collectedFinalData[data.accountId] == null) { - queueEntry.collectedFinalData[data.accountId] = data - savedAccountIds.add(data.accountId) - /* prettier-ignore */ if (logFlags.playback && logFlags.verbose) this.logger.playbackLogNote('poqo-data-and-receipt', `${queueEntry.logID}`, `poqo-data-and-receipt addFinalData qId: ${queueEntry.entryID} data:${utils.makeShortHash(data.accountId)} collected keys: ${utils.stringifyReduce(Object.keys(queueEntry.collectedFinalData))}`) - } - - // if (queueEntry.state === 'syncing') { - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_gotBroadcastfinalstate', `${queueEntry.acceptedTx.txId}`, ` qId: ${queueEntry.entryID} data:${data.accountId}`) - // } - } - // const nodesToSendTo: Set = new Set() - - // for (const data of readableReq.finalState.stateList) { - // if (data == null) { - // continue - // } - // if (savedAccountIds.has(data.accountId) === false) { - // continue - // } - // const storageNodes = this.stateManager.transactionQueue.getStorageGroupForAccount(data.accountId) - // for (const node of storageNodes) { - // nodesToSendTo.add(node) - // } - // } - // if (nodesToSendTo.size > 0) { - // const finalDataToGossip = { ...readableReq.finalState, txGroupCycle: queueEntry.txGroupCycle } - // Comms.sendGossip( - // 'gossip-final-state', - // finalDataToGossip, - // null, - // null, - // Array.from(nodesToSendTo), - // false, - // 4, - // queueEntry.acceptedTx.txId - // ) - // nestedCountersInstance.countEvent(`processing`, `forwarded final data to storage nodes`) - // } - } catch (e) { - /* prettier-ignore */ if (logFlags.error) console.error(`Error processing poqoDataAndReceipt Binary handler: ${e}`) - nestedCountersInstance.countEvent('internal', `${route}-exception`) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route}: Exception executing request: ${utils.errorToStringFull(e)}`) - } finally { - profilerInstance.scopedProfileSectionEnd(route) - } - }, - } - Comms.registerInternalBinary(poqoDataAndReceiptBinaryHandler.name, poqoDataAndReceiptBinaryHandler.handler) - - // Comms.registerInternal( - // 'poqo-data-and-receipt', - // async ( - // payload: { - // finalState: { txid: string; stateList: Shardus.WrappedResponse[] }, - // receipt: AppliedReceipt2 - // }, - // _respond: unknown, - // _sender: string, - // ) => { - // profilerInstance.scopedProfileSectionStart('poqo-data-and-receipt') - // try { - // // make sure we have it - // const queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(payload.finalState.txid) // , payload.timestamp) - // //It is okay to ignore this transaction if the txId is not found in the queue. - // if (queueEntry == null) { - // //In the past we would enqueue the TX, expecially if syncing but that has been removed. - // //The normal mechanism of sharing TXs is good enough. 
- // nestedCountersInstance.countEvent('processing', 'broadcast_finalstate_noQueueEntry') - // return - // } - - // // validate corresponding tell sender - // if (_sender == null) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-data-and-receipt invalid sender for txid: ${payload.finalState.txid}, sender: ${_sender}`) - // return - // } - - // if (payload.txGroupCycle) { - // if (queueEntry.txGroupCycle !== payload.txGroupCycle) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-data-and-receipt mismatch txGroupCycle for txid: ${payload.finalState.txid}, sender's txGroupCycle: ${payload.txGroupCycle}, our txGroupCycle: ${queueEntry.txGroupCycle}`) - // nestedCountersInstance.countEvent( - // 'poqo', - // 'poqo-data-and-receipt: mismatch txGroupCycle for txid ' + payload.finalState.txid - // ) - // } - // delete payload.txGroupCycle - // } - - // const isValidFinalDataSender = this.stateManager.transactionQueue.factValidateCorrespondingTellFinalDataSender(queueEntry, _sender) - // if (isValidFinalDataSender === false) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-data-and-receipt invalid sender ${_sender} for data: ${queueEntry.acceptedTx.txId}`) - // return - // } - - // if (!queueEntry.hasSentFinalReceipt) { - // const executionGroupNodes = new Set(queueEntry.executionGroup.map(node => node.publicKey)); - // const hasTwoThirdsMajority = this.verifyAppliedReceipt(payload.receipt, executionGroupNodes) - // if(!hasTwoThirdsMajority) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`Receipt does not have the required majority for txid: ${payload.receipt.txid}`) - // nestedCountersInstance.countEvent('poqo', 'poqo-data-and-receipt: Rejecting receipt because no majority') - // return - // } - // if (logFlags.verbose) this.mainLogger.debug(`POQo: received data & receipt for ${queueEntry.logID} starting receipt gossip`) - // queueEntry.poqoReceipt = payload.receipt - // queueEntry.appliedReceipt2 = payload.receipt - // queueEntry.recievedAppliedReceipt2 = payload.receipt - // payload.txGroupCycle = queueEntry.txGroupCycle - // Comms.sendGossip( - // 'poqo-receipt-gossip', - // payload.receipt, - // null, - // null, - // queueEntry.transactionGroup, - // false, - // 4, - // payload.finalState.txid, - // '', - // true - // ) - // queueEntry.hasSentFinalReceipt = true - // } - - // if (logFlags.debug) - // this.mainLogger.debug(`poqo-data-and-receipt ${queueEntry.logID}, ${Utils.safeStringify(payload.finalState.stateList)}`) - // // add the data in - // const savedAccountIds: Set = new Set() - // for (const data of payload.finalState.stateList) { - // //let wrappedResponse = data as Shardus.WrappedResponse - // //this.queueEntryAddData(queueEntry, data) - // if (data == null) { - // /* prettier-ignore */ if (logFlags.error && logFlags.verbose) this.mainLogger.error(`poqo-data-and-receipt data == null`) - // continue - // } - - // if (queueEntry.collectedFinalData[data.accountId] == null) { - // queueEntry.collectedFinalData[data.accountId] = data - // savedAccountIds.add(data.accountId) - // /* prettier-ignore */ if (logFlags.playback && logFlags.verbose) this.logger.playbackLogNote('poqo-data-and-receipt', `${queueEntry.logID}`, `poqo-data-and-receipt addFinalData qId: ${queueEntry.entryID} data:${utils.makeShortHash(data.accountId)} collected keys: ${utils.stringifyReduce(Object.keys(queueEntry.collectedFinalData))}`) - // } - - // // if (queueEntry.state === 'syncing') { - // // /* 
prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_gotBroadcastfinalstate', `${queueEntry.acceptedTx.txId}`, ` qId: ${queueEntry.entryID} data:${data.accountId}`) - // // } - // } - // const nodesToSendTo: Set = new Set() - // for (const data of payload.finalState.stateList) { - // if (data == null) { - // continue - // } - // if (savedAccountIds.has(data.accountId) === false) { - // continue - // } - // const storageNodes = this.stateManager.transactionQueue.getStorageGroupForAccount(data.accountId) - // for (const node of storageNodes) { - // nodesToSendTo.add(node) - // } - // } - // if (nodesToSendTo.size > 0) { - // const finalDataToGossip = { ...payload.finalState, txGroupCycle: queueEntry.txGroupCycle } - // Comms.sendGossip( - // 'gossip-final-state', - // finalDataToGossip, - // null, - // null, - // Array.from(nodesToSendTo), - // false, - // 4, - // queueEntry.acceptedTx.txId - // ) - // nestedCountersInstance.countEvent(`processing`, `forwarded final data to storage nodes`) - // } - // } finally { - // profilerInstance.scopedProfileSectionEnd('poqo-data-and-receipt') - // } - // } - // ) - // Comms.registerInternal( - // 'poqo-send-receipt', - // ( - // payload: AppliedReceipt2 & { txGroupCycle: number }, - // _respond: unknown, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('poqo-send-receipt', false, msgSize) - // try{ - // const queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(payload.txid) - // if (queueEntry == null) { - // /* prettier-ignore */ nestedCountersInstance.countEvent('poqo', 'poqo-send-receipt: no queue entry found') - // return - // } - // if (payload.txGroupCycle) { - // if (queueEntry.txGroupCycle !== payload.txGroupCycle) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-send-receipt mismatch txGroupCycle for txid: ${payload.txid}, sender's txGroupCycle: ${payload.txGroupCycle}, our txGroupCycle: ${queueEntry.txGroupCycle}`) - // nestedCountersInstance.countEvent( - // 'poqo', - // 'poqo-send-receipt: mismatch txGroupCycle for tx ' + payload.txid - // ) - // } - // delete payload.txGroupCycle - // } - - // if (queueEntry.poqoReceipt) { - // // We've already handled this - // return - // } - - // if (Math.random() < this.debugFailPOQo) { - // nestedCountersInstance.countEvent('poqo', 'debug fail wont forward receipt') - // return - // } - - // if (logFlags.verbose) this.mainLogger.debug(`POQo: Received receipt from aggregator for ${queueEntry.logID} starting CT2 for data & receipt`) - // const executionGroupNodes = new Set(queueEntry.executionGroup.map((node) => node.publicKey)) - // const hasTwoThirdsMajority = this.verifyAppliedReceipt(payload, executionGroupNodes) - // if (!hasTwoThirdsMajority) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`Receipt does not have the required majority for txid: ${payload.txid}`) - // nestedCountersInstance.countEvent('poqo', 'poqo-send-receipt: Rejecting receipt because no majority') - // return - // } - // const receivedReceipt = payload as AppliedReceipt2 - // queueEntry.poqoReceipt = receivedReceipt - // queueEntry.appliedReceipt2 = receivedReceipt - // queueEntry.recievedAppliedReceipt2 = receivedReceipt - // queueEntry.hasSentFinalReceipt = true - // payload.txGroupCycle = queueEntry.txGroupCycle - // Comms.sendGossip( - // 'poqo-receipt-gossip', - // payload, - // null, - // null, - // queueEntry.transactionGroup, - // false, - // 4, - // 
payload.txid, - // '', - // true - // ) - // this.stateManager.transactionQueue.factTellCorrespondingNodesFinalData(queueEntry) - // } finally { - // profilerInstance.scopedProfileSectionEnd('poqo-send-receipt') - // } - // } - // ) - - const poqoSendReceiptBinary: Route> = { - name: InternalRouteEnum.binary_poqo_send_receipt, - handler: async (payload, respond, header) => { - const route = InternalRouteEnum.binary_poqo_send_receipt - this.profiler.scopedProfileSectionStart(route) - nestedCountersInstance.countEvent('internal', route) - profilerInstance.scopedProfileSectionStart(route, false, payload.length) - - const errorHandler = ( - errorType: RequestErrorEnum, - opts?: { customErrorLog?: string; customCounterSuffix?: string } - ): void => requestErrorHandler(route, errorType, header, opts) - - try { - const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cPoqoSendReceiptReq) - if (!requestStream) { - return errorHandler(RequestErrorEnum.InvalidRequest) - } - - const readableReq = deserializePoqoSendReceiptReq(requestStream) - - const queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(readableReq.proposal.txid) - if (queueEntry == null) { - /* prettier-ignore */ nestedCountersInstance.countEvent('poqo', 'binary/poqo_send_receipt: no queue entry found') - return - } - if (readableReq.txGroupCycle) { - if (queueEntry.txGroupCycle !== readableReq.txGroupCycle) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`binary_poqo_send_receipt mismatch txGroupCycle for txid: ${readableReq.proposal.txid}, sender's txGroupCycle: ${readableReq.txGroupCycle}, our txGroupCycle: ${queueEntry.txGroupCycle}`) - nestedCountersInstance.countEvent( - 'poqo', - 'binary_poqo_send_receipt: mismatch txGroupCycle for tx ' + readableReq.proposal.txid - ) - } - delete readableReq.txGroupCycle - } - - if (queueEntry.signedReceipt) { - // We've already handled this - return - } - - const executionGroupNodes = new Set(queueEntry.executionGroup.map((node) => node.publicKey)) - const hasTwoThirdsMajority = this.verifyAppliedReceipt(readableReq, executionGroupNodes) - if (!hasTwoThirdsMajority) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`Receipt does not have the required majority for txid: ${readableReq.proposal.txid}`) - nestedCountersInstance.countEvent('poqo', 'poqo-send-receipt: Rejecting receipt because no majority') - return - } - - if (logFlags.verbose) - this.mainLogger.debug( - `POQo: Received receipt from aggregator for ${queueEntry.logID} starting CT2 for data & receipt` - ) - const receivedReceipt = readableReq as SignedReceipt - queueEntry.signedReceipt = receivedReceipt - queueEntry.hasSentFinalReceipt = true - const receiptToGossip = { ...readableReq, txGroupCycle: queueEntry.txGroupCycle } - Comms.sendGossip( - 'poqo-receipt-gossip', - receiptToGossip, - null, - null, - queueEntry.transactionGroup, - false, - 4, - readableReq.proposal.txid, - '', - true - ) - - // Only forward data if we have a valid matching preApply - if (queueEntry.ourVoteHash === readableReq.proposalHash) { - // We are a winning node - nestedCountersInstance.countEvent('poqo', 'poqo-send-receipt: forwarding data') - this.stateManager.transactionQueue.factTellCorrespondingNodesFinalData(queueEntry) - } else { - nestedCountersInstance.countEvent('poqo', "poqo-send-receipt: no matching data. 
Can't forward") - } - } catch (e) { - /* prettier-ignore */ if (logFlags.error) console.error(`Error processing poqoSendReceiptBinary handler: ${e}`) - nestedCountersInstance.countEvent('internal', `${route}-exception`) - /* prettier-ignore */ if (logFlags.error)this.mainLogger.error(`${route}: Exception executing request: ${utils.errorToStringFull(e)}`) - } finally { - profilerInstance.scopedProfileSectionEnd(route) - } - }, - } - - Comms.registerInternalBinary(poqoSendReceiptBinary.name, poqoSendReceiptBinary.handler) - - // Comms.registerInternal( - // 'poqo-send-vote', - // async ( - // payload: AppliedVoteHash & { txGroupCycle: number }, - // _respond: unknown, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('poqo-send-vote', false, msgSize) - // try { - // const queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(payload.txid) - // if (queueEntry == null) { - // /* prettier-ignore */ nestedCountersInstance.countEvent('poqo', 'poqo-send-vote: no queue entry found') - // return - // } - // if (payload.txGroupCycle) { - // if (queueEntry.txGroupCycle !== payload.txGroupCycle) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`poqo-send-vote mismatch txGroupCycle for txid: ${payload.txid}, sender's txGroupCycle: ${payload.txGroupCycle}, our txGroupCycle: ${queueEntry.txGroupCycle}`) - // nestedCountersInstance.countEvent( - // 'poqo', - // 'poqo-send-vote: mismatch txGroupCycle for tx ' + payload.txid - // ) - // } - // delete payload.txGroupCycle - // } - - // const collectedVoteHash = payload as AppliedVoteHash - - // // Check if vote hash has a sign - // if (!collectedVoteHash.sign) { - // /* prettier-ignore */ nestedCountersInstance.countEvent('poqo', 'poqo-send-vote: no sign found') - // return - // } - // // We can reuse the same function for POQo - // this.tryAppendVoteHash(queueEntry, collectedVoteHash) - // } finally { - // profilerInstance.scopedProfileSectionEnd('poqo-send-vote') - // } - // } - // ) - - const poqoSendVoteBinaryHandler: Route> = { - name: InternalRouteEnum.binary_poqo_send_vote, - handler: (payload, respond, header, sign) => { - const route = InternalRouteEnum.binary_poqo_send_vote - profilerInstance.scopedProfileSectionStart(route, false) - try { - const stream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cPoqoSendVoteReq) - if (!payload) { - nestedCountersInstance.countEvent('internal', `${route}-invalid_request`) - return - } - const readableReq = deserializePoqoSendVoteReq(stream) - const queueEntry = this.stateManager.transactionQueue.getQueueEntrySafe(readableReq.txid) - if (queueEntry == null) { - /* prettier-ignore */ nestedCountersInstance.countEvent('poqo', 'poqo-send-vote: no queue entry found') - return - } - // if (readableReq.txGroupCycle) { - // if (queueEntry.txGroupCycle !== readableReq.txGroupCycle) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`binary_poqo_send_vote mismatch txGroupCycle for txid: ${readableReq.txid}, sender's txGroupCycle: ${readableReq.txGroupCycle}, our txGroupCycle: ${queueEntry.txGroupCycle}`) - // nestedCountersInstance.countEvent( - // 'poqo', - // 'binary_poqo_send_vote: mismatch txGroupCycle for tx ' + readableReq.txid - // ) - // } - // delete readableReq.txGroupCycle - // } - const collectedVoteHash = readableReq as AppliedVoteHash - - // Check if vote hash has a sign - if (!collectedVoteHash.sign) { - /* prettier-ignore */ nestedCountersInstance.countEvent('poqo', 
'poqo-send-vote: no sign found') - return - } - // We can reuse the same function for POQo - this.tryAppendVoteHash(queueEntry, collectedVoteHash) - } catch (e) { - /* prettier-ignore */ if (logFlags.error) console.error(`Error processing poqoSendVoteBinary handler: ${e}`) - nestedCountersInstance.countEvent('internal', `${route}-exception`) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route}: Exception executing request: ${utils.errorToStringFull(e)}`) - } finally { - profilerInstance.scopedProfileSectionEnd(route) - } - }, - } - Comms.registerInternalBinary(poqoSendVoteBinaryHandler.name, poqoSendVoteBinaryHandler.handler) + // Bind methods from split files + Object.assign(TransactionConsenus.prototype, handlerMethods) } verifyAppliedReceipt(receipt: SignedReceipt, executionGroupNodes: Set): boolean { @@ -1992,105 +396,6 @@ class TransactionConsenus { return homeNode.node } - /** - * shareAppliedReceipt - * gossip the appliedReceipt to the transaction group - * @param queueEntry - */ - // DEPRECATED AFTER POQO - // shareAppliedReceipt(queueEntry: QueueEntry): void { - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_shareAppliedReceipt', `${queueEntry.logID}`, `qId: ${queueEntry.entryID} `) - - // if (queueEntry.appliedReceipt2 == null) { - // //take no action - // /* prettier-ignore */ nestedCountersInstance.countEvent('transactionQueue', 'shareAppliedReceipt-skipped appliedReceipt2 == null') - // return - // } - - // // share the appliedReceipt. - // const sender = null - // const gossipGroup = this.stateManager.transactionQueue.queueEntryGetTransactionGroup(queueEntry) - - // // todo only recalc if cycle boundry? - // // let updatedGroup = this.stateManager.transactionQueue.queueEntryGetTransactionGroup(queueEntry, true) - - // if (gossipGroup.length > 1) { - // if (queueEntry.ourNodeInTransactionGroup === false) { - // return - // } - - // // This code tried to optimize things by not having every node share a receipt. - - // // //look at our index in the consensus. - // // //only have certain nodes sharde gossip the receipt. - // // let ourIndex = queueEntry.ourTXGroupIndex - // // let groupLength = gossipGroup.length - // // if(this.stateManager.transactionQueue.executeInOneShard){ - // // //we have to use different inputs if executeInOneShard is true - // // ourIndex = queueEntry.ourExGroupIndex - // // groupLength = queueEntry.executionGroup.length - // // } - - // // if(ourIndex > 0){ - // // let everyN = Math.max(1,Math.floor(groupLength * 0.4)) - // // let nonce = parseInt('0x' + queueEntry.acceptedTx.txId.substr(0,2)) - // // let idxPlusNonce = ourIndex + nonce - // // let idxModEveryN = idxPlusNonce % everyN - // // if(idxModEveryN > 0){ - // // nestedCountersInstance.countEvent('transactionQueue', 'shareAppliedReceipt-skipped') - // // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shareAppliedReceipt-skipped', `${queueEntry.acceptedTx.txId}`, `ourIndex:${ourIndex} groupLength:${ourIndex} `) - // // return - // // } - // // } - - // nestedCountersInstance.countEvent('transactionQueue', 'shareAppliedReceipt-notSkipped') - // // should consider only forwarding in some cases? 
- // this.stateManager.debugNodeGroup( - // queueEntry.acceptedTx.txId, - // queueEntry.acceptedTx.timestamp, - // `share appliedReceipt to neighbors`, - // gossipGroup - // ) - // let payload: any = queueEntry.appliedReceipt2 - // const receipt2 = this.stateManager.getReceipt2(queueEntry) - // if (receipt2 == null) { - // nestedCountersInstance.countEvent('transactionQueue', 'shareAppliedReceipt-receipt2 == null') - // return - // } - - // if (Context.config.stateManager.attachDataToReceipt) { - // // Report data to corresponding nodes - // const ourNodeData = this.stateManager.currentCycleShardData.nodeShardData - // const datas: { [accountID: string]: Shardus.WrappedResponse } = {} - - // const applyResponse = queueEntry.preApplyTXResult.applyResponse - // let wrappedStates = this.stateManager.useAccountWritesOnly ? {} : queueEntry.collectedData - // const writtenAccountsMap: WrappedResponses = {} - // if (applyResponse != null && applyResponse.accountWrites != null && applyResponse.accountWrites.length > 0) { - // for (const writtenAccount of applyResponse.accountWrites) { - // writtenAccountsMap[writtenAccount.accountId] = writtenAccount.data - // writtenAccountsMap[writtenAccount.accountId].prevStateId = wrappedStates[writtenAccount.accountId] - // ? wrappedStates[writtenAccount.accountId].stateId - // : '' - // writtenAccountsMap[writtenAccount.accountId].prevDataCopy = null - - // datas[writtenAccount.accountId] = writtenAccount.data - // } - // //override wrapped states with writtenAccountsMap which should be more complete if it included - // wrappedStates = writtenAccountsMap - // } - // if (receipt2.confirmOrChallenge?.message === 'challenge') { - // wrappedStates = {} - // } - // payload = { receipt: queueEntry.appliedReceipt2, wrappedStates } - // } - - // //let payload = queueEntry.recievedAppliedReceipt2 ?? queueEntry.appliedReceipt2 - // this.p2p.sendGossipIn('spread_appliedReceipt2', payload, '', sender, gossipGroup, false, -1, queueEntry.acceptedTx.txId) - // if (logFlags.debug) this.mainLogger.debug(`shareAppliedReceipt ${queueEntry.logID} sent gossip`) - // } - // } - /** * hasAppliedReceiptMatchingPreApply * check if our data matches our vote @@ -2420,427 +725,7 @@ class TransactionConsenus { ) return signedReceipt } - // } else if (this.stateManager.transactionQueue.useNewPOQ === false) { - // const requiredVotes = Math.round(votingGroup.length * this.config.p2p.requiredVotesPercentage) //hacky for now. debug code: - - // if (queueEntry.debug.loggedStats1 == null) { - // queueEntry.debug.loggedStats1 = true - // nestedCountersInstance.countEvent('transactionStats', ` votingGroup:${votingGroup.length}`) - // } - - // const numVotes = queueEntry.collectedVoteHashes.length - - // if (numVotes < requiredVotes) { - // // we need more votes - // return null - // } - - // // be smart an only recalculate votes when we see a new vote show up. 
- // if (queueEntry.newVotes === false) { - // return null - // } - // queueEntry.newVotes = false - // let mostVotes = 0 - // let winningVoteHash: string - // const hashCounts: Map = new Map() - - // for (let i = 0; i < numVotes; i++) { - // // eslint-disable-next-line security/detect-object-injection - // const currentVote = queueEntry.collectedVoteHashes[i] - // const voteCount = hashCounts.get(currentVote.voteHash) - // let updatedVoteCount: number - // if (voteCount === undefined) { - // updatedVoteCount = 1 - // } else { - // updatedVoteCount = voteCount + 1 - // } - // hashCounts.set(currentVote.voteHash, updatedVoteCount) - // if (updatedVoteCount > mostVotes) { - // mostVotes = updatedVoteCount - // winningVoteHash = currentVote.voteHash - // } - // } - - // if (mostVotes < requiredVotes) { - // return null - // } - - // if (winningVoteHash != undefined) { - // //make the new receipt. - // const appliedReceipt2: AppliedReceipt2 = { - // txid: queueEntry.acceptedTx.txId, - // result: undefined, - // appliedVote: undefined, - // confirmOrChallenge: null, - // signatures: [], - // app_data_hash: '', - // // transaction_result: false //this was missing before.. - // } - // for (let i = 0; i < numVotes; i++) { - // // eslint-disable-next-line security/detect-object-injection - // const currentVote = queueEntry.collectedVoteHashes[i] - // if (currentVote.voteHash === winningVoteHash) { - // appliedReceipt2.signatures.push(currentVote.sign) - // } - // } - // //result and appliedVote must be set using a winning vote.. - // //we may not have this yet - - // if (queueEntry.ourVote != null && queueEntry.ourVoteHash === winningVoteHash) { - // appliedReceipt2.result = queueEntry.ourVote.transaction_result - // appliedReceipt2.appliedVote = queueEntry.ourVote - // // now send it !!! - - // queueEntry.appliedReceipt2 = appliedReceipt2 - - // for (let i = 0; i < queueEntry.ourVote.account_id.length; i++) { - // /* eslint-disable security/detect-object-injection */ - // if (queueEntry.ourVote.account_id[i] === 'app_data_hash') { - // appliedReceipt2.app_data_hash = queueEntry.ourVote.account_state_hash_after[i] - // break - // } - // /* eslint-enable security/detect-object-injection */ - // } - - // //this is a temporary hack to reduce the ammount of refactor needed. - // const appliedReceipt: AppliedReceipt = { - // txid: queueEntry.acceptedTx.txId, - // result: queueEntry.ourVote.transaction_result, - // appliedVotes: [queueEntry.ourVote], - // confirmOrChallenge: [], - // app_data_hash: appliedReceipt2.app_data_hash, - // } - // queueEntry.appliedReceipt = appliedReceipt - - // return appliedReceipt - // } - // } - // } else { - // if (queueEntry.completedConfirmedOrChallenge === false && queueEntry.isInExecutionHome) { - // if (this.stateManager.consensusLog || logFlags.debug) - // this.mainLogger.info( - // `tryProduceReceipt ${queueEntry.logID} completedConfirmedOrChallenge === false and isInExecutionHome` - // ) - // nestedCountersInstance.countEvent('consensus', 'tryProduceReceipt still in confirm/challenge stage') - // return - // } - // const now = shardusGetTime() - // const timeSinceLastConfirmOrChallenge = - // queueEntry.lastConfirmOrChallengeTimestamp > 0 - // ? now - queueEntry.lastConfirmOrChallengeTimestamp - // : 0 - // const timeSinceFirstMessage = - // queueEntry.firstConfirmOrChallengeTimestamp > 0 - // ? 
now - queueEntry.firstConfirmOrChallengeTimestamp - // : 0 - // const hasWaitedLongEnough = - // timeSinceLastConfirmOrChallenge >= this.config.stateManager.waitTimeBeforeReceipt - // const hasWaitLimitReached = - // timeSinceFirstMessage >= this.config.stateManager.waitLimitAfterFirstMessage - // if (logFlags.debug) - // this.mainLogger.debug( - // `tryProduceReceipt: ${queueEntry.logID} hasWaitedLongEnough: ${hasWaitedLongEnough}, hasWaitLimitReached: ${hasWaitLimitReached}, timeSinceLastConfirmOrChallenge: ${timeSinceLastConfirmOrChallenge} ms, timeSinceFirstMessage: ${timeSinceFirstMessage} ms` - // ) - // // check if last vote confirm/challenge received is waitTimeBeforeReceipt ago - // if (timeSinceLastConfirmOrChallenge >= this.config.stateManager.waitTimeBeforeReceipt) { - // // stop accepting the vote messages, confirm or challenge for this tx - // queueEntry.acceptConfirmOrChallenge = false - // nestedCountersInstance.countEvent('consensus', 'tryProduceReceipt hasWaitedLongEnough: true') - // if (logFlags.debug) - // this.mainLogger.debug( - // `tryProduceReceipt: ${queueEntry.logID} stopped accepting confirm/challenge messages` - // ) - - // if (logFlags.debug) this.mainLogger.debug(`tryProduceReceipt: ${queueEntry.logID} ready to decide final receipt. bestReceivedChallenge: ${utils.stringifyReduce(queueEntry.receivedBestChallenge)}, bestReceivedConfirmation: ${utils.stringifyReduce(queueEntry.receivedBestConfirmation)}, receivedBestConfirmedNode: ${utils.stringifyReduce(queueEntry.receivedBestConfirmedNode)}`) // prettier-ignore - // if (this.stateManager.consensusLog) { - // this.mainLogger.debug(`tryProduceReceipt: ${queueEntry.logID} ready to decide final receipt.`) - // this.mainLogger.debug( - // `tryProduceReceipt: ${queueEntry.logID} uniqueChallengesCount: ${queueEntry.uniqueChallengesCount}` - // ) - // } - - // // we have received challenge message, produce failed receipt - // if ( - // queueEntry.receivedBestChallenge && - // queueEntry.receivedBestChallenger && - // queueEntry.uniqueChallengesCount >= this.config.stateManager.minRequiredChallenges - // ) { - // nestedCountersInstance.countEvent('consensus', 'tryProduceReceipt producing fail receipt from unique challenges') - // const appliedReceipt: AppliedReceipt = { - // txid: queueEntry.receivedBestChallenge.appliedVote.txid, - // result: false, - // appliedVotes: [queueEntry.receivedBestChallenge.appliedVote], - // confirmOrChallenge: [queueEntry.receivedBestChallenge], - // app_data_hash: queueEntry.receivedBestChallenge.appliedVote.app_data_hash, - // } - // const appliedReceipt2: AppliedReceipt2 = { - // txid: queueEntry.receivedBestChallenge.appliedVote.txid, - // result: false, - // appliedVote: queueEntry.receivedBestChallenge.appliedVote, - // confirmOrChallenge: queueEntry.receivedBestChallenge, - // app_data_hash: queueEntry.receivedBestChallenge.appliedVote.app_data_hash, - // signatures: [queueEntry.receivedBestChallenge.appliedVote.sign], - // } - // if (logFlags.debug) - // this.mainLogger.debug( - // `tryProduceReceipt: ${ - // queueEntry.logID - // } producing a fail receipt based on received challenge message. appliedReceipt: ${utils.stringifyReduce( - // appliedReceipt2 - // )}` - // ) - - // // todo: we still need to check if we have a better challenge receipt from robust query ?? 
- // const robustQueryResult = await this.robustQueryConfirmOrChallenge(queueEntry) - // const robustConfirmOrChallenge = robustQueryResult?.result - // const robustUniqueCount = robustQueryResult?.uniqueCount - // if (this.stateManager.consensusLog) { - // this.mainLogger.debug( - // `tryProduceReceipt: ${queueEntry.logID} robustChallenge: ${utils.stringifyReduce( - // robustConfirmOrChallenge - // )}, robustUniqueCount: ${robustUniqueCount}` - // ) - // } - // if (robustConfirmOrChallenge == null) { - // nestedCountersInstance.countEvent( - // 'consensus', - // 'tryProduceReceipt robust query for challenge failed' - // ) - // if (logFlags.debug) - // this.mainLogger.debug( - // `tryProduceReceipt: ${queueEntry.logID} failed to query robust challenge` - // ) - // return - // } - // queueEntry.robustQueryConfirmOrChallengeCompleted = true - - // // Received a confrim receipt. We have a challenge receipt which is better. - // if (robustConfirmOrChallenge && robustConfirmOrChallenge.message === 'confirm') { - // if (logFlags.debug) - // this.mainLogger.debug( - // `tryProduceReceipt: ${queueEntry.logID} received a confirm message. We have enough challenge messages which is better` - // ) - // // just use our challenge receipt and return - // queueEntry.appliedReceipt = appliedReceipt - // queueEntry.appliedReceipt2 = appliedReceipt2 - // return appliedReceipt - // } - - // // Received another challenge receipt. Compare ranks. Lower is better - // let bestNodeFromRobustQuery: Shardus.NodeWithRank - // if (queueEntry.executionGroupMap.has(robustConfirmOrChallenge.nodeId)) { - // bestNodeFromRobustQuery = queueEntry.executionGroupMap.get( - // robustConfirmOrChallenge.nodeId - // ) as Shardus.NodeWithRank - // } - // const isRobustQueryNodeBetter = bestNodeFromRobustQuery.rank < queueEntry.receivedBestChallenger.rank - // if ( - // isRobustQueryNodeBetter && - // robustUniqueCount >= this.config.stateManager.minRequiredChallenges - // ) { - // nestedCountersInstance.countEvent( - // 'consensus', - // 'tryProduceReceipt challenge from network is better than our challenge' - // ) - // if (logFlags.debug) - // this.mainLogger.debug( - // `tryProduceReceipt: ${ - // queueEntry.logID - // } challenge from robust query is better than our challenge. robustQueryConfirmOrChallenge: ${Utils.safeStringify( - // robustConfirmOrChallenge - // )}` - // ) - // const robustReceipt: AppliedReceipt = { - // txid: robustConfirmOrChallenge.appliedVote.txid, - // result: false, - // appliedVotes: [robustConfirmOrChallenge.appliedVote], - // confirmOrChallenge: [robustConfirmOrChallenge], - // app_data_hash: robustConfirmOrChallenge.appliedVote.app_data_hash, - // } - // const robustReceipt2: AppliedReceipt2 = { - // txid: robustConfirmOrChallenge.appliedVote.txid, - // result: false, - // appliedVote: robustConfirmOrChallenge.appliedVote, - // confirmOrChallenge: robustConfirmOrChallenge, - // app_data_hash: robustConfirmOrChallenge.appliedVote.app_data_hash, - // signatures: [robustConfirmOrChallenge.appliedVote.sign], - // } - // queueEntry.appliedReceipt = robustReceipt - // queueEntry.appliedReceipt2 = robustReceipt2 - // return robustReceipt - // } else { - // nestedCountersInstance.countEvent( - // 'consensus', - // 'tryProduceReceipt robustQueryConfirmOrChallenge is NOT better' - // ) - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`tryProduceReceipt: ${queueEntry.logID} challenge from robust query is not better than our challenge. 
use our challenge: ${utils.stringifyReduce(appliedReceipt2)}`) - // queueEntry.appliedReceipt = appliedReceipt - // queueEntry.appliedReceipt2 = appliedReceipt2 - // return appliedReceipt - // } - // } - - // // create receipt - // // The receipt for the transactions is the lowest ranked challenge message or if there is no challenge the lowest ranked confirm message - // // loop through "confirm" messages and "challenge" messages to decide the final receipt - // if (queueEntry.receivedBestConfirmation && queueEntry.receivedBestConfirmedNode) { - // const winningVote = queueEntry.receivedBestConfirmation.appliedVote - // const appliedReceipt: AppliedReceipt = { - // txid: winningVote.txid, - // result: winningVote.transaction_result, - // appliedVotes: [winningVote], - // confirmOrChallenge: [queueEntry.receivedBestConfirmation], - // app_data_hash: winningVote.app_data_hash, - // } - // const appliedReceipt2: AppliedReceipt2 = { - // txid: winningVote.txid, - // result: winningVote.transaction_result, - // appliedVote: winningVote, - // confirmOrChallenge: queueEntry.receivedBestConfirmation, - // app_data_hash: winningVote.app_data_hash, - // signatures: [winningVote.sign], - // } - // if (logFlags.debug || this.stateManager.consensusLog) - // this.mainLogger.debug( - // `tryProduceReceipt: ${queueEntry.logID} producing a confirm receipt based on received confirmation message.` - // ) - // for (let i = 0; i < winningVote.account_id.length; i++) { - // /* eslint-disable security/detect-object-injection */ - // if (winningVote.account_id[i] === 'app_data_hash') { - // appliedReceipt.app_data_hash = winningVote.account_state_hash_after[i] - // appliedReceipt2.app_data_hash = winningVote.account_state_hash_after[i] - // break - // } - // /* eslint-enable security/detect-object-injection */ - // } - // // do a robust query to confirm that we have the best receipt - // // (lower the rank of confirm message, the better the receipt is) - // const robustQueryResult = await this.robustQueryConfirmOrChallenge(queueEntry) - // const robustConfirmOrChallenge = robustQueryResult?.result - - // if (this.stateManager.consensusLog) { - // this.mainLogger.debug( - // `tryProduceReceipt: ${ - // queueEntry.logID - // } got result robustConfirmOrChallenge: ${utils.stringifyReduce(robustConfirmOrChallenge)}` - // ) - // } - - // if (robustConfirmOrChallenge == null || robustConfirmOrChallenge.message == null) { - // nestedCountersInstance.countEvent( - // 'consensus', - // 'tryProduceReceipt robustQueryConfirmOrChallenge confirm failed' - // ) - // if (logFlags.debug || this.stateManager.consensusLog) - // this.mainLogger.debug( - // `tryProduceReceipt: ${queueEntry.logID} failed to query best challenge/message from robust query` - // ) - // return // this will prevent OOS - // } - // queueEntry.robustQueryConfirmOrChallengeCompleted = true - - // // Received challenge receipt, we have confirm receipt which is not as strong as challenge receipt - // if (robustConfirmOrChallenge.message === 'challenge') { - // nestedCountersInstance.countEvent( - // 'consensus', - // 'tryProduceReceipt robustQueryConfirmOrChallenge is challenge, we have confirmation' - // ) - // const robustReceipt: AppliedReceipt = { - // txid: robustConfirmOrChallenge.appliedVote.txid, - // result: false, - // appliedVotes: [robustConfirmOrChallenge.appliedVote], - // confirmOrChallenge: [robustConfirmOrChallenge], - // app_data_hash: robustConfirmOrChallenge.appliedVote.app_data_hash, - // } - // const robustReceipt2: AppliedReceipt2 = { - 
// txid: robustConfirmOrChallenge.appliedVote.txid, - // result: false, - // appliedVote: robustConfirmOrChallenge.appliedVote, - // confirmOrChallenge: robustConfirmOrChallenge, - // app_data_hash: robustConfirmOrChallenge.appliedVote.app_data_hash, - // signatures: [robustConfirmOrChallenge.appliedVote.sign], - // } - // queueEntry.appliedReceipt = robustReceipt - // queueEntry.appliedReceipt2 = robustReceipt2 - // return robustReceipt - // } - // // mark that we have a robust confirmation, should not expire the tx - // queueEntry.hasRobustConfirmation = true - - // // Received another confirm receipt. Compare ranks - // let bestNodeFromRobustQuery: Shardus.NodeWithRank - // if (queueEntry.executionGroupMap.has(robustConfirmOrChallenge.nodeId)) { - // bestNodeFromRobustQuery = queueEntry.executionGroupMap.get( - // robustConfirmOrChallenge.nodeId - // ) as Shardus.NodeWithRank - // } - - // const isRobustQueryNodeBetter = - // bestNodeFromRobustQuery.rank < queueEntry.receivedBestConfirmedNode.rank - // if (isRobustQueryNodeBetter) { - // nestedCountersInstance.countEvent( - // 'consensus', - // 'tryProduceReceipt robustQueryConfirmOrChallenge is better' - // ) - // if (this.stateManager.consensusLog) { - // this.mainLogger.debug( - // `tryProducedReceipt: ${ - // queueEntry.logID - // } robust confirmation result is better. ${utils.stringifyReduce(robustConfirmOrChallenge)}` - // ) - // } - // if (logFlags.debug) - // this.mainLogger.debug( - // `tryProduceReceipt: ${ - // queueEntry.logID - // } confirmation from robust query is better than our confirm. bestNodeFromRobust?Query: ${Utils.safeStringify( - // bestNodeFromRobustQuery - // )}, queueEntry.receivedBestVoter: ${Utils.safeStringify( - // queueEntry.receivedBestVoter - // )}, robustQueryConfirmOrChallenge: ${Utils.safeStringify(robustConfirmOrChallenge)}` - // ) - // const robustReceipt: AppliedReceipt = { - // txid: robustConfirmOrChallenge.appliedVote.txid, - // result: robustConfirmOrChallenge.appliedVote.transaction_result, - // appliedVotes: [robustConfirmOrChallenge.appliedVote], - // confirmOrChallenge: [robustConfirmOrChallenge], - // app_data_hash: robustConfirmOrChallenge.appliedVote.app_data_hash, - // } - // const robustReceipt2: AppliedReceipt2 = { - // txid: robustConfirmOrChallenge.appliedVote.txid, - // result: robustConfirmOrChallenge.appliedVote.transaction_result, - // appliedVote: robustConfirmOrChallenge.appliedVote, - // confirmOrChallenge: robustConfirmOrChallenge, - // app_data_hash: robustConfirmOrChallenge.appliedVote.app_data_hash, - // signatures: [robustConfirmOrChallenge.appliedVote.sign], - // } - // queueEntry.appliedReceipt = robustReceipt - // queueEntry.appliedReceipt2 = robustReceipt2 - // return robustReceipt - // } else { - // if (this.stateManager.consensusLog) { - // this.mainLogger.debug( - // `tryProducedReceipt: ${queueEntry.logID} robust confirmation result is NOT better. Using our best received confirmation` - // ) - // } - // queueEntry.appliedReceipt = appliedReceipt - // queueEntry.appliedReceipt2 = appliedReceipt2 - // return queueEntry.appliedReceipt - // } - // } else { - // nestedCountersInstance.countEvent( - // 'consensus', - // 'tryProduceReceipt waitedEnough: true. 
no confirm or challenge received' - // ) - // return null - // } - // } else { - // if (logFlags.debug) - // this.mainLogger.debug( - // `tryProduceReceipt: ${queueEntry.logID} not producing receipt yet because timeSinceLastConfirmOrChallenge is ${timeSinceLastConfirmOrChallenge} ms` - // ) - // } - // } + return null } catch (e) { //if (logFlags.error) this.mainLogger.error(`tryProduceReceipt: error ${queueEntry.logID} error: ${e.message}`) @@ -2851,92 +736,6 @@ class TransactionConsenus { } } - // DEPRECATED AFTER POQO - // async robustQueryBestReceipt(queueEntry: QueueEntry): Promise { - // this.profiler.profileSectionStart('robustQueryBestReceipt', true) - // this.profiler.scopedProfileSectionStart('robustQueryBestReceipt') - // try { - // const queryFn = async (node: Shardus.Node): Promise => { - // const ip = node.externalIp - // const port = node.externalPort - // // the queryFunction must return null if the given node is our own - // if (ip === Self.ip && port === Self.port) return null - // const message: RequestReceiptForTxReq = { - // txid: queueEntry.acceptedTx.txId, - // timestamp: queueEntry.acceptedTx.timestamp, - // } - // return await Comms.ask(node, 'request_receipt_for_tx', message) - // } - // const eqFn = (item1: RequestReceiptForTxResp, item2: RequestReceiptForTxResp): boolean => { - // const deepCompare = (obj1: any, obj2: any): boolean => { - // // If both are null or undefined or exactly the same value - // if (obj1 === obj2) { - // return true - // } - - // // If only one is null or undefined - // if (obj1 === null || obj2 === null || typeof obj1 !== 'object' || typeof obj2 !== 'object') { - // return false - // } - - // // Compare arrays - // if (Array.isArray(obj1) && Array.isArray(obj2)) { - // if (obj1.length !== obj2.length) { - // return false - // } - // for (let i = 0; i < obj1.length; i++) { - // if (!deepCompare(obj1[i], obj2[i])) { - // return false - // } - // } - // return true - // } - - // // Compare objects - // const keys1 = Object.keys(obj1) - // const keys2 = Object.keys(obj2) - - // if (keys1.length !== keys2.length) { - // return false - // } - - // for (const key of keys1) { - // if (!keys2.includes(key)) { - // return false - // } - // if (!deepCompare(obj1[key], obj2[key])) { - // return false - // } - // } - - // return true - // } - // try { - // // Deep compare item.receipt - // return deepCompare(item1.receipt, item2.receipt) - // } catch (err) { - // return false - // } - // } - // const redundancy = 3 - // const { topResult: response } = await robustQuery( - // this.stateManager.transactionQueue.queueEntryGetTransactionGroup(queueEntry), - // queryFn, - // eqFn, - // redundancy, - // true - // ) - // if (response && response.receipt) { - // return response.receipt - // } - // } catch (e) { - // this.mainLogger.error(`robustQueryBestReceipt: ${queueEntry.logID} error: ${e.message}`) - // } finally { - // this.profiler.scopedProfileSectionEnd('robustQueryBestReceipt') - // this.profiler.profileSectionEnd('robustQueryBestReceipt', true) - // } - // } - async robustQueryBestVote(queueEntry: QueueEntry): Promise { profilerInstance.profileSectionStart('robustQueryBestVote', true) profilerInstance.scopedProfileSectionStart('robustQueryBestVote') @@ -3197,302 +996,10 @@ class TransactionConsenus { } } - // async confirmOrChallenge(queueEntry: QueueEntry): Promise { - // try { - // if (queueEntry.ourVote == null && queueEntry.isInExecutionHome) { - // nestedCountersInstance.countEvent('confirmOrChallenge', 'ourVote == null and 
isInExecutionHome') - // return - // } - // if (queueEntry.completedConfirmedOrChallenge) { - // nestedCountersInstance.countEvent('confirmOrChallenge', 'already completedConfirmedOrChallenge') - // return - // } - // if (queueEntry.queryingRobustVote) { - // nestedCountersInstance.countEvent('confirmOrChallenge', 'in the middle of querying robust vote') - // return - // } - // if (queueEntry.queryingRobustAccountData) { - // nestedCountersInstance.countEvent( - // 'confirmOrChallenge', - // 'in the middle of querying robust account data' - // ) - // return - // } - // if (logFlags.debug) - // this.mainLogger.debug( - // `confirmOrChallenge: ${queueEntry.logID} receivedBestVote: ${Utils.safeStringify( - // queueEntry.receivedBestVote - // )}} ` - // ) - - // this.profiler.profileSectionStart('confirmOrChallenge') - // if (logFlags.profiling_verbose) this.profiler.scopedProfileSectionStart('confirmOrChallenge') - - // const now = shardusGetTime() - // // if we are in lowest 10% of execution group and agrees with the highest ranked vote, send out a confirm msg - // const timeSinceLastVoteMessage = - // queueEntry.lastVoteReceivedTimestamp > 0 ? now - queueEntry.lastVoteReceivedTimestamp : 0 - // const timeSinceFirstVote = - // queueEntry.firstVoteReceivedTimestamp > 0 ? now - queueEntry.firstVoteReceivedTimestamp : 0 - // // check if last confirm/challenge received is 1s ago - // const hasWaitedLongEnough = timeSinceLastVoteMessage >= this.config.stateManager.waitTimeBeforeConfirm - // const hasWaitLimitReached = timeSinceFirstVote >= this.config.stateManager.waitLimitAfterFirstVote - // if (logFlags.verbose && this.stateManager.consensusLog) - // this.mainLogger.debug( - // `confirmOrChallenge: ${queueEntry.logID} hasWaitedLongEnough: ${hasWaitedLongEnough}, hasWaitLimitReached: ${hasWaitLimitReached}, timeSinceLastVoteMessage: ${timeSinceLastVoteMessage} ms, timeSinceFirstVote: ${timeSinceFirstVote} ms` - // ) - // if (hasWaitedLongEnough || hasWaitLimitReached) { - // nestedCountersInstance.countEvent('confirmOrChallenge', 'hasWaitedLongEnough or hasWaitLimitReached') - // // stop accepting the vote messages for this tx - // queueEntry.acceptVoteMessage = false - // const eligibleToConfirm = queueEntry.eligibleNodeIdsToConfirm.has(Self.id) - // const eligibleToChallenge = true - // if (this.stateManager.consensusLog || logFlags.debug) { - // this.mainLogger.info( - // `confirmOrChallenge: ${queueEntry.logID} hasWaitedLongEnough: true. Now we will try to confirm or challenge. 
eligibleToConfirm: ${eligibleToConfirm}, eligibleToChallenge: ${eligibleToChallenge}` - // ) - // } - - // // confirm that current vote is the winning highest ranked vote using robustQuery - // const voteFromRobustQuery = await this.robustQueryBestVote(queueEntry) - // if (voteFromRobustQuery == null) { - // // we cannot confirm the best vote from network - // this.mainLogger.error(`confirmOrChallenge: ${queueEntry.logID} We cannot get voteFromRobustQuery`) - // nestedCountersInstance.countEvent('confirmOrChallenge', 'cannot get robust vote from network') - // return - // } - // /* prettier ignore */if (this.mainLogger.debug || this.stateManager.consensusLog) this.mainLogger.debug(`confirmOrChallenge: ${queueEntry.logID} voteFromRobustQuery: ${utils.stringifyReduce(voteFromRobustQuery)}`) - // let bestVoterFromRobustQuery: Shardus.NodeWithRank - // for (let i = 0; i < queueEntry.executionGroup.length; i++) { - // const node = queueEntry.executionGroup[i] - // if (node.id === voteFromRobustQuery.node_id) { - // bestVoterFromRobustQuery = node as Shardus.NodeWithRank - // break - // } - // } - // if (bestVoterFromRobustQuery == null) { - // // we cannot confirm the best voter from network - // this.mainLogger.error( - // `confirmOrChallenge: ${queueEntry.logID} We cannot get bestVoter from robustQuery for tx ${queueEntry.logID}` - // ) - // nestedCountersInstance.countEvent('confirmOrChallenge', 'cannot get robust voter from network') - // return - // } - // queueEntry.robustQueryVoteCompleted = true - - // // if vote from robust is better than our received vote, use it as final vote - // const isRobustQueryVoteBetter = bestVoterFromRobustQuery.rank > queueEntry.receivedBestVoter.rank - // let finalVote = queueEntry.receivedBestVote - // let finalVoteHash = queueEntry.receivedBestVoteHash - // if (isRobustQueryVoteBetter) { - // nestedCountersInstance.countEvent('confirmOrChallenge', 'robust query vote is better') - // finalVote = voteFromRobustQuery - // finalVoteHash = this.calculateVoteHash(voteFromRobustQuery) - // queueEntry.receivedBestVote = voteFromRobustQuery - // queueEntry.receivedBestVoter = bestVoterFromRobustQuery - // queueEntry.receivedBestVoteHash = finalVoteHash - // if (this.stateManager.consensusLog) { - // this.mainLogger.info(`confirmOrChallenge: ${queueEntry.logID} robust query vote is better`) - // } - // } else { - // if (this.stateManager.consensusLog) { - // this.mainLogger.info( - // `confirmOrChallenge: ${ - // queueEntry.logID - // } robust query vote is NOT better. 
${utils.stringifyReduce(queueEntry.receivedBestVote)}` - // ) - // } - // } - // const shouldChallenge = queueEntry.ourVoteHash != null && queueEntry.ourVoteHash !== finalVoteHash - - // if (this.stateManager.consensusLog) - // this.mainLogger.debug( - // `confirmOrChallenge: ${queueEntry.logID} isInExecutionSet: ${queueEntry.isInExecutionHome}, eligibleToConfirm: ${eligibleToConfirm}, shouldChallenge: ${shouldChallenge}` - // ) - // if (this.produceBadChallenge || shouldChallenge) { - // if (!shouldChallenge && logFlags.debug) { - // this.mainLogger.debug( - // `confirmOrChallenge: ${queueEntry.logID} I'm a bad node producing a bad challenge` - // ) - // } - // this.challengeVoteAndShare(queueEntry) - // return - // } - - // if (eligibleToConfirm && queueEntry.ourVoteHash === finalVoteHash) { - // // queueEntry.eligibleNodesToConfirm is sorted highest to lowest rank - // const confirmNodeIds = Array.from(queueEntry.eligibleNodeIdsToConfirm).reverse() - // const ourRankIndex = confirmNodeIds.indexOf(Self.id) - // // let delayBeforeConfirm = ourRankIndex * 50 // 50ms - // // - // // if (delayBeforeConfirm > 500) delayBeforeConfirm = 500 // we don't want to wait too long - // // - // // if (delayBeforeConfirm > 0) { - // // await utils.sleep(delayBeforeConfirm) - // // - // // // Compare our rank with received rank before sharing our confirmation - // // if ( - // // queueEntry.receivedBestConfirmedNode && - // // queueEntry.receivedBestConfirmedNode.rank < queueEntry.ourNodeRank - // // ) { - // // nestedCountersInstance.countEvent( - // // 'confirmOrChallenge', - // // `isReceivedBetterConfirmation after ${delayBeforeConfirm}ms delay: true` - // // ) - // // if (logFlags.debug) - // // this.mainLogger.debug( - // // `confirmOrChallenge: ${ - // // queueEntry.logID - // // } received better confirmation before we share ours, receivedBestConfirmation: ${utils.stringifyReduce( - // // queueEntry.receivedBestConfirmation - // // )}` - // // ) - // // queueEntry.completedConfirmedOrChallenge = true - // // return - // // } - // // nestedCountersInstance.countEvent( - // // 'confirmOrChallenge', - // // `isReceivedBetterConfirmation after ${delayBeforeConfirm}ms delay: false` - // // ) - // // } - // this.confirmVoteAndShare(queueEntry) - // } else if (eligibleToConfirm === false) { - // // we are not eligible to confirm - // if (this.stateManager.consensusLog) - // this.mainLogger.debug( - // `confirmOrChallenge: ${queueEntry.logID} not eligible to confirm. 
set completedConfirmedOrChallenge to true` - // ) - // queueEntry.completedConfirmedOrChallenge = true - // } - // } else { - // nestedCountersInstance.countEvent('confirmOrChallenge', 'still early for confirm or challenge') - // if (logFlags.debug) - // this.mainLogger.debug( - // `confirmOrChallenge: ${queueEntry.logID} not sending confirm or challenge yet because timeSinceLastVoteMessage is ${timeSinceLastVoteMessage} ms` - // ) - // } - // } catch (e) { - // this.mainLogger.error(`confirmOrChallenge: ${queueEntry.logID} error: ${e.message}, ${e.stack}`) - // } finally { - // if (logFlags.profiling_verbose) this.profiler.scopedProfileSectionEnd('confirmOrChallenge') - // this.profiler.profileSectionEnd('confirmOrChallenge') - // } - // } - sortByAccountId(first: Shardus.WrappedResponse, second: Shardus.WrappedResponse): Ordering { return utils.sortAscProp(first, second, 'accountId') } - // DEPRECATED AFTER POQO - // async confirmVoteAndShare(queueEntry: QueueEntry): Promise { - // this.profiler.profileSectionStart('confirmVoteAndShare') - // try { - // /* prettier-ignore */ - // if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote("shrd_confirmOrChallengeVote", `${queueEntry.acceptedTx.txId}`, `qId: ${queueEntry.entryID} `); - - // // podA: POQ3 create confirm message and share to tx group - // const confirmMessage: ConfirmOrChallengeMessage = { - // message: 'confirm', - // nodeId: Self.id, - // appliedVote: queueEntry.receivedBestVote, - // } - // const signedConfirmMessage = this.crypto.sign(confirmMessage) - // if (this.stateManager.consensusLog) this.mainLogger.debug(`confirmVoteAndShare: ${queueEntry.logID}`) - - // //Share message to tx group - // const gossipGroup = this.stateManager.transactionQueue.queueEntryGetTransactionGroup(queueEntry) - // Comms.sendGossip('spread_confirmOrChallenge', signedConfirmMessage, '', Self.id, gossipGroup, true, 10, queueEntry.acceptedTx.txId, `confirmVote_${NodeList.activeIdToPartition.get(signedConfirmMessage.appliedVote?.node_id)}`) - // this.tryAppendMessage(queueEntry, signedConfirmMessage) - // queueEntry.gossipedConfirmOrChallenge = true - // queueEntry.completedConfirmedOrChallenge = true - // if (this.stateManager.consensusLog) - // this.mainLogger.debug(`completedConfirmOrChallenge: ${queueEntry.logID}`) - // } catch (e) { - // this.mainLogger.error(`confirmVoteAndShare: ${queueEntry.logID} error: ${e.message}`) - // } finally { - // this.profiler.profileSectionEnd('confirmVoteAndShare') - // } - // } - - // async challengeVoteAndShare(queueEntry: QueueEntry): Promise { - // this.profiler.profileSectionStart('challengeVoteAndShare') - // try { - // /* prettier-ignore */ - // if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote("shrd_confirmOrChallengeVote", `${queueEntry.acceptedTx.txId}`, `qId: ${queueEntry.entryID} `); - - // // Should check account integrity only when before states are different from best vote - // let doStatesMatch = true - // const voteBeforeStates = queueEntry.receivedBestVote.account_state_hash_before - // const ourCollectedData = Object.values(queueEntry.collectedData) - // if (voteBeforeStates.length !== ourCollectedData.length) { - // doStatesMatch = false - // } - // for (let i = 0; i < voteBeforeStates.length; i++) { - // if (ourCollectedData[i] == null) { - // doStatesMatch = false - // nestedCountersInstance.countEvent( - // 'confirmOrChallenge', - // 'tryChallengeVoteAndShare canceled because ourCollectedData is null' - // ) - // break - // } - // if 
(voteBeforeStates[i] !== ourCollectedData[i].stateId) { - // doStatesMatch = false - // nestedCountersInstance.countEvent( - // 'confirmOrChallenge', - // 'tryChallengeVoteAndShare states do not match' - // ) - // break - // } - // } - // if (this.produceBadChallenge) doStatesMatch = false - // let isAccountIntegrityOk = false - - // if (doStatesMatch) { - // isAccountIntegrityOk = true - // } else if (doStatesMatch === false && this.config.stateManager.integrityCheckBeforeChallenge === true) { - // isAccountIntegrityOk = await this.checkAccountIntegrity(queueEntry) - // } else { - // isAccountIntegrityOk = true - // } - - // if (!isAccountIntegrityOk) { - // nestedCountersInstance.countEvent( - // 'confirmOrChallenge', - // 'tryChallengeVoteAndShare account integrity not ok.' - // ) - // if (logFlags.verbose) - // this.mainLogger.debug(`challengeVoteAndShare: ${queueEntry.logID} account integrity is not ok`) - // // we should not challenge or confirm if account integrity is not ok - // queueEntry.completedConfirmedOrChallenge = true - // return - // } - - // //podA: POQ4 create challenge message and share to tx group - // const challengeMessage: ConfirmOrChallengeMessage = { - // message: 'challenge', - // nodeId: queueEntry.ourVote.node_id, - // appliedVote: queueEntry.receivedBestVote, - // } - // const signedChallengeMessage = this.crypto.sign(challengeMessage) - // if (logFlags.debug) - // this.mainLogger.debug( - // `challengeVoteAndShare: ${queueEntry.logID} ${Utils.safeStringify(signedChallengeMessage)}}` - // ) - - // //Share message to tx group - // const gossipGroup = this.stateManager.transactionQueue.queueEntryGetTransactionGroup(queueEntry) - // Comms.sendGossip('spread_confirmOrChallenge', signedChallengeMessage, '', null, gossipGroup, true, 10, queueEntry.acceptedTx.txId, `challengeVote_${NodeList.activeIdToPartition.get(signedChallengeMessage.appliedVote?.node_id)}`) - // this.tryAppendMessage(queueEntry, signedChallengeMessage) - // queueEntry.gossipedConfirmOrChallenge = true - // queueEntry.completedConfirmedOrChallenge = true - // } catch (e) { - // this.mainLogger.error(`challengeVoteAndShare: ${queueEntry.logID} error: ${e.message}`) - // } finally { - // this.profiler.profileSectionEnd('challengeVoteAndShare') - // } - // } - async checkAccountIntegrity(queueEntry: QueueEntry): Promise { this.profiler.scopedProfileSectionStart('checkAccountIntegrity') queueEntry.queryingRobustAccountData = true @@ -3713,113 +1220,7 @@ class TransactionConsenus { } this.poqoVoteSendLoop(queueEntry, appliedVoteHash) return - // } - - // if (this.stateManager.transactionQueue.useNewPOQ) { - // if (isEligibleToShareVote === false) { - // nestedCountersInstance.countEvent( - // 'transactionConsensus', - // 'createAndShareVote isEligibleToShareVote:' + ' false' - // ) - // return - // } - // const ourRankIndex = Array.from(queueEntry.eligibleNodeIdsToVote).indexOf(ourNodeId) - // let delayBeforeVote = ourRankIndex * 10 // 10ms x rank index - - // if (delayBeforeVote > 500) { - // delayBeforeVote = 500 - // } - - // nestedCountersInstance.countEvent( - // 'transactionConsensus', - // `createAndShareVote delayBeforeSharingVote: ${delayBeforeVote} ms` - // ) - - // if (delayBeforeVote > 0) { - // await utils.sleep(delayBeforeVote) - - // // Compare our rank with received rank - // if (queueEntry.receivedBestVoter && queueEntry.receivedBestVoter.rank > queueEntry.ourNodeRank) { - // isReceivedBetterVote = true - // } - - // if (isReceivedBetterVote) { - // if (this.stateManager.consensusLog) 
- // this.mainLogger.debug(`createAndShareVote received better vote`) - // nestedCountersInstance.countEvent( - // 'transactionConsensus', - // 'createAndShareVote isReceivedBetterVote: true' - // ) - // return - // } - // } - - // // tryAppend before sharing - // const appendWorked = this.tryAppendVote(queueEntry, ourVote) - // if (appendWorked === false) { - // nestedCountersInstance.countEvent('transactionConsensus', 'createAndShareVote appendFailed') - // } - // } - - // let gossipGroup = [] - // if ( - // this.stateManager.transactionQueue.executeInOneShard === true && - // this.stateManager.transactionQueue.useNewPOQ === false - // ) { - // //only share with the exection group - // gossipGroup = queueEntry.executionGroup - // } else { - // //sharing with the entire transaction group actually.. - // gossipGroup = this.stateManager.transactionQueue.queueEntryGetTransactionGroup(queueEntry) - // } - - // if (gossipGroup.length >= 1) { - // this.stateManager.debugNodeGroup( - // queueEntry.acceptedTx.txId, - // queueEntry.acceptedTx.timestamp, - // `share tx vote to neighbors`, - // gossipGroup - // ) - - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`createAndShareVote numNodes: ${gossipGroup.length} stats:${utils.stringifyReduce(stats)} ourVote: ${utils.stringifyReduce(ourVote)}`) - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('createAndShareVote', `${queueEntry.acceptedTx.txId}`, `numNodes: ${gossipGroup.length} stats:${utils.stringifyReduce(stats)} ourVote: ${utils.stringifyReduce(ourVote)} `) - - // // Filter nodes before we send tell() - // const filteredNodes = this.stateManager.filterValidNodesForInternalMessage( - // gossipGroup, - // 'createAndShareVote', - // true, - // true - // ) - // if (filteredNodes.length === 0) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('createAndShareVote: filterValidNodesForInternalMessage no valid nodes left to try') - // return null - // } - // const filteredConsensusGroup = filteredNodes - - // if (this.stateManager.transactionQueue.useNewPOQ) { - // // Gossip the vote to the entire consensus group - // // Comms.sendGossip('gossip-applied-vote', ourVote, '', null, filteredConsensusGroup, true, 4, queueEntry.acceptedTx.txId, `${NodeList.activeIdToPartition.get(ourVote.node_id)}`) - // } else { - // this.profiler.profileSectionStart('createAndShareVote-tell') - // // if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.spreadAppliedVoteHashBinary) { - // const request = appliedVoteHash as AppliedVoteHash - // this.p2p.tellBinary( - // filteredConsensusGroup, - // InternalRouteEnum.binary_spread_appliedVoteHash, - // request, - // serializeSpreadAppliedVoteHashReq, - // {} - // ) - // // } else { - // // this.p2p.tell(filteredConsensusGroup, 'spread_appliedVoteHash', appliedVoteHash) - // // } - - // this.profiler.profileSectionEnd('createAndShareVote-tell') - // } - // } else { - // nestedCountersInstance.countEvent('transactionQueue', 'createAndShareVote fail, no consensus group') - // } + } catch (e) { this.mainLogger.error(`createAndShareVote: error ${e.message}`) } finally { @@ -3843,44 +1244,7 @@ class TransactionConsenus { this.crypto.hash(applyStatus) + accountsHash + proposal.appReceiptDataHash + proposal.executionShardKey ) return proposalHash - // } else if (this.stateManager.transactionQueue.usePOQo) { - // const appliedVote = vote as AppliedVote - // const appliedHash = { - // applied: appliedVote.transaction_result, - // cantApply: 
appliedVote.cant_apply - // } - // const stateHash = { - // account_id: appliedVote.account_id, - // account_state_hash_after: appliedVote.account_state_hash_after, - // account_state_hash_before: appliedVote.account_state_hash_before, - // } - // const appDataHash = { - // app_data_hash: appliedVote.app_data_hash, - // } - // const voteToHash = { - // appliedHash: this.crypto.hash(appliedHash), - // stateHash: this.crypto.hash(stateHash), - // appDataHash: this.crypto.hash(appDataHash), - // } - // return this.crypto.hash(voteToHash) - // } else if (this.stateManager.transactionQueue.useNewPOQ) { - // const appliedVote = vote as AppliedVote - // const voteToHash = { - // txId: appliedVote.txid, - // transaction_result: appliedVote.transaction_result, - // account_id: appliedVote.account_id, - // account_state_hash_after: appliedVote.account_state_hash_after, - // account_state_hash_before: appliedVote.account_state_hash_before, - // cant_apply: appliedVote.cant_apply, - // } - // return this.crypto.hash(voteToHash) - // } else { - // const appliedVote = vote as AppliedVote - // const voteToHash = Object.assign({}, appliedVote) - // if (voteToHash.node_id != null) voteToHash.node_id = '' - // if (voteToHash.sign != null) delete voteToHash.sign - // return this.crypto.hash(voteToHash) - // } + } addPendingConfirmOrChallenge(queueEntry: QueueEntry, confirmOrChallenge: ConfirmOrChallengeMessage): void { if (queueEntry.pendingConfirmOrChallenge.has(confirmOrChallenge.nodeId) === false) { @@ -3888,225 +1252,6 @@ class TransactionConsenus { } } - // DEPRECATED AFTER POQO - // /** - // * tryAppendMessage - // * if we have not seen this message yet search our list of votes and append it in - // * the correct spot sorted by signer's id - // * @param queueEntry - // * @param confirmOrChallenge - // */ - // tryAppendMessage(queueEntry: QueueEntry, confirmOrChallenge: ConfirmOrChallengeMessage): boolean { - // if (queueEntry.acceptVoteMessage === true) { /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryAppendMessage: ${queueEntry.logID} we are still accepting vote messages. Not ready`) - // this.addPendingConfirmOrChallenge(queueEntry, confirmOrChallenge) - // return false - // } - // if (queueEntry.robustQueryVoteCompleted === false) { - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tryAppendMessage: ${queueEntry.logID} robustQueryVoteCompleted: ${queueEntry.robustQueryVoteCompleted}. Not ready`) - // this.addPendingConfirmOrChallenge(queueEntry, confirmOrChallenge) - // return false - // } - // if (queueEntry.acceptConfirmOrChallenge === false || queueEntry.appliedReceipt2 != null) { - // this.mainLogger.debug( - // `tryAppendMessage: ${ - // queueEntry.logID - // } not accepting confirm or challenge. 
acceptConfirmOrChallenge: ${ - // queueEntry.acceptConfirmOrChallenge - // }, appliedReceipt2: ${queueEntry.appliedReceipt2 == null}` - // ) - // return false - // } - - // /* prettier-ignore */ - // if (logFlags.playback) this.logger.playbackLogNote("tryAppendMessage", `${queueEntry.logID}`, `collectedVotes: ${queueEntry.collectedVotes.length}`); - // /* prettier-ignore */ - // if (logFlags.debug) this.mainLogger.debug(`tryAppendMessage: ${queueEntry.logID} ${Utils.safeStringify(confirmOrChallenge)} `); - // // check if the node is in the execution group - // const isMessageFromExecutionNode = queueEntry.executionGroupMap.has(confirmOrChallenge.nodeId) - - // if (!isMessageFromExecutionNode) { - // this.mainLogger.error(`tryAppendMessage: ${queueEntry.logID} Message is not from an execution node.`) - // return false - // } - - // if (confirmOrChallenge.message === 'confirm') { - // const foundNode = - // queueEntry.eligibleNodeIdsToConfirm.has(confirmOrChallenge.nodeId) && - // this.crypto.verify( - // confirmOrChallenge as SignedObject, - // queueEntry.executionGroupMap.get(confirmOrChallenge.nodeId).publicKey - // ) - - // if (!foundNode) { - // this.mainLogger.error( - // `tryAppendMessage: ${queueEntry.logID} Message signature does not match with any eligible nodes that can confirm.` - // ) - // return false - // } - // } - - // // todo: podA check if the message is valid - // const isMessageValid = true - // if (!isMessageValid) return false - - // // Check if the previous phase is finalized and we have received best vote - // if (queueEntry.receivedBestVote == null) { - // this.mainLogger.error( - // `tryAppendMessage: ${queueEntry.logID} confirm/challenge is too early. Not finalized best vote yet` - // ) - // this.addPendingConfirmOrChallenge(queueEntry, confirmOrChallenge) - // return false - // } - - // // verify that the vote part of the message is for the same vote that was finalized in the previous phase - // if (this.calculateVoteHash(confirmOrChallenge.appliedVote) !== queueEntry.receivedBestVoteHash) { - // this.mainLogger.error( - // `tryAppendMessage: ${ - // queueEntry.logID - // } confirmOrChallenge is not for the same vote that was finalized in the previous phase, queueEntry.receivedBestVote: ${Utils.safeStringify( - // queueEntry.receivedBestVote - // )}` - // ) - // nestedCountersInstance.countEvent('confirmOrChallenge', 'not same vote as finalized vote') - // return false - // } - - // // record the timestamps - // const now = shardusGetTime() - // queueEntry.lastConfirmOrChallengeTimestamp = now - // if (queueEntry.firstConfirmOrChallengeTimestamp === 0) { - // queueEntry.firstConfirmOrChallengeTimestamp = now - - // if (this.stateManager.consensusLog) { - // this.mainLogger.info(`tryAppendMessage: ${queueEntry.logID} first confirm or challenge`) - // } - // } - - // if (confirmOrChallenge.message === 'confirm') { - // let isBetterThanCurrentConfirmation - // let receivedConfirmedNode: Shardus.NodeWithRank - - // queueEntry.topConfirmations.add(confirmOrChallenge.nodeId) - // if (this.stateManager.consensusLog) this.mainLogger.info( - // `tryAppendMessage: ${queueEntry.logID} current topConfirmations: ${queueEntry.topConfirmations.size}` - // ) - - // if (!queueEntry.receivedBestConfirmation) isBetterThanCurrentConfirmation = true - // else if (queueEntry.receivedBestConfirmation.nodeId === confirmOrChallenge.nodeId) - // isBetterThanCurrentConfirmation = false - // else { - // // Compare ranks - // if (queueEntry.executionGroupMap.has(confirmOrChallenge.nodeId)) { 
- // receivedConfirmedNode = queueEntry.executionGroupMap.get( - // confirmOrChallenge.nodeId - // ) as Shardus.NodeWithRank - // } - - // isBetterThanCurrentConfirmation = - // receivedConfirmedNode.rank < queueEntry.receivedBestConfirmedNode.rank - // } - - // if (!isBetterThanCurrentConfirmation) { - // if (logFlags.debug) - // this.mainLogger.debug( - // `tryAppendMessage: ${queueEntry.logID} confirmation is not better than current confirmation` - // ) - // return false - // } - - // if (this.stateManager.consensusLog) - // this.mainLogger.debug( - // `tryAppendMessage: ${queueEntry.logID} better confirmation received and switching to it` - // ) - - // queueEntry.receivedBestConfirmation = confirmOrChallenge - - // if (receivedConfirmedNode) { - // queueEntry.receivedBestConfirmedNode = receivedConfirmedNode - // } else { - // if (queueEntry.executionGroupMap.has(confirmOrChallenge.nodeId)) { - // queueEntry.receivedBestConfirmedNode = queueEntry.executionGroupMap.get( - // confirmOrChallenge.nodeId - // ) as Shardus.NodeWithRank - // } - // } - - // if (logFlags.debug) - // this.mainLogger.debug( - // `tryAppendMessage: ${ - // queueEntry.logID - // } confirmation received and processed. queueEntry.receivedBestConfirmation: ${Utils.safeStringify( - // queueEntry.receivedBestConfirmation - // )}, receivedBestConfirmedNode: ${queueEntry.receivedBestConfirmedNode}` - // ) - // return true - // } else if (confirmOrChallenge.message === 'challenge') { - // let isBetterThanCurrentChallenge = false - // let receivedChallenger: Shardus.NodeWithRank - - // // add the challenge to the queueEntry if it is from a unique node - // if (queueEntry.uniqueChallenges[confirmOrChallenge.sign.owner] == null) { - // queueEntry.uniqueChallenges[confirmOrChallenge.sign.owner] = confirmOrChallenge - // queueEntry.uniqueChallengesCount++ - // if (this.stateManager.consensusLog) - // this.mainLogger.debug( - // `tryAppendMessage: ${queueEntry.logID} unique challenge added. ${Utils.safeStringify( - // queueEntry.uniqueChallenges - // )}` - // ) - // } - - // this.mainLogger.debug( - // `tryAppendMessage: ${ - // queueEntry.logID - // } challenge received and processing. 
queueEntry.receivedBestChallenge: ${Utils.safeStringify( - // queueEntry.receivedBestChallenge - // )}` - // ) - // if (!queueEntry.receivedBestChallenge) isBetterThanCurrentChallenge = true - // else if (queueEntry.receivedBestChallenge.nodeId === confirmOrChallenge.nodeId) - // isBetterThanCurrentChallenge = false - // else { - // // Compare ranks - // if (queueEntry.executionGroupMap.has(confirmOrChallenge.nodeId)) { - // receivedChallenger = queueEntry.executionGroupMap.get( - // confirmOrChallenge.nodeId - // ) as Shardus.NodeWithRank - // } - // isBetterThanCurrentChallenge = receivedChallenger.rank < queueEntry.receivedBestChallenger.rank - // } - - // if (!isBetterThanCurrentChallenge) { - // if (logFlags.debug) - // this.mainLogger.debug( - // `tryAppendMessage: ${queueEntry.logID} challenge is not better than current challenge` - // ) - // return false - // } - - // queueEntry.receivedBestChallenge = confirmOrChallenge - - // if (receivedChallenger) { - // queueEntry.receivedBestChallenger = receivedChallenger - // } else { - // if (queueEntry.executionGroupMap.has(confirmOrChallenge.nodeId)) { - // queueEntry.receivedBestChallenger = queueEntry.executionGroupMap.get( - // confirmOrChallenge.nodeId - // ) as Shardus.NodeWithRank - // } - // } - // if (logFlags.debug) - // this.mainLogger.debug( - // `tryAppendMessage: ${ - // queueEntry.logID - // } challenge received and processed. queueEntry.receivedBestChallenge: ${Utils.safeStringify( - // queueEntry.receivedBestChallenge - // )}, receivedBestChallenger: ${queueEntry.receivedBestChallenger}` - // ) - // return true - // } - // } - /** * tryAppendVote * if we have not seen this vote yet search our list of votes and append it in @@ -4163,102 +1308,6 @@ class TransactionConsenus { queueEntry.newVotes = true return true - // } else { - // if (queueEntry.acceptVoteMessage === false || queueEntry.appliedReceipt2 != null) { - // if (queueEntry.acceptVoteMessage === false) - // /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455103 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: gossipHandlerAV:f no_accept`) - // if (queueEntry.appliedReceipt2 != null) - // /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455103 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: gossipHandlerAV:f applied2_not_null`) - // return false - // } - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('tryAppendVote', `${queueEntry.logID}`, `vote: ${utils.stringifyReduce(vote)}`) - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`tryAppendVote collectedVotes: ${queueEntry.logID} vote: ${utils.stringifyReduce(vote)}`) - - // const isEligibleToVote = - // queueEntry.eligibleNodeIdsToVote.has(vote.node_id) && - // this.crypto.verify(vote as SignedObject, queueEntry.executionGroupMap.get(vote.node_id).publicKey) - - // if (!isEligibleToVote) { - // /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455103 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: gossipHandlerAV:f not_eligible`) - // if (logFlags.debug) { - // this.mainLogger.debug( - // `tryAppendVote: logId:${ - // queueEntry.logID - // } received node is not part of eligible nodes to vote, vote: ${Utils.safeStringify( - // vote - // )}, eligibleNodesToVote: ${Utils.safeStringify(queueEntry.eligibleNodeIdsToVote)}` - 
// ) - // } - // return - // } - - // // todo: podA check if the vote is valid - // const isVoteValid = true - // if (!isVoteValid) return - - // queueEntry.topVoters.add(vote.node_id) - // // we will mark the last received vote timestamp - // const now = shardusGetTime() - // queueEntry.lastVoteReceivedTimestamp = now - // if (queueEntry.firstVoteReceivedTimestamp === 0) queueEntry.firstVoteReceivedTimestamp = now - - // // Compare with existing vote. Skip we already have it or node rank is lower than ours - // let isBetterThanCurrentVote - // let receivedVoter: Shardus.NodeWithRank - // if (!queueEntry.receivedBestVote){ - // isBetterThanCurrentVote = true - // //do not compare the hash we still need to allow gossip to flow if the hash is the - // //same but the vote is better. - // //else if (queueEntry.receivedBestVoteHash === this.calculateVoteHash(vote)){ - // } else { - // // Compare ranks - // if (queueEntry.executionGroupMap.has(vote.node_id)) { - // receivedVoter = queueEntry.executionGroupMap.get(vote.node_id) as Shardus.NodeWithRank - // } - // isBetterThanCurrentVote = receivedVoter.rank > queueEntry.receivedBestVoter.rank - // /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455103 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: gossipHandlerAV ${receivedVoter.rank} > ${queueEntry.receivedBestVoter.rank}`) - // /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455103 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: gossipHandlerAV ${NodeList.activeIdToPartition.get(receivedVoter.id)} : ${NodeList.activeIdToPartition.get(queueEntry.receivedBestVoter.id)}`) - // } - - // if (!isBetterThanCurrentVote) { - // if (logFlags.debug || this.stateManager.consensusLog) { - // this.mainLogger.debug( - // `tryAppendVote: ${queueEntry.logID} received vote is NOT better than current vote. lastReceivedVoteTimestamp: ${queueEntry.lastVoteReceivedTimestamp}` - // ) - // } - // if (receivedVoter) { - // /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455103 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: gossipHandlerAV:f worser_voter ${NodeList.activeIdToPartition.get(receivedVoter.id)}`) - // } else { - // /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455103 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: gossipHandlerAV:f worser_voter`) - // } - // return false - // } - - // queueEntry.receivedBestVote = vote - // queueEntry.receivedBestVoteHash = this.calculateVoteHash(vote) - // queueEntry.newVotes = true - // if (logFlags.debug || this.stateManager.consensusLog) { - // this.mainLogger.debug( - // `tryAppendVote: ${queueEntry.logID} received vote is better than current vote. 
lastReceivedVoteTimestamp: ${queueEntry.lastVoteReceivedTimestamp}` - // ) - // } - // if (receivedVoter) { - // queueEntry.receivedBestVoter = receivedVoter - // /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455103 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: gossipHandlerAV:t receivedVoter ${NodeList.activeIdToPartition.get(receivedVoter.id)}`) - // return true - // } else { - // if (queueEntry.executionGroupMap.has(vote.node_id)) { - // queueEntry.receivedBestVoter = queueEntry.executionGroupMap.get( - // vote.node_id - // ) as Shardus.NodeWithRank - // /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455103 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: gossipHandlerAV:t receivedVoter2 ${NodeList.activeIdToPartition.get(queueEntry.receivedBestVoter.id)}`) - // return true - // } - // /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455103 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: gossipHandlerAV:f no_receivedVoter`) - // return false - // } - // // No need to forward the gossip here as it's being done in the gossip handler - // } } tryAppendVoteHash(queueEntry: QueueEntry, voteHash: AppliedVoteHash): boolean { diff --git a/src/state-manager/TransactionQueue.archiver.ts b/src/state-manager/TransactionQueue.archiver.ts new file mode 100644 index 000000000..e31511a01 --- /dev/null +++ b/src/state-manager/TransactionQueue.archiver.ts @@ -0,0 +1,259 @@ +import { P2P as P2PTypes, Utils } from '@shardeum-foundation/lib-types' +import { logFlags } from '../logger' +import { shardusGetTime } from '../network' +import * as Archivers from '../p2p/Archivers' +import { config as configContext } from '../p2p/Context' +import { getGlobalTxReceipt } from '../p2p/GlobalAccounts' +import * as Shardus from '../shardus/shardus-types' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { + ArchiverReceipt, + QueueEntry, + SignedReceipt, + WrappedResponses, +} from './state-manager-types' + +export const archiverMethods = { + async getArchiverReceiptFromQueueEntry(queueEntry: QueueEntry): Promise { + if (!queueEntry.preApplyTXResult || !queueEntry.preApplyTXResult.applyResponse) { + /* prettier-ignore */ if (logFlags.verbose) console.log('getArchiverReceiptFromQueueEntry : no preApplyTXResult or applyResponse, returning null receipt') + /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', 'getArchiverReceiptFromQueueEntry no preApplyTXResult or applyResponse') + return null as ArchiverReceipt + } + + const txId = queueEntry.acceptedTx.txId + const timestamp = queueEntry.acceptedTx.timestamp + const globalModification = queueEntry.globalModification + + let signedReceipt = null as SignedReceipt | P2PTypes.GlobalAccountsTypes.GlobalTxReceipt + if (globalModification) { + signedReceipt = getGlobalTxReceipt(queueEntry.acceptedTx.txId) as P2PTypes.GlobalAccountsTypes.GlobalTxReceipt + /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : globalModification signedReceipt txid', txId) + /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : globalModification signedReceipt signs', txId, Utils.safeStringify(signedReceipt.signs)) + /* prettier-ignore */ if (logFlags.important_as_error) 
console.log('getArchiverReceiptFromQueueEntry : globalModification signedReceipt tx', txId, Utils.safeStringify(signedReceipt.tx)) + } else { + signedReceipt = this.stateManager.getSignedReceipt(queueEntry) as SignedReceipt + /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : nonGlobal signedReceipt txid', txId) + /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : nonGlobal signedReceipt proposal', txId, Utils.safeStringify(signedReceipt.proposal)) + /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : nonGlobal signedReceipt proposalHash', txId, Utils.safeStringify(signedReceipt.proposalHash)) + /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : nonGlobal signedReceipt signaturePack', txId, Utils.safeStringify(signedReceipt.signaturePack)) + /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : nonGlobal signedReceipt voteOffsets', txId, Utils.safeStringify(signedReceipt.voteOffsets)) + } + if (!signedReceipt) { + /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', 'getArchiverReceiptFromQueueEntry no signedReceipt') + /* prettier-ignore */ if (logFlags.important_as_error) console.log(`getArchiverReceiptFromQueueEntry: signedReceipt is null for txId: ${txId} timestamp: ${timestamp} globalModification: ${globalModification}`) + return null as ArchiverReceipt + } + + const accountsToAdd: { [accountId: string]: Shardus.AccountsCopy } = {} + const beforeAccountsToAdd: { [accountId: string]: Shardus.AccountsCopy } = {} + + if (globalModification) { + signedReceipt = signedReceipt as P2PTypes.GlobalAccountsTypes.GlobalTxReceipt + if (signedReceipt.tx && signedReceipt.tx.addressHash != '' && !beforeAccountsToAdd[signedReceipt.tx.address]) { + console.log(queueEntry.collectedData[signedReceipt.tx.address].stateId, signedReceipt.tx.addressHash) + if (queueEntry.collectedData[signedReceipt.tx.address].stateId === signedReceipt.tx.addressHash) { + const isGlobal = this.stateManager.accountGlobals.isGlobalAccount(signedReceipt.tx.addressHash) + const account = queueEntry.collectedData[signedReceipt.tx.address] + const accountCopy = { + accountId: account.accountId, + data: account.data, + hash: account.stateId, + timestamp: account.timestamp, + isGlobal, + } as Shardus.AccountsCopy + beforeAccountsToAdd[account.accountId] = accountCopy + } else { + console.log( + `getArchiverReceiptFromQueueEntry: before stateId does not match addressHash for txId: ${txId} timestamp: ${timestamp} globalModification: ${globalModification}` + ) + } + } + } else if (this.config.stateManager.includeBeforeStatesInReceipts) { + // simulate debug case + if (configContext.mode === 'debug' && configContext.debug.beforeStateFailChance > Math.random()) { + for (const accountId in queueEntry.collectedData) { + const account = queueEntry.collectedData[accountId] + account.stateId = 'debugFail2' + } + } + + const fileredBeforeStateToSend = [] + const badBeforeStateAccounts = [] + + for (const account of Object.values(queueEntry.collectedData)) { + if (typeof this.app.beforeStateAccountFilter !== 'function' || this.app.beforeStateAccountFilter(account)) { + fileredBeforeStateToSend.push(account.accountId) + } + } + + // prepare before state accounts + for (const accountId of fileredBeforeStateToSend) { + signedReceipt = signedReceipt as SignedReceipt + // check 
if our beforeState account hash is the same as the vote in the receipt2 + const index = signedReceipt.proposal.accountIDs.indexOf(accountId) + if (index === -1) continue + const account = queueEntry.collectedData[accountId] + if (account == null) { + badBeforeStateAccounts.push(accountId) + continue + } + if (account.stateId !== signedReceipt.proposal.beforeStateHashes[index]) { + badBeforeStateAccounts.push(accountId) + } + } + + if (badBeforeStateAccounts.length > 0) { + nestedCountersInstance.countEvent( + 'stateManager', + 'badBeforeStateAccounts in getArchiverReceiptFromQueueEntry', + badBeforeStateAccounts.length + ) + + // repair bad before state accounts + const wrappedResponses: WrappedResponses = await this.requestInitialData(queueEntry, badBeforeStateAccounts) + for (const accountId in wrappedResponses) { + queueEntry.collectedData[accountId] = wrappedResponses[accountId] + } + } + + // add before state accounts + for (const accountId of fileredBeforeStateToSend) { + const account = queueEntry.collectedData[accountId] + const isGlobal = this.stateManager.accountGlobals.isGlobalAccount(account.accountId) + const accountCopy = { + accountId: account.accountId, + data: account.data, + hash: account.stateId, + timestamp: account.timestamp, + isGlobal, + } as Shardus.AccountsCopy + beforeAccountsToAdd[account.accountId] = accountCopy + } + } + + let isAccountsMatchWithReceipt2 = true + const accountWrites = queueEntry.preApplyTXResult?.applyResponse?.accountWrites + + if (globalModification) { + if (accountWrites === null || accountWrites.length === 0) { + console.log('No account update in global Modification tx', txId, timestamp) + } + } else if ( + accountWrites != null && + accountWrites.length === (signedReceipt as SignedReceipt).proposal.accountIDs.length + ) { + signedReceipt = signedReceipt as SignedReceipt + for (const account of accountWrites) { + const indexInVote = signedReceipt.proposal.accountIDs.indexOf(account.accountId) + if (signedReceipt.proposal.afterStateHashes[indexInVote] !== account.data.stateId) { + // console.log('Found afterStateHash mismatch', account.accountId, receipt2.proposal.afterStateHashes[indexInVote], account.data.stateId) + isAccountsMatchWithReceipt2 = false + break + } + } + } else { + isAccountsMatchWithReceipt2 = false + } + + let finalAccounts = [] + let appReceiptData = queueEntry.preApplyTXResult?.applyResponse?.appReceiptData || null + if (isAccountsMatchWithReceipt2) { + finalAccounts = accountWrites + } else { + signedReceipt = signedReceipt as SignedReceipt + // request the final accounts and appReceiptData + let success = false + let count = 0 + const maxRetry = 3 + const nodesToAskKeys = signedReceipt.signaturePack?.map((signature) => signature.owner) + + // retry 3 times if the request fails + while (success === false && count < maxRetry) { + count++ + const requestedData = await this.requestFinalData( + queueEntry, + signedReceipt.proposal.accountIDs, + nodesToAskKeys, + true + ) + if (requestedData && requestedData.wrappedResponses && requestedData.appReceiptData) { + success = true + for (const accountId in requestedData.wrappedResponses) { + finalAccounts.push(requestedData.wrappedResponses[accountId]) + } + appReceiptData = requestedData.appReceiptData + } + } + } + + // override with the accounts in accountWrites + for (const account of finalAccounts) { + const isGlobal = this.stateManager.accountGlobals.isGlobalAccount(account.accountId) + const accountCopy = { + accountId: account.accountId, + data: account.data.data, + 
timestamp: account.timestamp, + hash: account.data.stateId, + isGlobal, + } as Shardus.AccountsCopy + accountsToAdd[account.accountId] = accountCopy + } + + // MIGHT NOT NEED THIS NOW WITH THE POQo RECEIPT REWRITE. NEED TO CONFIRM + // if (!globalModification && this.useNewPOQ === false) { + // appliedReceipt = appliedReceipt as AppliedReceipt2 + // if (appliedReceipt.appliedVote) { + // delete appliedReceipt.appliedVote.node_id + // delete appliedReceipt.appliedVote.sign + // delete appliedReceipt.confirmOrChallenge + // // Update the app_data_hash with the app_data_hash from the appliedVote + // appliedReceipt.app_data_hash = appliedReceipt.appliedVote.app_data_hash + // } + // } + + const archiverReceipt: ArchiverReceipt = { + tx: { + originalTxData: queueEntry.acceptedTx.data, + txId: queueEntry.acceptedTx.txId, + timestamp: queueEntry.acceptedTx.timestamp, + }, + signedReceipt, + appReceiptData, + beforeStates: [...Object.values(beforeAccountsToAdd)], + afterStates: [...Object.values(accountsToAdd)], + cycle: queueEntry.txGroupCycle, + globalModification, + } + /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : archiverReceipt', txId, Utils.safeStringify(archiverReceipt)) + /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : originalTxData object', txId, Utils.safeStringify(archiverReceipt.tx.originalTxData)) + + return archiverReceipt + }, + addOriginalTxDataToForward(queueEntry: QueueEntry): void { + if (logFlags.verbose) console.log('originalTxData', queueEntry.acceptedTx.txId, queueEntry.acceptedTx.timestamp) + const { acceptedTx } = queueEntry + const originalTxData = { + txId: acceptedTx.txId, + originalTxData: acceptedTx.data, + cycle: queueEntry.cycleToRecordOn, + timestamp: acceptedTx.timestamp, + } + // const signedOriginalTxData: any = this.crypto.sign(originalTxData) // maybe we don't need to send by signing it + Archivers.instantForwardOriginalTxData(originalTxData) + }, + + async addReceiptToForward(queueEntry: QueueEntry, debugString = ''): Promise { + if (logFlags.verbose) + console.log('addReceiptToForward', queueEntry.acceptedTx.txId, queueEntry.acceptedTx.timestamp, debugString) + const archiverReceipt = await this.getArchiverReceiptFromQueueEntry(queueEntry) + Archivers.instantForwardReceipts([archiverReceipt]) + this.receiptsForwardedTimestamp = shardusGetTime() + this.forwardedReceiptsByTimestamp.set(this.receiptsForwardedTimestamp, archiverReceipt) + // this.receiptsToForward.push(archiverReceipt) + }, + + getReceiptsToForward(): ArchiverReceipt[] { + return [...this.forwardedReceiptsByTimestamp.values()] + } +} \ No newline at end of file diff --git a/src/state-manager/TransactionQueue.context.ts b/src/state-manager/TransactionQueue.context.ts new file mode 100644 index 000000000..451e54f9b --- /dev/null +++ b/src/state-manager/TransactionQueue.context.ts @@ -0,0 +1,82 @@ +import { ProcessQueueStats, QueueEntry, SeenAccounts } from './state-manager-types' +import * as Shardus from '../shardus/shardus-types' +import { P2PModuleContext } from '../p2p/Context' + +export interface TransactionQueueContext { + // State management properties + lastProcessStats: { [limitName: string]: ProcessQueueStats } + queueReads: Set + queueWrites: Set + queueReadWritesOld: Set + pendingTransactionQueue: QueueEntry[] + transactionProcessingQueueRunning: boolean + largePendingQueueReported: boolean + stateManager: any + isStuckProcessing: boolean + 
debugLastProcessingQueueStartTime: number + processingLastRunTime: number + processingMinRunBreak: number + transactionQueueHasRemainingWork: boolean + profiler: any + _transactionQueue: QueueEntry[] + queueRestartCounter: number + app: Shardus.App + queueTimingFixes: boolean + pendingTransactionQueueByID: Map + _transactionQueueByID: Map + logger: any + mainLogger: any + seqLogger: any + config: Shardus.StrictServerConfiguration + queueStopped: boolean + debugLastAwaitedCall: string + + // Additional properties from second interface + queueEntryCounter: number + p2p: P2PModuleContext + executeInOneShard: boolean + usePOQo: boolean + useNewPOQ: boolean + archivedQueueEntries: QueueEntry[] + archivedQueueEntriesByID: Map + archivedQueueEntryMaxCount: number + crypto: any + + // Methods + statemanager_fatal: (key: string, log: string) => void + updateTxState: (queueEntry: QueueEntry, state: string, note?: string) => void + processQueueEntry: (queueEntry: QueueEntry, processStats: ProcessQueueStats, seenAccounts: SeenAccounts) => Promise + archiveQueueEntry: (queueEntry: QueueEntry) => void + checkReadyForTxApply: () => Promise + processQueue_accountSeen: (seenAccounts: SeenAccounts, queueEntry: QueueEntry) => boolean + processQueue_getUpstreamTx: (seenAccounts: SeenAccounts, queueEntry: QueueEntry) => QueueEntry | null + processQueue_markAccountsSeen: (seenAccounts: SeenAccounts, queueEntry: QueueEntry) => void + processQueue_accountSeen2: (seenAccounts: SeenAccounts, queueEntry: QueueEntry) => boolean + processQueue_markAccountsSeen2: (seenAccounts: SeenAccounts, queueEntry: QueueEntry) => void + processQueue_clearAccountsSeen: (seenAccounts: SeenAccounts, queueEntry: QueueEntry) => void + processQueue_debugAccountData: (queueEntry: QueueEntry, app: Shardus.App) => string + removeFromQueue: (queueEntry: QueueEntry, currentIndex: number) => void + setTXExpired: (queueEntry: QueueEntry, currentIndex: number, message: string) => void + setTxAlmostExpired: (queueEntry: QueueEntry, currentIndex: number, message: string) => void + + // Additional methods from second interface + isAccountInQueue: (accountId: string) => boolean + txDebugStartTiming: (queueEntry: QueueEntry, tag: string) => void + txDebugMarkStartTime: (queueEntry: QueueEntry, tag: string) => void + isTxInPendingNonceQueue: (accountId: string, txId: string) => boolean + addTransactionToNonceQueue: (nonceQueueItem: any) => { success: boolean; reason?: string; alreadyAdded?: boolean } + getQueueEntrySafe: (txId: string) => QueueEntry | null + updateHomeInformation: (queueEntry: QueueEntry) => void + orderNodesByRank: (nodes: any[], key: string) => any[] + computeNodeRank: (node: any, key: string) => number + queueEntryGetTransactionGroup: (queueEntry: QueueEntry, tryUpdate?: boolean) => Shardus.Node[] + queueEntryGetConsensusGroup: (queueEntry: QueueEntry) => Shardus.Node[] + getStartAndEndIndexOfTargetGroup: (targetGroup: string[], transactionGroup: any[]) => { startIndex: number; endIndex: number } + addOriginalTxDataToForward: (queueEntry: QueueEntry) => void + computeTxSieveTime: (queueEntry: QueueEntry) => number + queueEntryPrePush: (queueEntry: QueueEntry) => void + shareCompleteDataToNeighbours: (queueEntry: QueueEntry) => Promise + queueEntryAddData: (queueEntry: QueueEntry, data: Shardus.WrappedResponse, signatureCheck?: boolean) => void + txDebugMarkEndTime: (queueEntry: QueueEntry, tag: string) => void + dumpTxDebugToStatList: (queueEntry: QueueEntry) => void +} \ No newline at end of file diff --git 
a/src/state-manager/TransactionQueue.core.commiting.ts b/src/state-manager/TransactionQueue.core.commiting.ts new file mode 100644 index 000000000..64fae5aa6 --- /dev/null +++ b/src/state-manager/TransactionQueue.core.commiting.ts @@ -0,0 +1,175 @@ +import { logFlags } from '../logger' +import { shardusGetTime } from '../network' +import { config as configContext } from '../p2p/Context' +import * as utils from '../utils' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { + QueueEntry, + SeenAccounts, +} from './state-manager-types' +import TransactionQueue, { DebugComplete } from './TransactionQueue' + +export async function handleCommitingState( + this: TransactionQueue, + queueEntry: QueueEntry, + currentIndex: number, + seenAccounts: SeenAccounts, + currentTime: number, + processStats: any, + shortID: string, + localRestartCounter: number, + app: any +) { +///////////////////////////////////////////--commiting--//////////////////////////////////////////////////////////////// +if (this.processQueue_accountSeen(seenAccounts, queueEntry) === false) { + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + + // TODO STATESHARDING4 Check if we have already commited the data from a receipt we saw earlier + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 commiting : ${queueEntry.logID} `) + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_commitingTx', `${shortID}`, `qId: ${queueEntry.entryID} qRst:${localRestartCounter} values: ${this.processQueue_debugAccountData(queueEntry, app)} AcceptedTransaction: ${utils.stringifyReduce(queueEntry.acceptedTx)}`) + + // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug( ` processAcceptedTxQueue2. ${queueEntry.entryID} timestamp: ${queueEntry.txKeys.timestamp}`) + + if (queueEntry.debugFail_failNoRepair) { + this.updateTxState(queueEntry, 'fail') + this.removeFromQueue(queueEntry, currentIndex) + nestedCountersInstance.countEvent('stateManager', 'debugFail_failNoRepair') + this.statemanager_fatal( + `processAcceptedTxQueue_debugFail_failNoRepair`, + `processAcceptedTxQueue_debugFail_failNoRepair tx: ${shortID} cycle:${ + queueEntry.cycleToRecordOn + } accountkeys: ${utils.stringifyReduce(queueEntry.uniqueWritableKeys)}` + ) + this.processQueue_clearAccountsSeen(seenAccounts, queueEntry) + return + } + + const wrappedStates = queueEntry.collectedData // Object.values(queueEntry.collectedData) + + try { + let canCommitTX = true + let hasReceiptFail = false + if (queueEntry.noConsensus === true) { + // dont have a receipt for a non consensus TX. not even sure if we want to keep that! 
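+          // A noConsensus TX has no receipt, so the local pre-apply result alone decides whether it can be committed.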
+ if (queueEntry.preApplyTXResult.passed === false) { + canCommitTX = false + } + } else if (queueEntry.signedReceipt != null) { + // the final state of the queue entry will be pass or fail based on the receipt + if (queueEntry.signedReceipt.proposal.applied === false) { + canCommitTX = false + hasReceiptFail = true + } + } else if (queueEntry.receivedSignedReceipt != null) { + // the final state of the queue entry will be pass or fail based on the receipt + if (queueEntry.receivedSignedReceipt.proposal.applied === false) { + canCommitTX = false + if (configContext.stateManager.receiptRemoveFix) { + hasReceiptFail = true + } else { + hasReceiptFail = false + } + } + } else { + canCommitTX = false + } + + nestedCountersInstance.countEvent( + 'stateManager', + `canCommitTX: ${canCommitTX}, hasReceiptFail: ${hasReceiptFail}` + ) + + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.debug) this.mainLogger.debug('shrd_commitingTx', `${shortID}`, `canCommitTX: ${canCommitTX}, hasReceiptFail: ${hasReceiptFail}`) + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_commitingTx', `${shortID}`, `canCommitTX: ${canCommitTX} `) + if (canCommitTX) { + // this.mainLogger.debug(` processAcceptedTxQueue2. applyAcceptedTransaction ${queueEntry.entryID} timestamp: ${queueEntry.txKeys.timestamp} queuerestarts: ${localRestartCounter} queueLen: ${this.newAcceptedTxQueue.length}`) + + // Need to go back and think on how this was supposed to work: + //queueEntry.acceptedTx.transactionGroup = queueEntry.transactionGroup // Used to not double count txProcessed + + //try { + this.profiler.profileSectionStart('commit') + + const awaitStart = shardusGetTime() + /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.commitConsensedTransaction()' ) + await this.commitConsensedTransaction(queueEntry) + /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.commitConsensedTransaction()', DebugComplete.Completed ) + this.updateSimpleStatsObject( + processStats.awaitStats, + 'commitConsensedTransaction', + shardusGetTime() - awaitStart + ) + + if (queueEntry.repairFinished) { + // saw a TODO comment above and before I axe it want to confirm what is happening after we repair a receipt. + // shouldn't get here putting this in to catch if we do + this.statemanager_fatal(`processAcceptedTxQueue_commitingRepairedReceipt`, `${shortID} `) + nestedCountersInstance.countEvent('processing', 'commiting a repaired TX...') + } + + nestedCountersInstance.countEvent('stateManager', 'committed tx') + if (queueEntry.hasValidFinalData === false) { + nestedCountersInstance.countEvent('stateManager', 'commit state fix FinalDataFlag') + queueEntry.hasValidFinalData = true + } + + //} finally { + this.profiler.profileSectionEnd('commit') + //} + } + if (logFlags.verbose) + console.log('commit commit', queueEntry.acceptedTx.txId, queueEntry.acceptedTx.timestamp) + if (this.config.p2p.experimentalSnapshot) this.addReceiptToForward(queueEntry, 'commit') + + if (hasReceiptFail) { + // endpoint to allow dapp to execute something that depends on a transaction failing + + const applyReponse = queueEntry.preApplyTXResult.applyResponse // TODO STATESHARDING4 ...
if we get here from a non standard path may need to get this data from somewhere else + + this.app.transactionReceiptFail(queueEntry.acceptedTx.data, wrappedStates, applyReponse) + } + } catch (ex) { + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug('processAcceptedTxQueue2 commiting Transaction:' + ex.name + ': ' + ex.message + ' at ' + ex.stack) + this.statemanager_fatal( + `processAcceptedTxQueue2b_ex`, + 'processAcceptedTxQueue2 commiting Transaction:' + ex.name + ': ' + ex.message + ' at ' + ex.stack + ) + } finally { + this.processQueue_clearAccountsSeen(seenAccounts, queueEntry) + + if (queueEntry.noConsensus === true) { + // dont have a receipt for a non consensus TX. not even sure if we want to keep that! + if (queueEntry.preApplyTXResult.passed === true) { + this.updateTxState(queueEntry, 'pass') + } else { + this.updateTxState(queueEntry, 'fail') + } + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 commiting finished : noConsensus:${queueEntry.state} ${queueEntry.logID} `) + } else if (queueEntry.signedReceipt != null) { + // the final state of the queue entry will be pass or fail based on the receipt + if (queueEntry.signedReceipt.proposal.applied === true) { + this.updateTxState(queueEntry, 'pass') + } else { + this.updateTxState(queueEntry, 'fail') + } + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 commiting finished : Recpt:${queueEntry.state} ${queueEntry.logID} `) + } else if (queueEntry.receivedSignedReceipt != null) { + // the final state of the queue entry will be pass or fail based on the receipt + if (queueEntry.receivedSignedReceipt.proposal.applied === true) { + this.updateTxState(queueEntry, 'pass') + } else { + this.updateTxState(queueEntry, 'fail') + } + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 commiting finished : recvRecpt:${queueEntry.state} ${queueEntry.logID} `) + } else { + this.updateTxState(queueEntry, 'fail') + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`processAcceptedTxQueue2 commiting finished : no receipt ${queueEntry.logID} `) + } + + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_commitingTxFinished', `${queueEntry.acceptedTx.txId}`, `qId: ${queueEntry.entryID} qRst:${localRestartCounter} values: ${this.processQueue_debugAccountData(queueEntry, app)} AcceptedTransaction: ${utils.stringifyReduce(queueEntry.acceptedTx)}`) + + //moved to end of finally because this does some compacting on the queue entry + this.removeFromQueue(queueEntry, currentIndex) + } + } +} \ No newline at end of file diff --git a/src/state-manager/TransactionQueue.core.consensing.ts b/src/state-manager/TransactionQueue.core.consensing.ts new file mode 100644 index 000000000..d0a524dc2 --- /dev/null +++ b/src/state-manager/TransactionQueue.core.consensing.ts @@ -0,0 +1,270 @@ +import { logFlags } from '../logger' +import { shardusGetTime } from '../network' +import { config as configContext } from '../p2p/Context' +import * as utils from '../utils' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { + QueueEntry, + SeenAccounts, + SignedReceipt, +} from './state-manager-types' +import TransactionQueue from './TransactionQueue' + +export async function handleConsensingState( + this: TransactionQueue, + queueEntry: QueueEntry, + seenAccounts: SeenAccounts, + currentTime: number, + processStats: any, + hasReceivedApplyReceipt: boolean, + 
currentIndex: number, + shortID: string, + hasApplyReceipt: boolean, + txAge: number, + timeM5: number +) { + /////////////////////////////////////////--consensing--////////////////////////////////////////////////////////////////// + if (this.processQueue_accountSeen(seenAccounts, queueEntry) === false) { + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + + let didNotMatchReceipt = false + + let finishedConsensing = false + let result: SignedReceipt + + // if (this.usePOQo) { + // Try to produce receipt + // If receipt made, tellx128 it to execution group + // that endpoint should then factTellCorrespondingNodesFinalData + const receipt2 = queueEntry.receivedSignedReceipt ?? queueEntry.signedReceipt + if (receipt2 != null) { + if (logFlags.debug) + this.mainLogger.debug( + `processAcceptedTxQueue2 consensing : ${queueEntry.logID} receiptRcv:${hasReceivedApplyReceipt}` + ) + nestedCountersInstance.countEvent(`consensus`, 'tryProduceReceipt receipt2 != null') + //we have a receipt2, so we can make a receipt + result = queueEntry.signedReceipt + } else { + result = await this.stateManager.transactionConsensus.tryProduceReceipt(queueEntry) + } + + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 tryProduceReceipt result : ${queueEntry.logID} ${utils.stringifyReduce(result)}`) + + //todo this is false.. and prevents some important stuff. + //need to look at appliedReceipt2 + const signedReceipt = this.stateManager.getSignedReceipt(queueEntry) + if (signedReceipt != null) { + //TODO share receipt with corresponding index + + if (logFlags.debug || this.stateManager.consensusLog) { + this.mainLogger.debug( + `processAcceptedTxQueue2 tryProduceReceipt final result : ${ + queueEntry.logID + } ${utils.stringifyReduce(result)}` + ) + } + + const isReceiptMatchPreApply = this.stateManager.transactionConsensus.hasAppliedReceiptMatchingPreApply( + queueEntry, + result + ) + if (logFlags.debug || this.stateManager.consensusLog) { + this.mainLogger.debug( + `processAcceptedTxQueue2 tryProduceReceipt isReceiptMatchPreApply : ${queueEntry.logID} ${isReceiptMatchPreApply}` + ) + } + + // not a challenge receipt but check the tx result + if (isReceiptMatchPreApply && queueEntry.isInExecutionHome) { + nestedCountersInstance.countEvent('consensus', 'hasAppliedReceiptMatchingPreApply: true') + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_madeReceipt', `${shortID}`, `qId: ${queueEntry.entryID} `) + + //todo check cant_apply flag to make sure a vote can form with it! + //also check if failed votes will work...? 
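+                // Gate for committing: the receipt proposal must have been pre-applicable (cant_preApply === false) and the consensed result must be a pass; any other outcome fails the TX and removes it from the queue below.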
+ if ( + this.stateManager.getReceiptProposal(queueEntry).cant_preApply === false && + this.stateManager.getReceiptResult(queueEntry) === true + ) { + this.updateTxState(queueEntry, 'commiting') + queueEntry.hasValidFinalData = true + finishedConsensing = true + } else { + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_finishedFailReceipt1', `${shortID}`, `qId: ${queueEntry.entryID} `) + // we are finished since there is nothing to apply + if (logFlags.debug || this.stateManager.consensusLog) { + /* prettier-ignore */ this.mainLogger.debug(`processAcceptedTxQueue2 tryProduceReceipt failed result: false : ${queueEntry.logID} ${utils.stringifyReduce(result)}`) + /* prettier-ignore */ this.statemanager_fatal(`processAcceptedTxQueue2`, `tryProduceReceipt failed result: false : ${queueEntry.logID} ${utils.stringifyReduce(result)}`) + } + nestedCountersInstance.countEvent( + 'consensus', + 'tryProduceReceipt failed result = false or' + ' challenged' + ) + this.updateTxState(queueEntry, 'fail') + this.removeFromQueue(queueEntry, currentIndex) + return + } + + if ( + queueEntry.globalModification === false && + finishedConsensing === true && + this.executeInOneShard && + queueEntry.isInExecutionHome + ) { + //forward all finished data to corresponding nodes + const awaitStart = shardusGetTime() + // This is an async function but we do not await it + if (configContext.stateManager.attachDataToReceipt === false) { + if (configContext.p2p.useFactCorrespondingTell) { + this.factTellCorrespondingNodesFinalData(queueEntry) + } + // else { + // this.tellCorrespondingNodesFinalData(queueEntry) + // } + } + this.updateSimpleStatsObject( + processStats.awaitStats, + 'tellCorrespondingNodesFinalData', + shardusGetTime() - awaitStart + ) + } + //continue + } else { + nestedCountersInstance.countEvent( + 'consensus', + `hasAppliedReceiptMatchingPreApply: false, isInExecutionHome: ${queueEntry.isInExecutionHome}` + ) + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_gotReceiptNoMatch1', `${shortID}`, `qId: ${queueEntry.entryID} `) + if (this.stateManager.getReceiptResult(queueEntry) === false) { + // We got a reciept, but the consensus is that this TX was not applied. 
+ /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_finishedFailReceipt2', `${shortID}`, `qId: ${queueEntry.entryID} `) + // we are finished since there is nothing to apply + /* prettier-ignore */ if (logFlags.verbose) this.statemanager_fatal( + `consensing: on a failed receipt`, + `consensing: got a failed receipt for ` + + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}` + ) + if (logFlags.debug || this.stateManager.consensusLog) { + /* prettier-ignore */ this.mainLogger.debug(`processAcceptedTxQueue2 tryProduceReceipt failed result: false : ${queueEntry.logID} ${utils.stringifyReduce(result)}`) + /* prettier-ignore */ this.statemanager_fatal(`processAcceptedTxQueue2`, `tryProduceReceipt failed result: false : ${queueEntry.logID} ${utils.stringifyReduce(result)}`) + } + nestedCountersInstance.countEvent('consensus', 'consensed on failed result') + this.updateTxState(queueEntry, 'fail') + this.removeFromQueue(queueEntry, currentIndex) + return + } + didNotMatchReceipt = true + queueEntry.signedReceiptForRepair = result + + // queueEntry.appliedReceiptForRepair2 = this.stateManager.getReceipt2(queueEntry) + if (queueEntry.isInExecutionHome === false && queueEntry.signedReceipt != null) { + if (this.stateManager.consensusLog) + this.mainLogger.debug( + `processTransactions ${queueEntry.logID} we are not execution home, but we have a receipt2, go to await final data` + ) + this.updateTxState(queueEntry, 'await final data', 'processTx7') + } + } + } + if (finishedConsensing === false) { + // if we got a reciept while waiting see if we should use it (if our own vote matches) + if (hasReceivedApplyReceipt && queueEntry.receivedSignedReceipt != null) { + if ( + this.stateManager.transactionConsensus.hasAppliedReceiptMatchingPreApply( + queueEntry, + queueEntry.receivedSignedReceipt + ) + ) { + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_gotReceipt', `${shortID}`, `qId: ${queueEntry.entryID} `) + + //todo check cant_apply flag to make sure a vote can form with it! 
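+                // The receipt received from the network matches our own vote, so adopt it:
+                // commit when the proposal pre-applied cleanly and the receipt result is true (applied),
+                // otherwise fail the TX and drop it from the queue.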
+ if ( + this.stateManager.getReceiptProposal(queueEntry).cant_preApply === false && + this.stateManager.getReceiptResult(queueEntry) === true + ) { + this.updateTxState(queueEntry, 'commiting') + queueEntry.hasValidFinalData = true + finishedConsensing = true + } else { + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_finishedFailReceipt2', `${shortID}`, `qId: ${queueEntry.entryID} `) + // we are finished since there is nothing to apply + //this.statemanager_fatal(`consensing: repairToMatchReceipt failed`, `consensing: repairToMatchReceipt failed ` + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} recievedAppliedReceipt:${hasReceivedApplyReceipt} age:${txAge}`) + this.removeFromQueue(queueEntry, currentIndex) + this.updateTxState(queueEntry, 'fail') + return + } + + //continue + } else { + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_gotReceiptNoMatch2', `${shortID}`, `qId: ${queueEntry.entryID} `) + didNotMatchReceipt = true + queueEntry.signedReceiptForRepair = queueEntry.receivedSignedReceipt + + // queueEntry.appliedReceiptForRepair2 = this.stateManager.getReceipt2(queueEntry) + queueEntry.signedReceiptForRepair = this.stateManager.getSignedReceipt(queueEntry) + } + } else { + //just keep waiting for a reciept + if (this.config.p2p.stuckNGTInQueueFix && queueEntry.isNGT && txAge > timeM5) { + // entry is an NGT so we want to remove it if consensing fails to prevent from getting stuck + nestedCountersInstance.countEvent(`consensus`, 'removing NGT from queue after failed consensing') + this.updateTxState(queueEntry, 'fail') + this.removeFromQueue(queueEntry, currentIndex) + this.processQueue_clearAccountsSeen(seenAccounts, queueEntry) + return + } + } + + // we got a receipt but did not match it. + if (didNotMatchReceipt === true && queueEntry.isInExecutionHome) { + nestedCountersInstance.countEvent('stateManager', 'didNotMatchReceipt') + if (queueEntry.debugFail_failNoRepair) { + this.updateTxState(queueEntry, 'fail') + this.removeFromQueue(queueEntry, currentIndex) + nestedCountersInstance.countEvent('stateManager', 'debugFail_failNoRepair') + this.statemanager_fatal( + `processAcceptedTxQueue_debugFail_failNoRepair2`, + `processAcceptedTxQueue_debugFail_failNoRepair2 tx: ${shortID} cycle:${ + queueEntry.cycleToRecordOn + } accountkeys: ${utils.stringifyReduce(queueEntry.uniqueWritableKeys)}` + ) + this.processQueue_clearAccountsSeen(seenAccounts, queueEntry) + return + } + + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_didNotMatchReceipt', `${shortID}`, `qId: ${queueEntry.entryID} result:${queueEntry.signedReceiptForRepair.proposal.applied} `) + queueEntry.repairFinished = false + if (queueEntry.signedReceiptForRepair.proposal.applied === true) { + // need to start repair process and wait + //await note: it is best to not await this. it should be an async operation. 
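+                  // If the final account data travels with the receipt (attachDataToReceipt) and repair
+                  // is disabled for that case (noRepairIfDataAttached), we can simply wait for that data
+                  // in 'await final data'. Otherwise kick off repairToMatchReceipt (intentionally not
+                  // awaited) and park the TX in 'await repair' until the repair operation reports back.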
+ if ( + configContext.stateManager.noRepairIfDataAttached && + configContext.stateManager.attachDataToReceipt + ) { + // we have received the final data, so we can just go to "await final data" and commit the accounts + this.updateTxState(queueEntry, 'await final data') + } else { + this.stateManager.getTxRepair().repairToMatchReceipt(queueEntry) + this.updateTxState(queueEntry, 'await repair') + } + return + } else { + // We got a reciept, but the consensus is that this TX was not applied. + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_finishedFailReceipt3', `${shortID}`, `qId: ${queueEntry.entryID} `) + // we are finished since there is nothing to apply + this.statemanager_fatal( + `consensing: repairToMatchReceipt failed`, + `consensing: repairToMatchReceipt failed ` + + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}` + ) + this.removeFromQueue(queueEntry, currentIndex) + this.updateTxState(queueEntry, 'fail') + return + } + } + } + } else { + nestedCountersInstance.countEvent('consensus', 'busy waiting') + } + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) +} \ No newline at end of file diff --git a/src/state-manager/TransactionQueue.core.ts b/src/state-manager/TransactionQueue.core.ts new file mode 100644 index 000000000..7dbd7b84a --- /dev/null +++ b/src/state-manager/TransactionQueue.core.ts @@ -0,0 +1,1487 @@ +import { nestedCountersInstance } from '../utils/nestedCounters' +import { shardusGetTime, ipInfo } from '../network' +import * as utils from '../utils' +import { withTimeout } from '../utils' +import { logFlags } from '../logger' +import * as NodeList from '../p2p/NodeList' +import * as Self from '../p2p/Self' +import { SeenAccounts, ProcessQueueStats, QueueEntry, PreApplyAcceptedTransactionResult, SignedReceipt } from './state-manager-types' +import * as Shardus from '../shardus/shardus-types' +import { config as configContext } from '../p2p/Context' +import { profilerInstance } from '../utils/profiler' +import ShardFunctions from './shardFunctions' +import { StateManager as StateManagerTypes } from '@shardeum-foundation/lib-types' +import { Utils } from '@shardeum-foundation/lib-types' +import { handleConsensingState } from './TransactionQueue.core.consensing' +import { handleCommitingState } from './TransactionQueue.core.commiting' + +enum DebugComplete { + Incomplete = 0, + Completed = 1, +} + +export const coreMethods = { + async processTransactions(firstTime = false): Promise { + const seenAccounts: SeenAccounts = {} + let pushedProfilerTag = null + const startTime = shardusGetTime() + + const processStats: ProcessQueueStats = { + totalTime: 0, + inserted: 0, + sameState: 0, + stateChanged: 0, + //expired:0, + sameStateStats: {}, + stateChangedStats: {}, + awaitStats: {}, + } + + //this may help in the case where the queue has halted + this.lastProcessStats['current'] = processStats + + this.queueReads = new Set() + this.queueWrites = new Set() + this.queueReadWritesOld = new Set() + + try { + nestedCountersInstance.countEvent('processing', 'processing-enter') + + if (this.pendingTransactionQueue.length > 5000) { + /* prettier-ignore */ nestedCountersInstance.countEvent( 'stateManager', `newAcceptedTxQueueTempInjest>5000 leftRunning:${this.transactionProcessingQueueRunning} noShardCalcs:${ this.stateManager.currentCycleShardData == null } ` ) + + //report rare counter once + if 
(this.largePendingQueueReported === false) { + this.largePendingQueueReported = true + /* prettier-ignore */ nestedCountersInstance.countRareEvent( 'stateManager', `newAcceptedTxQueueTempInjest>5000 leftRunning:${this.transactionProcessingQueueRunning} noShardCalcs:${ this.stateManager.currentCycleShardData == null } ` ) + } + } + + if (this.transactionProcessingQueueRunning === true) { + /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', 'newAcceptedTxQueueRunning === true') + return + } + this.transactionProcessingQueueRunning = true + this.isStuckProcessing = false + this.debugLastProcessingQueueStartTime = shardusGetTime() + + // ensure there is some rest between processing loops + const timeSinceLastRun = startTime - this.processingLastRunTime + if (timeSinceLastRun < this.processingMinRunBreak) { + const sleepTime = Math.max(5, this.processingMinRunBreak - timeSinceLastRun) + await utils.sleep(sleepTime) + nestedCountersInstance.countEvent('processing', 'resting') + } + + if (this.transactionQueueHasRemainingWork && timeSinceLastRun > 500) { + /* prettier-ignore */ if (logFlags.verbose) this.statemanager_fatal(`processAcceptedTxQueue left busy and waited too long to restart`, `processAcceptedTxQueue left busy and waited too long to restart ${timeSinceLastRun / 1000} `) + } + + this.profiler.profileSectionStart('processQ') + + if (logFlags.seqdiagram) + this.mainLogger.info( + `0x10052024 ${ipInfo.externalIp} ${shardusGetTime()} 0x0000 processTransactions _transactionQueue.length ${ + this._transactionQueue.length + }` + ) + + if (this.stateManager.currentCycleShardData == null) { + nestedCountersInstance.countEvent('stateManager', 'currentCycleShardData == null early exit') + return + } + + if (this._transactionQueue.length === 0 && this.pendingTransactionQueue.length === 0) { + return + } + + if (this.queueRestartCounter == null) { + this.queueRestartCounter = 0 + } + this.queueRestartCounter++ + + const localRestartCounter = this.queueRestartCounter + + const timeM = this.stateManager.queueSitTime + // const timeM2 = timeM * 2 // 12s + // const timeM2_5 = timeM * 2.25 // 13.5s + // const timeM3 = timeM * 2.5 // 15s + const timeM2 = timeM * 2 + const timeM2_5 = timeM * 2.5 + const timeM3 = timeM * 3 + const timeM5 = timeM * 5 + let currentTime = shardusGetTime() + + const app = this.app + + // process any new queue entries that were added to the temporary list + if (this.pendingTransactionQueue.length > 0) { + for (const txQueueEntry of this.pendingTransactionQueue) { + nestedCountersInstance.countEvent('stateManager', 'processAcceptedTxQueue injest: kept TX') + + const timestamp = txQueueEntry.txKeys.timestamp + const acceptedTx = txQueueEntry.acceptedTx + const txId = acceptedTx.txId + // Find the time sorted spot in our queue to insert this TX into + // reverse loop because the news (largest timestamp) values are at the end of the array + // todo faster version (binary search? 
to find where we need to insert)
+ let index = this._transactionQueue.length - 1
+ // eslint-disable-next-line security/detect-object-injection
+ let lastTx = this._transactionQueue[index]
+ while (
+ index >= 0 &&
+ (timestamp > lastTx.txKeys.timestamp ||
+ (timestamp === lastTx.txKeys.timestamp && txId < lastTx.acceptedTx.txId))
+ ) {
+ index--
+ // eslint-disable-next-line security/detect-object-injection
+ lastTx = this._transactionQueue[index]
+ }
+
+ const age = shardusGetTime() - timestamp
+ if (age > timeM * 0.9) {
+ // It turns out the correct thing to check is the didSync flag: only report errors if we did not wait on this TX while syncing
+ if (txQueueEntry.didSync == false) {
+ /* prettier-ignore */ if (logFlags.verbose) this.statemanager_fatal(`processAcceptedTxQueue_oldTX.9 fromClient:${txQueueEntry.fromClient}`, `processAcceptedTxQueue cannot accept tx older than 0.9M ${timestamp} age: ${age} fromClient:${txQueueEntry.fromClient}`)
+ /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_processAcceptedTxQueueTooOld1', `${utils.makeShortHash(txQueueEntry.acceptedTx.txId)}`, 'processAcceptedTxQueue working on older tx ' + timestamp + ' age: ' + age)
+ //txQueueEntry.waitForReceiptOnly = true
+ }
+ }
+ if (age > timeM) {
+ /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_processAcceptedTxQueueTooOld2', `${utils.makeShortHash(txQueueEntry.acceptedTx.txId)}`, 'processAcceptedTxQueue working on older tx ' + timestamp + ' age: ' + age)
+ nestedCountersInstance.countEvent('processing', 'txExpired1 > M. waitForReceiptOnly')
+ txQueueEntry.waitForReceiptOnly = true
+ if (this.config.stateManager.txStateMachineChanges) {
+ this.updateTxState(txQueueEntry, 'await final data', 'processTx1')
+ } else {
+ this.updateTxState(txQueueEntry, 'consensing')
+ }
+ }
+
+ // do not ingest transactions that are long expired. there could be 10k+ of them if we are restarting the processing queue
+ if (age > timeM3 * 5 && this.stateManager.config.stateManager.discardVeryOldPendingTX === true) {
+ nestedCountersInstance.countEvent('txExpired', 'txExpired3 > M3 * 5. 
pendingTransactionQueue') + + continue + } + + txQueueEntry.approximateCycleAge = this.stateManager.currentCycleShardData.cycleNumber + //insert this tx into the main queue + this._transactionQueue.splice(index + 1, 0, txQueueEntry) + this._transactionQueueByID.set(txQueueEntry.acceptedTx.txId, txQueueEntry) + + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txQueueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: aging`) + + processStats.inserted++ + + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_addToQueue', `${txId}`, `AcceptedTransaction: ${txQueueEntry.logID} ts: ${txQueueEntry.txKeys.timestamp} acc: ${utils.stringifyReduce(txQueueEntry.txKeys.allKeys)} indexInserted: ${index + 1}`) + this.stateManager.eventEmitter.emit('txQueued', acceptedTx.txId) + } + this.pendingTransactionQueue = [] + this.pendingTransactionQueueByID.clear() + } + + let currentIndex = this._transactionQueue.length - 1 + + let lastLog = 0 + currentIndex++ //increment once so we can handle the decrement at the top of the loop and be safe about continue statements + + let lastRest = shardusGetTime() + while (this._transactionQueue.length > 0) { + // update current time with each pass through the loop + currentTime = shardusGetTime() + + if (currentTime - lastRest > 1000) { + //add a brief sleep if we have been in this loop for a long time + nestedCountersInstance.countEvent('processing', 'forcedSleep') + await utils.sleep(5) //5ms sleep + lastRest = currentTime + + if ( + currentTime - this.stateManager.currentCycleShardData.calculationTime > + this.config.p2p.cycleDuration * 1000 + 5000 + ) { + nestedCountersInstance.countEvent('processing', 'old cycle data >5s past due') + } + if ( + currentTime - this.stateManager.currentCycleShardData.calculationTime > + this.config.p2p.cycleDuration * 1000 + 11000 + ) { + nestedCountersInstance.countEvent('processing', 'very old cycle data >11s past due') + return //loop will restart. + } + } + + //Handle an odd case where the finally did not catch exiting scope. + if (pushedProfilerTag != null) { + this.profiler.profileSectionEnd(`process-${pushedProfilerTag}`) + this.profiler.profileSectionEnd(`process-patched1-${pushedProfilerTag}`) + pushedProfilerTag = null + } + + currentIndex-- + if (currentIndex < 0) { + break + } + + this.clearDebugAwaitStrings() + + // eslint-disable-next-line security/detect-object-injection + const queueEntry: QueueEntry | undefined = this._transactionQueue[currentIndex] + if (queueEntry == null) { + this.statemanager_fatal(`queueEntry is null`, `currentIndex:${currentIndex}`) + nestedCountersInstance.countEvent('processing', 'error: null queue entry. skipping to next TX') + continue + } + if (logFlags.seqdiagram) + this.mainLogger.info( + `0x10052024 ${ipInfo.externalIp} ${shardusGetTime()} 0x0001 currentIndex:${currentIndex} txId:${ + queueEntry.acceptedTx.txId + } state:${queueEntry.state}` + ) + const txTime = queueEntry.txKeys.timestamp + const txAge = currentTime - txTime + + this.debugRecentQueueEntry = queueEntry + + // current queue entry is younger than timeM, so nothing to do yet. 
+ if (txAge < timeM) { + break + } + + if (localRestartCounter < this.queueRestartCounter && lastLog !== this.queueRestartCounter) { + lastLog = this.queueRestartCounter + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('queueRestart_error', `${queueEntry.acceptedTx.txId}`, `qId: ${queueEntry.entryID} qRst:${localRestartCounter} qrstGlobal:${this.queueRestartCounter}}`) + } + + this.stateManager.debugTXHistory[queueEntry.logID] = queueEntry.state + const hasApplyReceipt = queueEntry.signedReceipt != null + const hasReceivedApplyReceipt = queueEntry.receivedSignedReceipt != null + const hasReceivedApplyReceiptForRepair = queueEntry.signedReceiptForRepair != null + const shortID = queueEntry.logID //`${utils.makeShortHash(queueEntry.acceptedTx.id)}` + + // on the off chance we are here with a pass of fail state remove this from the queue. + // log fatal because we do not want to get to this situation. + if (queueEntry.state === 'pass' || queueEntry.state === 'fail') { + this.statemanager_fatal( + `pass or fail entry should not be in queue`, + `txid: ${shortID} state: ${queueEntry.state} receiptEverRequested:${queueEntry.receiptEverRequested} age:${txAge}` + ) + this.removeFromQueue(queueEntry, currentIndex) + continue + } + + //turn off all this logic to futher simplify things + if (this.queueTimingFixes === false) { + // TIME OUT / EXPIRATION CHECKS + // Check if transactions have expired and failed, or if they have timed out and ne need to request receipts. + if (this.stateManager.accountSync.dataSyncMainPhaseComplete === true) { + // Everything in here is after we finish our initial sync + + // didSync: refers to the syncing process. True is for TXs that we were notified of + // but had to delay action on because the initial or a runtime thread was busy syncing on. + + // For normal didSync===false TXs we are expiring them after M3*2 + // This gives a bit of room to attempt a repair. + // if a repair or reciept process fails there are cases below to expire the the + // tx as early as time > M3 + if (txAge > timeM3 * 2 && queueEntry.didSync == false) { + //this.statistics.incrementCounter('txExpired') + //let seenInQueue = this.processQueue_accountSeen(seenAccounts, queueEntry) + + this.statemanager_fatal( + `txExpired1 > M3 * 2. NormalTX Timed out.`, + `txExpired txAge > timeM3*2 && queueEntry.didSync == false. ` + + `txid: ${shortID} state: ${ + queueEntry.state + } applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} hasReceivedApplyReceiptForRepair:${hasReceivedApplyReceiptForRepair} receiptEverRequested:${ + queueEntry.receiptEverRequested + } age:${txAge} ${utils.stringifyReduce(queueEntry.uniqueWritableKeys)}` + ) + if (queueEntry.receiptEverRequested && queueEntry.globalModification === false) { + this.statemanager_fatal( + `txExpired1 > M3 * 2 -!receiptEverRequested`, + `txExpired txAge > timeM3*2 && queueEntry.didSync == false. !receiptEverRequested ` + + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} hasReceivedApplyReceiptForRepair:${hasReceivedApplyReceiptForRepair} receiptEverRequested:${queueEntry.receiptEverRequested} age:${txAge}` + ) + } + if (queueEntry.globalModification) { + this.statemanager_fatal( + `txExpired1 > M3 * 2 -GlobalModification!!`, + `txExpired txAge > timeM3*2 && queueEntry.didSync == false. 
!receiptEverRequested ` + + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} hasReceivedApplyReceiptForRepair:${hasReceivedApplyReceiptForRepair} receiptEverRequested:${queueEntry.receiptEverRequested} age:${txAge}` + ) + } + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} txExpired ${utils.stringifyReduce(queueEntry.acceptedTx)}`) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} queueEntry.receivedSignedReceipt: ${utils.stringifyReduce(queueEntry.receivedSignedReceipt)}`) + + /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> M3 * 2. NormalTX Timed out. didSync == false. state:${queueEntry.state} globalMod:${queueEntry.globalModification}`) + + if (configContext.stateManager.disableTxExpiration === false) { + this.setTXExpired(queueEntry, currentIndex, 'old, timeM3 * 2') + continue + } + } + + // lots of logic about when we can repair or not repair/when to wait etc. + if (this.queueTimingFixes === false) { + // This is the expiry case where requestingReceiptFailed + if (txAge > timeM3 && queueEntry.requestingReceiptFailed) { + //this.statistics.incrementCounter('txExpired') + + this.statemanager_fatal( + `txExpired3 > M3. receiptRequestFail after Timed Out`, + `txExpired txAge > timeM3 && queueEntry.requestingReceiptFailed ` + + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}` + ) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} txExpired 3 requestingReceiptFailed ${utils.stringifyReduce(queueEntry.acceptedTx)} ${queueEntry.didWakeup}`) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} queueEntry.receivedSignedReceipt 3 requestingReceiptFailed: ${utils.stringifyReduce(queueEntry.receivedSignedReceipt)}`) + + /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> M3. receiptRequestFail after Timed Out. state:${queueEntry.state} globalMod:${queueEntry.globalModification}`) + + if (configContext.stateManager.disableTxExpiration === false) { + this.setTXExpired(queueEntry, currentIndex, 'old, timeM3, requestingReceiptFailed') + continue + } + } + + // This is the expiry case where repairFailed + // TODO. I think as soon as a repair as marked as failed we can expire and remove it from the queue + // But I am leaving this optimizaiton out for now since we really don't want to plan on repairs failing + if (txAge > timeM3 && queueEntry.repairFailed) { + //this.statistics.incrementCounter('txExpired') + + this.statemanager_fatal( + `txExpired3 > M3. 
repairFailed after Timed Out`, + `txExpired txAge > timeM3 && queueEntry.repairFailed ` + + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}` + ) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} txExpired 3 repairFailed ${utils.stringifyReduce(queueEntry.acceptedTx)} ${queueEntry.didWakeup}`) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} queueEntry.receivedSignedReceipt 3 repairFailed: ${utils.stringifyReduce(queueEntry.receivedSignedReceipt)}`) + + /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> M3. repairFailed after Timed Out. state:${queueEntry.state} globalMod:${queueEntry.globalModification}`) + + if (configContext.stateManager.disableTxExpiration === false) { + this.setTXExpired(queueEntry, currentIndex, 'old, timeM3, repairFailed') + continue + } + } + + // a few cases to wait for a receipt or request a receipt + if (queueEntry.state != 'await repair' && queueEntry.state != 'commiting') { + //Not yet expired case: getting close to expire so just move to consensing and wait. + //Just wait for receipt only if we are awaiting data and it is getting late + if ( + txAge > timeM2_5 && + queueEntry.m2TimeoutReached === false && + queueEntry.globalModification === false && + queueEntry.requestingReceipt === false + ) { + if (queueEntry.state == 'awaiting data') { + // no receipt yet, and state not committing + if (queueEntry.receivedSignedReceipt == null && queueEntry.signedReceipt == null) { + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`Wait for reciept only: txAge > timeM2_5 txid:${shortID} `) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txMissingReceipt3', `${shortID}`, `processAcceptedTxQueue ` + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}`) + + /* prettier-ignore */ nestedCountersInstance.countEvent('txMissingReceipt', `Wait for reciept only: txAge > timeM2.5. state:${queueEntry.state} globalMod:${queueEntry.globalModification}`) + queueEntry.waitForReceiptOnly = true + queueEntry.m2TimeoutReached = true + + if (this.config.stateManager.txStateMachineChanges) { + this.updateTxState(queueEntry, 'await final data', 'processTx2') + } else { + this.updateTxState(queueEntry, 'consensing') + } + continue + } + } + } + + //receipt requesting is not going to work with current timeouts. 
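+              // A receipt request is already in flight for this TX: keep its accounts reserved
+              // (marked as seen) and revisit the entry on a later pass of the queue.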
+ if (queueEntry.requestingReceipt === true) { + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + continue + } + + // The TX technically expired past M3, but we will now request reciept in hope that we can repair the tx + if ( + txAge > timeM3 && + queueEntry.requestingReceiptFailed === false && + queueEntry.globalModification === false + ) { + if (this.stateManager.hasReceipt(queueEntry) === false && queueEntry.requestingReceipt === false) { + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`txAge > timeM3 => ask for receipt now ` + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}`) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txMissingReceipt1', `txAge > timeM3 ${shortID}`, `syncNeedsReceipt ${shortID}`) + + const seen = this.processQueue_accountSeen(seenAccounts, queueEntry) + + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + this.queueEntryRequestMissingReceipt(queueEntry) + + /* prettier-ignore */ nestedCountersInstance.countEvent('txMissingReceipt', `txAge > timeM3 => ask for receipt now. state:${queueEntry.state} globalMod:${queueEntry.globalModification} seen:${seen}`) + queueEntry.waitForReceiptOnly = true + queueEntry.m2TimeoutReached = true + + if (this.config.stateManager.txStateMachineChanges) { + this.updateTxState(queueEntry, 'await final data', 'processTx3') + } else { + this.updateTxState(queueEntry, 'consensing') + } + continue + } + } + } + } + } else { + //check for TX older than 30x M3 and expire them + if (txAge > timeM3 * 50) { + //this.statistics.incrementCounter('txExpired') + + this.statemanager_fatal( + `txExpired4`, + `Still on inital syncing. txExpired txAge > timeM3 * 50. ` + + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}` + ) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} txExpired 4 ${utils.stringifyReduce(queueEntry.acceptedTx)}`) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} queueEntry.receivedSignedReceipt 4: ${utils.stringifyReduce(queueEntry.receivedSignedReceipt)}`) + + /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `txExpired txAge > timeM3 * 50. still syncing. state:${queueEntry.state} globalMod:${queueEntry.globalModification}`) + + this.setTXExpired(queueEntry, currentIndex, 'old, timeM3 * 50!!') + continue + } + } + } + + if (this.queueTimingFixes === true) { + //if we are still waiting on an upstream TX at this stage in the pipeline, + //then kill the TX because there is not much hope for it + //This will help make way for other TXs with a better chance + if (queueEntry.state === 'processing' || queueEntry.state === 'awaiting data') { + if (this.processQueue_accountSeen(seenAccounts, queueEntry) === true) { + //adding txSieve time! + if (txAge > timeM2 + queueEntry.txSieveTime) { + if (configContext.stateManager.disableTxExpiration === false) { + /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> M2 canceled due to upstream TXs. 
state:${queueEntry.state} hasAll:${queueEntry.hasAll} globalMod:${queueEntry.globalModification}`) + //todo only keep on for temporarliy + /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> M2 canceled due to upstream TXs. sieveT:${queueEntry.txSieveTime}`) + this.setTXExpired(queueEntry, currentIndex, 'm2, processing or awaiting') + if (configContext.stateManager.stuckTxQueueFix) continue // we need to skip this TX and move to the next one + } + if (configContext.stateManager.stuckTxQueueFix === false) continue + } + } + } + // check if we seen a vote or has a vote + const hasSeenVote = queueEntry.receivedBestVote != null || queueEntry.ourVote != null + const hasSeenConfirmation = queueEntry.receivedBestConfirmation != null + + // remove TXs that are stuck in the processing queue for 2 min + if ( + configContext.stateManager.removeStuckTxsFromQueue === true && + txAge > configContext.stateManager.stuckTxRemoveTime + ) { + nestedCountersInstance.countEvent( + 'txSafelyRemoved', + `stuck_in_consensus_1 ${configContext.stateManager.stuckTxRemoveTime / 1000}` + ) + this.statemanager_fatal( + `txSafelyRemoved_1`, + `stuck_in_consensus_3 txid: ${shortID} state: ${queueEntry.state} age:${txAge}` + ) + if (logFlags.txCancel) + this.statemanager_fatal(`txSafelyRemoved_1_dump`, `${this.getDebugQueueInfo(queueEntry)}`) + this.removeFromQueue(queueEntry, currentIndex) + continue + } + + if (configContext.stateManager.removeStuckTxsFromQueue2 === true) { + const timeSinceLastVoteMessage = + queueEntry.lastVoteReceivedTimestamp > 0 ? currentTime - queueEntry.lastVoteReceivedTimestamp : 0 + // see if we have been consensing for more than a long time. + // follow up code needs to handle this in a better way + // if there is a broken TX at the end of a chain. this will peel it off. + // any freshly exposed TXs will have a fair amount of time to be in consensus so + // this should minimize the risk of OOS. + if (timeSinceLastVoteMessage > configContext.stateManager.stuckTxRemoveTime2) { + nestedCountersInstance.countEvent( + 'txSafelyRemoved', + `stuck_in_consensus_2 tx waiting for votes more than ${ + configContext.stateManager.stuckTxRemoveTime2 / 1000 + } seconds. state: ${queueEntry.state}` + ) + this.statemanager_fatal( + `txSafelyRemoved_2`, + `stuck_in_consensus_2. waiting for votes. txid: ${shortID} state: ${ + queueEntry.state + } age:${txAge} tx first vote seen ${timeSinceLastVoteMessage / 1000} seconds ago` + ) + if (logFlags.txCancel) + this.statemanager_fatal(`txSafelyRemoved_2_dump`, `${this.getDebugQueueInfo(queueEntry)}`) + this.removeFromQueue(queueEntry, currentIndex) + continue + } + } + + if (configContext.stateManager.removeStuckTxsFromQueue3 === true) { + if (queueEntry.state === 'consensing' && txAge > configContext.stateManager.stuckTxRemoveTime3) { + const anyVotes = queueEntry.lastVoteReceivedTimestamp > 0 + nestedCountersInstance.countEvent( + 'txSafelyRemoved', + `stuck_in_consensus_3 tx in consensus more than ${ + configContext.stateManager.stuckTxRemoveTime3 / 1000 + } seconds. state: ${queueEntry.state} has seen vote: ${anyVotes}` + ) + this.statemanager_fatal( + `txSafelyRemoved_3`, + `stuck_in_consensus_3. 
txid: ${shortID} state: ${queueEntry.state} age:${txAge}` + ) + if (logFlags.txCancel) + this.statemanager_fatal(`txSafelyRemoved_3_dump`, `${this.getDebugQueueInfo(queueEntry)}`) + this.removeFromQueue(queueEntry, currentIndex) + continue + } + } + + if (txAge > timeM3 + configContext.stateManager.noVoteSeenExpirationTime && !hasSeenVote) { + // seen no vote but past timeM3 + noVoteSeenExpirationTime + // nestedCountersInstance.countEvent('txExpired', `> timeM3 + noVoteSeenExpirationTime`) + // this.mainLogger.error(`${queueEntry.logID} txAge > timeM3 + noVoteSeenExpirationTime general case. no vote seen`) + if (configContext.stateManager.disableTxExpiration === false) { + this.setTXExpired( + queueEntry, + currentIndex, + 'txAge > timeM3 + noVoteSeenExpirationTime general case. no vote seen' + ) + continue + } + } + if ( + txAge > timeM3 + configContext.stateManager.voteSeenExpirationTime && + hasSeenVote && + !hasSeenConfirmation + ) { + if (configContext.stateManager.disableTxExpiration === false) { + nestedCountersInstance.countEvent('txExpired', `> timeM3 + voteSeenExpirationTime`) + this.mainLogger.error( + `${queueEntry.logID} txAge > timeM3 + voteSeenExpirationTime general case has vote but fail to generate receipt` + ) + this.setTXExpired( + queueEntry, + currentIndex, + 'txAge > timeM3 + voteSeenExpirationTime general case has vote but fail' + ' to' + ' commit the tx' + ) + continue + } + } + if (txAge > timeM3 + configContext.stateManager.confirmationSeenExpirationTime) { + let shouldExpire = true + if (queueEntry.hasRobustConfirmation && queueEntry.isInExecutionHome) { + nestedCountersInstance.countEvent( + 'txExpired', + `> timeM3 + confirmSeenExpirationTime but hasRobustConfirmation = true, not expiring` + ) + shouldExpire = false + } + if (shouldExpire && configContext.stateManager.disableTxExpiration === false) { + nestedCountersInstance.countEvent( + 'txExpired', + `> timeM3 + confirmSeenExpirationTime hasRobustConfirmation: ${queueEntry.hasRobustConfirmation}` + ) + this.setTXExpired( + queueEntry, + currentIndex, + 'txAge > timeM3 + confirmSeenExpirationTime general case has' + + ' vote and robust confirmation but fail' + + ' to' + + ' commit the tx' + ) + continue + } + } + if (txAge > timeM3 + configContext.stateManager.confirmationSeenExpirationTime + 10000) { + // nestedCountersInstance.countEvent('txExpired', `txAge > timeM3 + confirmSeenExpirationTime + 10s`) + // maybe we missed the spread_appliedReceipt2 gossip, go to await final data if we have a confirmation + // we will request the final data (and probably receipt2) + if ( + configContext.stateManager.disableTxExpiration && + hasSeenVote && + queueEntry.firstVoteReceivedTimestamp > 0 + ) { + // nestedCountersInstance.countEvent('txExpired', `> timeM3 + confirmSeenExpirationTime state: ${queueEntry.state} hasSeenVote: ${hasSeenVote} hasSeenConfirmation: ${hasSeenConfirmation} waitForReceiptOnly: ${queueEntry.waitForReceiptOnly}`) + if (this.config.stateManager.txStateMachineChanges) { + if (configContext.stateManager.stuckTxQueueFix) { + if (configContext.stateManager.singleAccountStuckFix) { + const timeSinceVoteSeen = shardusGetTime() - queueEntry.firstVoteReceivedTimestamp + // if we has seenVote but still stuck in consensing state, we should go to await final data and ask receipt+data + + //note: this block below may not be what we want in POQo, but is behind a long time setting for now (in dapp) + //need to consider some clean up here + if ( + queueEntry.state === 'consensing' && + timeSinceVoteSeen > 
configContext.stateManager.stuckTxMoveTime + ) { + if (logFlags.debug) + this.mainLogger.debug( + `txId ${queueEntry.logID} move stuck consensing tx to await final data. timeSinceVoteSeen: ${timeSinceVoteSeen} ms` + ) + nestedCountersInstance.countEvent('consensus', `move stuck consensing tx to await final data.`) + this.updateTxState(queueEntry, 'await final data') + } + } else { + // make sure we are not resetting the state and causing state start timestamp to be updated repeatedly + if (queueEntry.state !== 'await final data' && queueEntry.state !== 'await repair') + this.updateTxState(queueEntry, 'await final data') + } + } else { + this.updateTxState(queueEntry, 'await final data', 'processTx4') + } + } else { + this.updateTxState(queueEntry, 'consensing') + } + if (configContext.stateManager.stuckTxQueueFix === false) continue // we should not skip this TX + } + if (configContext.stateManager.disableTxExpiration === false) { + this.setTXExpired(queueEntry, currentIndex, 'txAge > timeM3 + confirmSeenExpirationTime + 10s') + continue + } + } + + //If we are past time M2 there are few cases where we should give up on a TX right away + //Handle that here + if (txAge > timeM2) { + let expireTx = false + let reason = '' + //not sure this path can even happen. but we addding it for completeness in case it comes back (abilty to requets receipt) + if (queueEntry.requestingReceiptFailed) { + expireTx = true + reason = 'requestingReceiptFailed' + } + if (queueEntry.repairFailed) { + expireTx = true + reason = 'repairFailed' + } + if (expireTx) { + this.statemanager_fatal( + `txExpired3 > M2. fail ${reason}`, + `txExpired txAge > timeM2 fail ${reason} ` + + `txid: ${shortID} state: ${queueEntry.state} hasAll:${queueEntry.hasAll} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}` + ) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} txExpired >m2 fail ${reason} ${utils.stringifyReduce(queueEntry.acceptedTx)} ${queueEntry.didWakeup}`) + //if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} queueEntry.recievedAppliedReceipt 3 requestingReceiptFailed: ${utils.stringifyReduce(queueEntry.recievedAppliedReceipt)}`) + + /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> timeM2 fail ${reason} state:${queueEntry.state} hasAll:${queueEntry.hasAll} globalMod:${queueEntry.globalModification} `) + + if (configContext.stateManager.disableTxExpiration === false) { + this.setTXExpired(queueEntry, currentIndex, 'm2 ' + reason) + } + } + } + + //if(extendedTimeoutLogic === true){ + + //} + const isConsensing = queueEntry.state === 'consensing' + //let isCommiting = queueEntry.state === 'commiting' + const isAwaitingFinalData = queueEntry.state === 'await final data' + const isInExecutionHome = queueEntry.isInExecutionHome + //note this wont work with old receipts but we can depricate old receipts soon + const signedReceipt = this.stateManager.getSignedReceipt(queueEntry) + const hasReceipt = signedReceipt != null + const hasCastVote = queueEntry.ourVote != null + + let extraTime = 0 + //let cantExpire = false + let matchingReceipt = false + + if (isInExecutionHome && isConsensing && hasReceipt === false) { + //give a bit of extra time to wait for votes to come in + extraTime = timeM * 0.5 + } + + //this should cover isCommiting + if (isInExecutionHome && hasReceipt) { + matchingReceipt = 
this.stateManager.transactionConsensus.hasAppliedReceiptMatchingPreApply(queueEntry, null) + //give even more time + extraTime = timeM + } + + // if we have not added extra time yet then add time for a vote. + if (extraTime < timeM && hasCastVote === true) { + //this would be a way to just statically add to the time + //extraTime = timeM + const ageDiff = queueEntry.voteCastAge + timeM - timeM3 + if (ageDiff > 0) { + extraTime = ageDiff + } + } + + if (isAwaitingFinalData) { + if (hasReceipt) { + extraTime = timeM2 * 1.5 + } else { + extraTime = timeM + } + } + + //round extraTime to up to nearest 500ms (needed for counter aggregation) + if (extraTime > 0) { + extraTime = Math.ceil(extraTime / 500) * 500 + if (extraTime > timeM) { + extraTime = timeM + } + } + + // Have a hard cap where we ALMOST expire but NOT remove TXs from queue after time > M3 + if ( + txAge > timeM3 + extraTime && + queueEntry.isInExecutionHome && + queueEntry.almostExpired == null && + configContext.stateManager.disableTxExpiration === false + ) { + const hasVoted = queueEntry.ourVote != null + const receivedVote = queueEntry.receivedBestVote != null + if (!receivedVote && !hasVoted && queueEntry.almostExpired == null) { + this.statemanager_fatal( + `setTxAlmostExpired > M3. general case`, + `setTxAlmostExpired txAge > timeM3 general case ` + + `txid: ${shortID} state: ${queueEntry.state} hasAll:${ + queueEntry.hasAll + } applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge} hasReceipt:${hasReceipt} matchingReceipt:${matchingReceipt} isInExecutionHome:${isInExecutionHome} hasVote: ${ + queueEntry.receivedBestVote != null + }` + ) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `setTxAlmostExpired ${queueEntry.txGroupDebug} txExpired 3 requestingReceiptFailed ${utils.stringifyReduce(queueEntry.acceptedTx)} ${queueEntry.didWakeup}`) + //if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} queueEntry.recievedAppliedReceipt 3 requestingReceiptFailed: ${utils.stringifyReduce(queueEntry.recievedAppliedReceipt)}`) + + /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `setTxAlmostExpired > M3. general case state:${queueEntry.state} hasAll:${queueEntry.hasAll} globalMod:${queueEntry.globalModification} hasReceipt:${hasReceipt} matchingReceipt:${matchingReceipt} isInExecutionHome:${isInExecutionHome} hasVote: ${queueEntry.receivedBestVote != null}`) + /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `setTxAlmostExpired > M3. general case sieveT:${queueEntry.txSieveTime} extraTime:${extraTime}`) + + nestedCountersInstance.countEvent( + 'txExpired', + 'set to almostExpired because we have not voted' + ' or received' + ' a' + ' vote' + ) + this.setTxAlmostExpired(queueEntry, currentIndex, 'm3 general: almostExpired not voted or received vote') + } + // continue + } + + //TODO? could we remove a TX from the queu as soon as a receit was requested? + //TODO?2 should we allow a TX to use a repair op shortly after being expired? (it would have to be carefull, and maybe use some locking) + } + + const txStartTime = shardusGetTime() + + // HANDLE TX logic based on state. 
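+        // Per-state dispatch starts here. Each handler below ('syncing', 'aging', 'processing',
+        // 'awaiting data', 'consensing', 'await repair', 'await final data') is wrapped in a profiler
+        // section named after the state so time spent in each phase can be measured.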
+ try { + this.profiler.profileSectionStart(`process-${queueEntry.state}`) + if (logFlags.profiling_verbose) + profilerInstance.scopedProfileSectionStart(`scoped-process-${queueEntry.state}`, false) + pushedProfilerTag = queueEntry.state + + if (queueEntry.state === 'syncing') { + ///////////////////////////////////////////////--syncing--//////////////////////////////////////////////////////////// + // a queueEntry will be put in syncing state if it is queue up while we are doing initial syncing or if + // we are syncing a range of new edge partition data. + // we hold it in limbo until the syncing operation is complete. When complete all of these TXs are popped + // and put back into the queue. If it has been too long they will go into a repair to receipt mode. + // IMPORTANT thing is that we mark the accounts as seen, because we cant use this account data + // in TXs that happen after until this is resolved. + + //the syncing process is not fully reliable when popping synced TX. this is a backup check to see if we can get out of syncing state + if (queueEntry.syncCounter <= 0) { + nestedCountersInstance.countEvent('sync', 'syncing state needs bump') + + queueEntry.waitForReceiptOnly = true + + // old logic changed state here (seen commented out in the new mode) + if (this.config.stateManager.txStateMachineChanges) { + // this.updateTxState(queueEntry, 'await final data') + } else { + this.updateTxState(queueEntry, 'await final data', 'processTx5') + } + } + + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + } else if (queueEntry.state === 'aging') { + queueEntry.executionDebug = { a: 'go' } + ///////////////////////////////////////////--aging--//////////////////////////////////////////////////////////////// + // We wait in the aging phase, and mark accounts as seen to prevent a TX that is after this from using or changing data + // on the accounts in this TX + // note that code much earlier in the loop rejects any queueEntries younger than time M + this.updateTxState(queueEntry, 'processing') + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + } + if (queueEntry.state === 'processing') { + ////////////////////////////////////////--processing--/////////////////////////////////////////////////////////////////// + if (this.processQueue_accountSeen(seenAccounts, queueEntry) === false) { + // Processing is when we start doing real work. the task is to read and share the correct account data to the correct + // corresponding nodes and then move into awaiting data phase + + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + const time = shardusGetTime() + try { + // TODO re-evaluate if it is correct for us to share info for a global modifing TX. 
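+              // Forward the collected account data for this TX to its corresponding nodes.
+              // p2p.useFactCorrespondingTell selects the FACT-based corresponding tell; otherwise the
+              // legacy tellCorrespondingNodes path is used. Both calls are awaited and timed below.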
+ //if(queueEntry.globalModification === false) { + const awaitStart = shardusGetTime() + + if (this.executeInOneShard === true) { + /* prettier-ignore */ this.setDebugLastAwaitedCall('this.stateManager.transactionQueue.tellCorrespondingNodes(queueEntry)') + profilerInstance.scopedProfileSectionStart(`scoped-tellCorrespondingNodes`) + if (configContext.p2p.useFactCorrespondingTell) { + await this.factTellCorrespondingNodes(queueEntry) + } else { + await this.tellCorrespondingNodes(queueEntry) + } + profilerInstance.scopedProfileSectionEnd(`scoped-tellCorrespondingNodes`) + /* prettier-ignore */ this.setDebugLastAwaitedCall('this.stateManager.transactionQueue.tellCorrespondingNodes(queueEntry)', DebugComplete.Completed) + } else { + /* prettier-ignore */ this.setDebugLastAwaitedCall('this.stateManager.transactionQueue.tellCorrespondingNodesOld(queueEntry)') + //specific fixes were needed for tellCorrespondingNodes. tellCorrespondingNodesOld is the old version before fixes + if (configContext.p2p.useFactCorrespondingTell) { + await this.factTellCorrespondingNodes(queueEntry) + } else { + await this.tellCorrespondingNodes(queueEntry) + } + /* prettier-ignore */ this.setDebugLastAwaitedCall('this.stateManager.transactionQueue.tellCorrespondingNodesOld(queueEntry)', DebugComplete.Completed) + } + queueEntry.dataSharedTimestamp = shardusGetTime() + if (logFlags.debug) + /* prettier-ignore */ this.mainLogger.debug(`tellCorrespondingNodes: ${queueEntry.logID} dataSharedTimestamp: ${queueEntry.dataSharedTimestamp}`) + + this.updateSimpleStatsObject( + processStats.awaitStats, + 'tellCorrespondingNodes', + shardusGetTime() - awaitStart + ) + + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_processing', `${shortID}`, `qId: ${queueEntry.entryID} qRst:${localRestartCounter} values: ${this.processQueue_debugAccountData(queueEntry, app)}`) + //} + } catch (ex) { + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug('processAcceptedTxQueue2 tellCorrespondingNodes:' + ex.name + ': ' + ex.message + ' at ' + ex.stack) + this.statemanager_fatal( + `processAcceptedTxQueue2_ex`, + 'processAcceptedTxQueue2 tellCorrespondingNodes:' + ex.name + ': ' + ex.message + ' at ' + ex.stack + ) + queueEntry.dataSharedTimestamp = shardusGetTime() + nestedCountersInstance.countEvent(`processing`, `tellCorrespondingNodes fail`) + + queueEntry.executionDebug.process1 = 'tell fail' + } finally { + this.updateTxState(queueEntry, 'awaiting data', 'mainLoop') + + //if we are not going to execute the TX go strait to consensing + if ( + queueEntry.globalModification === false && + this.executeInOneShard && + queueEntry.isInExecutionHome === false + ) { + //is there a way to preemptively forward data without there being tons of repair.. + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 isInExecutionHome === false. 
set state = 'consensing' tx:${queueEntry.logID} ts:${queueEntry.acceptedTx.timestamp}`) + this.updateTxState(queueEntry, 'consensing', 'fromProcessing') + } + } + queueEntry.executionDebug.processElapsed = shardusGetTime() - time + } else { + const upstreamTx = this.processQueue_getUpstreamTx(seenAccounts, queueEntry) + if (upstreamTx == null) { + /* prettier-ignore */ if (logFlags.seqdiagram && queueEntry?.upStreamBlocker !== 'null') { + queueEntry.upStreamBlocker = 'null' // 'dirty' + this.seqLogger.info(`0x53455104 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: upstream:null`) + } + nestedCountersInstance.countEvent('processing', 'busy waiting the upstream tx.' + ' but it is null') + } else { + if (upstreamTx.logID === queueEntry.logID) { + /* prettier-ignore */ if (logFlags.seqdiagram && queueEntry?.upStreamBlocker !== upstreamTx.logID) { + queueEntry.upStreamBlocker = upstreamTx.logID + this.seqLogger.info(`0x53455104 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: upstream:same`) + } + //not 100% confident that upstreamTX check works. + if (upstreamTx === queueEntry) { + //this queue entry could be marked as seen due to aging above + nestedCountersInstance.countEvent( + 'processing', + 'busy waiting but the upstream tx reference matches our queue entry' + ) + } else { + nestedCountersInstance.countEvent('processing', 'busy waiting the upstream tx but it is same txId') + } + } else { + /* prettier-ignore */ if (logFlags.seqdiagram && queueEntry?.upStreamBlocker !== upstreamTx.logID) { + queueEntry.upStreamBlocker = upstreamTx.logID + this.seqLogger.info(`0x53455104 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: upstream:${upstreamTx.logID}`) + } + nestedCountersInstance.countEvent( + 'processing', + `busy waiting the upstream tx to complete. state ${queueEntry.state}` + ) + } + } + } + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + } + if (queueEntry.state === 'awaiting data') { + queueEntry.executionDebug.log = 'entered awaiting data' + + // TODO review this block below in more detail. + // check if we have all accounts + if (queueEntry.hasAll === false && txAge > timeM2) { + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + + if (queueEntry.pendingDataRequest === true) { + //early out after marking seen, because we are already asking for data + //need to review this in context of sharding + nestedCountersInstance.countEvent('processing', 'awaiting data. pendingDataRequest') + continue + } + + if (this.queueEntryHasAllData(queueEntry) === true) { + // I think this can't happen + /* prettier-ignore */ nestedCountersInstance.countEvent('processing', 'data missing at t>M2. but not really. investigate further') + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_hadDataAfterall', `${shortID}`, `This is kind of an error, and should not happen`) + continue + } + + //TODO check for receipt and move to repair state / await final data + + if (this.config.stateManager.awaitingDataCanBailOnReceipt) { + const signedReceipt = this.stateManager.getSignedReceipt(queueEntry) + if (signedReceipt != null) { + //we saw a receipt so we can move to await final data + nestedCountersInstance.countEvent( + 'processing', + 'awaitingDataCanBailOnReceipt: activated. 
tx state changed from awaiting data to await final data' + ) + this.updateTxState(queueEntry, 'await final data', 'receipt while waiting for initial data') + continue + } + } + + if (this.config.stateManager.requestAwaitedDataAllowed) { + // Before we turn this back on we must set the correct conditions. + // our node may be unaware of how other nodes have upstream blocking TXs that + // prevent them from sharing data. The only safe way to know if we can ask for data + // is to know another node has voted but this has some issues as well + + // 7. Manually request missing state + try { + nestedCountersInstance.countEvent('processing', 'data missing at t>M2. request data') + // Await note: current thinking is that is is best to not await this call. + this.queueEntryRequestMissingData(queueEntry) + } catch (ex) { + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug('processAcceptedTxQueue2 queueEntryRequestMissingData:' + ex.name + ': ' + ex.message + ' at ' + ex.stack) + this.statemanager_fatal( + `processAcceptedTxQueue2_missingData`, + 'processAcceptedTxQueue2 queueEntryRequestMissingData:' + + ex.name + + ': ' + + ex.message + + ' at ' + + ex.stack + ) + } + } + } else if (queueEntry.hasAll) { + queueEntry.executionDebug.log1 = 'has all' + + // we have all the data, but we need to make sure there are no upstream TXs using accounts we need first. + if (this.processQueue_accountSeen(seenAccounts, queueEntry) === false) { + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + + // As soon as we have all the data we preApply it and then send out a vote + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_preApplyTx', `${shortID}`, `qId: ${queueEntry.entryID} qRst:${localRestartCounter} values: ${this.processQueue_debugAccountData(queueEntry, app)} AcceptedTransaction: ${utils.stringifyReduce(queueEntry.acceptedTx)}`) + + try { + //This is a just in time check to make sure our involved accounts + //have not changed after our TX timestamp + const accountsValid = this.checkAccountTimestamps(queueEntry) + if (accountsValid === false) { + this.updateTxState(queueEntry, 'consensing') + queueEntry.preApplyTXResult = { + applied: false, + passed: false, + applyResult: 'failed account TS checks', + reason: 'apply result', + applyResponse: null, + } + continue + } + + if (queueEntry.transactionGroup.length > 1) { + queueEntry.robustAccountDataPromises = {} + } + + queueEntry.executionDebug.log2 = 'call pre apply' + const awaitStart = shardusGetTime() + /* prettier-ignore */ this.setDebugLastAwaitedCall('this.stateManager.transactionQueue.preApplyTransaction(queueEntry)') + let txResult = undefined + if (this.config.stateManager.transactionApplyTimeout > 0) { + //use the withTimeout from util/promises to call preApplyTransaction with a timeout + txResult = await withTimeout( + () => this.preApplyTransaction(queueEntry), + this.config.stateManager.transactionApplyTimeout + ) + if (txResult === 'timeout') { + //if we got a timeout, we need to set the txResult to null + txResult = null + nestedCountersInstance.countEvent('processing', 'timeout-preApply') + this.statemanager_fatal( + 'timeout-preApply', + `preApplyTransaction timed out for txid: ${ + queueEntry.logID + } ${this.getDebugProccessingStatus()}` + ) + //need to clear any stuck fifo locks. Would be better to solve upstream problems. 
+ this.stateManager.forceUnlockAllFifoLocks('timeout-preApply') + } + } else { + txResult = await this.preApplyTransaction(queueEntry) + } + + /* prettier-ignore */ this.setDebugLastAwaitedCall('this.stateManager.transactionQueue.preApplyTransaction(queueEntry)', DebugComplete.Completed) + this.updateSimpleStatsObject( + processStats.awaitStats, + 'preApplyTransaction', + shardusGetTime() - awaitStart + ) + + queueEntry.executionDebug.log3 = 'called pre apply' + queueEntry.executionDebug.txResult = txResult + + if ( + configContext.stateManager.forceVoteForFailedPreApply || + (txResult && txResult.applied === true) + ) { + this.updateTxState(queueEntry, 'consensing') + + queueEntry.preApplyTXResult = txResult + + // make sure our data wrappers are upt to date with the correct hash and timstamp + for (const key of Object.keys(queueEntry.collectedData)) { + // eslint-disable-next-line security/detect-object-injection + const wrappedAccount = queueEntry.collectedData[key] + const { timestamp, hash } = this.app.getTimestampAndHashFromAccount(wrappedAccount.data) + if (wrappedAccount.timestamp != timestamp) { + wrappedAccount.timestamp = timestamp + nestedCountersInstance.countEvent('transactionQueue', 'correctedTimestamp') + } + // eslint-disable-next-line security/detect-possible-timing-attacks + if (wrappedAccount.stateId != hash) { + wrappedAccount.stateId = hash + nestedCountersInstance.countEvent('transactionQueue', 'correctedHash') + } + } + + //Broadcast our vote + if (queueEntry.noConsensus === true) { + // not sure about how to share or generate an applied receipt though for a no consensus step + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_preApplyTx_noConsensus', `${shortID}`, ``) + + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 noConsensus : ${queueEntry.logID} `) + + this.updateTxState(queueEntry, 'commiting') + + queueEntry.hasValidFinalData = true + // TODO Global receipts? do we want them? + // if(queueEntry.globalModification === false){ + // //Send a special receipt because this is a set command. + // } + } else { + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_preApplyTx_createAndShareVote', `${shortID}`, ``) + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 calling createAndShareVote : ${queueEntry.logID} `) + const awaitStart = shardusGetTime() + + queueEntry.voteCastAge = txAge + /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.createAndShareVote(queueEntry)' ) + await this.stateManager.transactionConsensus.createAndShareVote(queueEntry) + /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.createAndShareVote(queueEntry)', DebugComplete.Completed ) + this.updateSimpleStatsObject( + processStats.awaitStats, + 'createAndShareVote', + shardusGetTime() - awaitStart + ) + } + } else { + //There was some sort of error when we tried to apply the TX + //Go directly into 'consensing' state, because we need to wait for a receipt that is good. + /* prettier-ignore */ nestedCountersInstance.countEvent('processing', `txResult apply error. 
applied: ${txResult?.applied}`) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`processAcceptedTxQueue2 txResult problem txid:${queueEntry.logID} res: ${utils.stringifyReduce(txResult)} `) + queueEntry.waitForReceiptOnly = true + + // if apply failed, we need to go to consensing to get a receipt + this.updateTxState(queueEntry, 'consensing') + //TODO: need to flag this case so that it does not artificially increase the network load + } + } catch (ex) { + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug('processAcceptedTxQueue2 preApplyAcceptedTransaction:' + ex.name + ': ' + ex.message + ' at ' + ex.stack) + this.statemanager_fatal( + `processAcceptedTxQueue2b_ex`, + 'processAcceptedTxQueue2 preApplyAcceptedTransaction:' + + ex.name + + ': ' + + ex.message + + ' at ' + + ex.stack + ) + } finally { + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_preapplyFinish', `${shortID}`, `qId: ${queueEntry.entryID} qRst:${localRestartCounter} values: ${this.processQueue_debugAccountData(queueEntry, app)} AcceptedTransaction: ${utils.stringifyReduce(queueEntry.acceptedTx)}`) + } + } else { + queueEntry.executionDebug.logBusy = 'has all, but busy' + nestedCountersInstance.countEvent('processing', 'has all, but busy') + } + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + } else { + // mark accounts as seen while we are waiting for data + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + } + } else if (queueEntry.state === 'consensing') { + await handleConsensingState.call(this, queueEntry, seenAccounts, currentTime, processStats, hasReceivedApplyReceipt, currentIndex, shortID, hasApplyReceipt, txAge, timeM5) + } + if (queueEntry.state === 'await repair') { + ///////////////////////////////////////////--await repair--//////////////////////////////////////////////////////////////// + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + + // Special state that we are put in if we are waiting for a repair to receipt operation to conclude + if (queueEntry.repairFinished === true) { + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_awaitRepair_repairFinished', `${shortID}`, `qId: ${queueEntry.entryID} result:${queueEntry.signedReceiptForRepair.proposal.applied} txAge:${txAge} `) + if (queueEntry.signedReceiptForRepair.proposal.applied === true) { + this.updateTxState(queueEntry, 'pass') + } else { + // technically should never get here, because we dont need to repair to a receipt when the network did not apply the TX + this.updateTxState(queueEntry, 'fail') + } + // most remove from queue at the end because it compacts the queue entry + this.removeFromQueue(queueEntry, currentIndex) + + // console.log('Await Repair Finished', queueEntry.acceptedTx.txId, queueEntry) + + nestedCountersInstance.countEvent('stateManager', 'repairFinished') + continue + } else if (queueEntry.repairFailed === true) { + // if the repair failed, we need to fail the TX. Let the patcher take care of it. + this.updateTxState(queueEntry, 'fail') + this.removeFromQueue(queueEntry, currentIndex) + nestedCountersInstance.countEvent('stateManager', 'repairFailed') + continue + } + } + if (queueEntry.state === 'await final data') { + //wait patiently for data to match receipt + //if we run out of time repair to receipt? 
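+        //Flow for this state: skip work if an upstream TX holds our accounts; otherwise compare each stored account in
+        //collectedFinalData against the hashes in the signed receipt. Missing data triggers requestFinalData after a wait,
+        //a hash mismatch triggers repairToMatchReceipt, and a full match commits the data and removes the TX from the queue.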
+ + if (this.processQueue_accountSeen(seenAccounts, queueEntry) === false) { + this.processQueue_markAccountsSeen(seenAccounts, queueEntry) + + // //temp hack ... hopefully this hack can go away + // if (queueEntry.recievedAppliedReceipt == null || queueEntry.recievedAppliedReceipt2 == null) { + // const result = await this.stateManager.transactionConsensus.tryProduceReceipt(queueEntry) + // if (result != null) { + // queueEntry.recievedAppliedReceipt = result + // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_awaitFinalData_hackReceipt', `${shortID}`, `qId: ${queueEntry.entryID} result:${utils.stringifyReduce(result)}`) + // } + // } + + // remove from queue if we have commited data for this tx + if (configContext.stateManager.attachDataToReceipt && queueEntry.accountDataSet === true) { + if (logFlags.debug) + this.mainLogger.debug( + `shrd_awaitFinalData_removeFromQueue : ${queueEntry.logID} because accountDataSet is true` + ) + this.removeFromQueue(queueEntry, currentIndex) + //this will possibly skip critical stats or exit steps that invoke a transaction applied event to the dapp + continue + } + + //collectedFinalData + //PURPL-74 todo: get the vote from queueEntry.receivedBestVote or receivedBestConfirmation instead of receipt2 + const signedReceipt = this.stateManager.getSignedReceipt(queueEntry) + const timeSinceAwaitFinalStart = + queueEntry.txDebug.startTimestamp['await final data'] > 0 + ? shardusGetTime() - queueEntry.txDebug.startTimestamp['await final data'] + : 0 + + const accountsNotStored = new Set() + //if we got a vote above then build a list of accounts that we store but are missing in our + //collectedFinalData + if (signedReceipt) { + let failed = false + let incomplete = false + let skipped = 0 + const missingAccounts = [] + const nodeShardData: StateManagerTypes.shardFunctionTypes.NodeShardData = + this.stateManager.currentCycleShardData.nodeShardData + + /* eslint-disable security/detect-object-injection */ + for (let i = 0; i < signedReceipt.proposal.accountIDs.length; i++) { + const accountID = signedReceipt.proposal.accountIDs[i] + const accountHash = signedReceipt.proposal.afterStateHashes[i] + + //only check for stored keys. + if (ShardFunctions.testAddressInRange(accountID, nodeShardData.storedPartitions) === false) { + skipped++ + accountsNotStored.add(accountID) + continue + } + + const wrappedAccount = queueEntry.collectedFinalData[accountID] + if (wrappedAccount == null) { + incomplete = true + queueEntry.debug.waitingOn = accountID + missingAccounts.push(accountID) + // break + } + if (wrappedAccount && wrappedAccount.stateId != accountHash) { + if (logFlags.debug) + this.mainLogger.debug( + `shrd_awaitFinalData_failed : ${queueEntry.logID} wrappedAccount.stateId != accountHash from the vote` + ) + failed = true + //we should be verifying the tate IDS that are pushed into collectedFinal data so this should not happen. 
if it does that could cause a stuck TX / local oos + nestedCountersInstance.countEvent( + 'stateManager', + `shrd_awaitFinalData failed state check wrappedAccount.stateId != accountHash` + ) + break + } + } + + // if we have missing accounts, we need to request the data + if (incomplete && missingAccounts.length > 0) { + nestedCountersInstance.countEvent( + 'stateManager', + `shrd_awaitFinalData missing accounts ${missingAccounts.length}` + ) + + // start request process for missing data if we waited long enough + let shouldStartFinalDataRequest = false + if (timeSinceAwaitFinalStart > 5000) { + shouldStartFinalDataRequest = true + if (logFlags.verbose) + /* prettier-ignore */ this.mainLogger.debug(`shrd_awaitFinalData_incomplete : ${queueEntry.logID} starting finalDataRequest timeSinceDataShare: ${timeSinceAwaitFinalStart}`) + } else if (txAge > timeM3) { + // by this time we should have all the data we need + shouldStartFinalDataRequest = true + if (logFlags.verbose) + /* prettier-ignore */ this.mainLogger.debug(`shrd_awaitFinalData_incomplete : ${queueEntry.logID} starting finalDataRequest txAge > timeM3 + confirmationSeenExpirationTime`) + } + + // start request process for missing data + const timeSinceLastFinalDataRequest = shardusGetTime() - queueEntry.lastFinalDataRequestTimestamp + if ( + this.config.stateManager.canRequestFinalData && + shouldStartFinalDataRequest && + timeSinceLastFinalDataRequest > 5000 + ) { + nestedCountersInstance.countEvent('stateManager', 'requestFinalData') + this.requestFinalData(queueEntry, missingAccounts) + queueEntry.lastFinalDataRequestTimestamp = shardusGetTime() + continue + } + } else { + nestedCountersInstance.countEvent('stateManager', 'shrd_awaitFinalData not missing accounts') + } + + /* eslint-enable security/detect-object-injection */ + + if (failed === true) { + nestedCountersInstance.countEvent('stateManager', 'shrd_awaitFinalData failed') + this.stateManager.getTxRepair().repairToMatchReceipt(queueEntry) + this.updateTxState(queueEntry, 'await repair') + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_awaitFinalData_failed', `${shortID}`, `qId: ${queueEntry.entryID} skipped:${skipped}`) + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`shrd_awaitFinalData_failed : ${queueEntry.logID} `) + continue + } + + // This is the case where awaiting final data has succeeded. Store the final data and remove TX from the queue + if (failed === false && incomplete === false) { + //setting this for completeness, but the TX will be removed from the queue at the end of this section + queueEntry.hasValidFinalData = true + + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_awaitFinalData_passed', `${shortID}`, `qId: ${queueEntry.entryID} skipped:${skipped}`) + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`shrd_awaitFinalData_passed : ${queueEntry.logID} skipped:${skipped}`) + + //TODO vote order should be in apply response order! + //This matters for certain daps only. 
No longer important to shardeum + const rawAccounts = [] + const accountRecords: Shardus.WrappedData[] = [] + /* eslint-disable security/detect-object-injection */ + for (let i = 0; i < signedReceipt.proposal.accountIDs.length; i++) { + const accountID = signedReceipt.proposal.accountIDs[i] + //skip accounts we don't store + if (accountsNotStored.has(accountID)) { + continue + } + const wrappedAccount = queueEntry.collectedFinalData[accountID] + rawAccounts.push(wrappedAccount.data) + accountRecords.push(wrappedAccount) + } + + nestedCountersInstance.countEvent( + 'stateManager', + `shrd_awaitFinalData got data, time to save it ${accountRecords.length}` + ) + /* eslint-enable security/detect-object-injection */ + //await this.app.setAccountData(rawAccounts) + const awaitStart = shardusGetTime() + /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.checkAndSetAccountData()' ) + await this.stateManager.checkAndSetAccountData( + accountRecords, + `txId: ${queueEntry.logID} awaitFinalData_passed`, + false + ) + + /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.checkAndSetAccountData()', DebugComplete.Completed ) + queueEntry.accountDataSet = true + // endpoint to allow dapp to execute something that depends on a transaction being approved. + this.app.transactionReceiptPass( + queueEntry.acceptedTx.data, + queueEntry.collectedFinalData, + queueEntry?.preApplyTXResult?.applyResponse, + false + ) + /* prettier-ignore */ if (logFlags.verbose) console.log('transactionReceiptPass 1', queueEntry.acceptedTx.txId, queueEntry) + this.updateSimpleStatsObject( + processStats.awaitStats, + 'checkAndSetAccountData', + shardusGetTime() - awaitStart + ) + + //log tx processed if needed + if ( + queueEntry != null && + queueEntry.transactionGroup != null && + this.p2p.getNodeId() === queueEntry.transactionGroup[0].id + ) { + if (queueEntry.globalModification === false) { + //temp way to make global modifying TXs not over count + this.stateManager.eventEmitter.emit('txProcessed') + } + } + + if ( + queueEntry.receivedSignedReceipt?.proposal?.applied === true || + queueEntry.signedReceipt?.proposal?.applied === true + ) { + this.updateTxState(queueEntry, 'pass') + } else { + /* prettier-ignore */ + if (logFlags.debug) this.mainLogger.error(`shrd_awaitFinalData_fail : ${queueEntry.logID} no receivedSignedReceipt. signedReceipt: ${utils.stringifyReduce(queueEntry.signedReceipt)}`); + this.updateTxState(queueEntry, 'fail') + } + this.removeFromQueue(queueEntry, currentIndex) + } + } else { + nestedCountersInstance.countEvent('stateManager', 'shrd_awaitFinalData noVote') + // todo: what to do if we have no vote? discuss with Omar + } + } else { + const upstreamTx = this.processQueue_getUpstreamTx(seenAccounts, queueEntry) + if (queueEntry.executionDebug == null) queueEntry.executionDebug = {} + queueEntry.executionDebug.logFinalData = `has all final data, but busy. upstreamTx: ${upstreamTx?.logID}` + if (upstreamTx == null) { + queueEntry.executionDebug.logFinalData = `has all final data, but busy. upstreamTx: null` + nestedCountersInstance.countEvent('stateManager', 'shrd_awaitFinalData busy. upstreamTx: null') + } else { + if (upstreamTx.acceptedTx.txId === queueEntry.acceptedTx.txId) { + nestedCountersInstance.countEvent('stateManager', 'shrd_awaitFinalData busy. upstreamTx same tx') + } else { + nestedCountersInstance.countEvent( + 'stateManager', + `shrd_awaitFinalData busy. 
upstream tx state: ${upstreamTx?.state}` + ) + } + } + } + } + if (queueEntry.state === 'commiting') { + await handleCommitingState.call(this, queueEntry, currentIndex, seenAccounts, currentTime, processStats, shortID, localRestartCounter, app) + } + if (queueEntry.state === 'canceled') { + ///////////////////////////////////////////////--canceled--//////////////////////////////////////////////////////////// + //need to review this state look unused + this.processQueue_clearAccountsSeen(seenAccounts, queueEntry) + this.removeFromQueue(queueEntry, currentIndex) + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 canceled : ${queueEntry.logID} `) + nestedCountersInstance.countEvent('stateManager', 'canceled') + } + } finally { + this.profiler.profileSectionEnd(`process-${pushedProfilerTag}`) + if (logFlags.profiling_verbose) + profilerInstance.scopedProfileSectionEnd(`scoped-process-${pushedProfilerTag}`) + + //let do some more stats work + const txElapsed = shardusGetTime() - txStartTime + if (queueEntry.state != pushedProfilerTag) { + processStats.stateChanged++ + this.updateSimpleStatsObject(processStats.stateChangedStats, pushedProfilerTag, txElapsed) + } else { + processStats.sameState++ + this.updateSimpleStatsObject(processStats.sameStateStats, pushedProfilerTag, txElapsed) + } + + pushedProfilerTag = null // clear the tag + } + } + } finally { + //Handle an odd case where the finally did not catch exiting scope. + if (pushedProfilerTag != null) { + this.profiler.profileSectionEnd(`process-${pushedProfilerTag}`) + this.profiler.profileSectionEnd(`process-patched1-${pushedProfilerTag}`) + pushedProfilerTag = null + } + + const processTime = shardusGetTime() - startTime + + processStats.totalTime = processTime + + this.finalizeSimpleStatsObject(processStats.awaitStats) + this.finalizeSimpleStatsObject(processStats.sameStateStats) + this.finalizeSimpleStatsObject(processStats.stateChangedStats) + + this.lastProcessStats['latest'] = processStats + if (processTime > 10000) { + nestedCountersInstance.countEvent('stateManager', 'processTime > 10s') + this.statemanager_fatal( + `processAcceptedTxQueue excceded time ${processTime / 1000} firstTime:${firstTime}`, + `processAcceptedTxQueue excceded time ${ + processTime / 1000 + } firstTime:${firstTime} stats:${Utils.safeStringify(processStats)}` + ) + this.lastProcessStats['10+'] = processStats + } else if (processTime > 5000) { + nestedCountersInstance.countEvent('stateManager', 'processTime > 5s') + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`processTime > 5s ${processTime / 1000} stats:${Utils.safeStringify(processStats)}`) + this.lastProcessStats['5+'] = processStats + } else if (processTime > 2000) { + nestedCountersInstance.countEvent('stateManager', 'processTime > 2s') + /* prettier-ignore */ if (logFlags.error && logFlags.verbose) this.mainLogger.error(`processTime > 2s ${processTime / 1000} stats:${Utils.safeStringify(processStats)}`) + this.lastProcessStats['2+'] = processStats + } else if (processTime > 1000) { + nestedCountersInstance.countEvent('stateManager', 'processTime > 1s') + /* prettier-ignore */ if (logFlags.error && logFlags.verbose) this.mainLogger.error(`processTime > 1s ${processTime / 1000} stats:${Utils.safeStringify(processStats)}`) + this.lastProcessStats['1+'] = processStats + } + + // restart loop if there are still elements in it + if (this._transactionQueue.length > 0 || this.pendingTransactionQueue.length > 0) { + this.transactionQueueHasRemainingWork 
= true + setTimeout(() => { + this.stateManager.tryStartTransactionProcessingQueue() + }, 15) + } else { + if (logFlags.seqdiagram) + this.mainLogger.info( + `0x10052024 ${ipInfo.externalIp} ${shardusGetTime()} 0x0000 processTransactions _transactionQueue.length 0` + ) + this.transactionQueueHasRemainingWork = false + } + + this.transactionProcessingQueueRunning = false + this.processingLastRunTime = shardusGetTime() + this.stateManager.lastSeenAccountsMap = seenAccounts + + this.profiler.profileSectionEnd('processQ') + } + } +} \ No newline at end of file diff --git a/src/state-manager/TransactionQueue.debug.ts b/src/state-manager/TransactionQueue.debug.ts new file mode 100644 index 000000000..8f3e6593c --- /dev/null +++ b/src/state-manager/TransactionQueue.debug.ts @@ -0,0 +1,269 @@ +import { shardusGetTime } from '../network' +import * as Self from '../p2p/Self' +import * as utils from '../utils' +import { QueueEntry } from './state-manager-types' +import { DebugComplete } from './TransactionQueue' + +const txStatBucketSize = { + default: [1, 2, 4, 8, 16, 30, 60, 125, 250, 500, 1000, 2000, 4000, 8000, 10000, 20000, 30000, 60000, 100000], +} + +export const debugMethods = { + dumpTxDebugToStatList(queueEntry: QueueEntry): void { + this.txDebugStatList.set(queueEntry.acceptedTx.txId, { ...queueEntry.txDebug }) + }, + + clearTxDebugStatList(): void { + this.txDebugStatList.clear() + }, + + printTxDebugByTxId(txId: string): string { + // get the txStat from the txDebugStatList + const txStat = this.txDebugStatList.get(txId) + if (txStat == null) { + return 'No txStat found' + } + let resultStr = '' + for (const key in txStat.duration) { + resultStr += `${key}: start:${txStat.startTimestamp[key]} end:${txStat.endTimestamp[key]} ${txStat.duration[key]} ms\n` + } + return resultStr + }, + + printTxDebug(): string { + const collector = {} + const totalTxCount = this.txDebugStatList.size() + + const indexes = [ + 'aging', + 'processing', + 'awaiting data', + 'preApplyTransaction', + 'consensing', + 'commiting', + 'await final data', + 'expired', + 'total_queue_time', + 'pass', + 'fail', + ] + + /* eslint-disable security/detect-object-injection */ + for (const [txId, txStat] of this.txDebugStatList.entries()) { + for (const key in txStat.duration) { + if (!collector[key]) { + collector[key] = {} + for (const bucket of txStatBucketSize.default) { + collector[key][bucket] = [] + } + } + const duration = txStat.duration[key] + for (const bucket of txStatBucketSize.default) { + if (duration < bucket) { + collector[key][bucket].push(duration) + break + } + } + } + } + const sortedCollector = {} + for (const key of indexes) { + sortedCollector[key] = { ...collector[key] } + } + /* eslint-enable security/detect-object-injection */ + const lines = [] + lines.push(`=> Total Transactions: ${totalTxCount}`) + for (const [key, collectorForThisKey] of Object.entries(sortedCollector)) { + lines.push(`\n => Tx ${key}: \n`) + for (let i = 0; i < Object.keys(collectorForThisKey).length; i++) { + // eslint-disable-next-line security/detect-object-injection + const time = Object.keys(collectorForThisKey)[i] + // eslint-disable-next-line security/detect-object-injection + const arr = collectorForThisKey[time] + if (!arr) continue + const percentage = (arr.length / totalTxCount) * 100 + const blockCount = Math.round(percentage / 2) + const blockStr = '|'.repeat(blockCount) + const lowerLimit = i === 0 ? 
0 : Object.keys(collectorForThisKey)[i - 1] + const upperLimit = time + const bucketDescription = `${lowerLimit} ms - ${upperLimit} ms:`.padEnd(19, ' ') + lines.push(`${bucketDescription} ${arr.length} ${percentage.toFixed(1).padEnd(5, ' ')}% ${blockStr} `) + } + } + + const strToPrint = lines.join('\n') + return strToPrint + }, + txDebugMarkStartTime(queueEntry: QueueEntry, state: string): void { + if (queueEntry.txDebug.startTime[state] == null) { + queueEntry.txDebug.startTime[state] = process.hrtime() + queueEntry.txDebug.startTimestamp[state] = shardusGetTime() + } + }, + txDebugMarkEndTime(queueEntry: QueueEntry, state: string): void { + if (queueEntry.txDebug.startTime[state]) { + const endTime = process.hrtime(queueEntry.txDebug.startTime[state]) + queueEntry.txDebug.endTime[state] = endTime + queueEntry.txDebug.endTimestamp[state] = shardusGetTime() + + const durationInNanoseconds = endTime[0] * 1e9 + endTime[1] + const durationInMilliseconds = durationInNanoseconds / 1e6 + + queueEntry.txDebug.duration[state] = durationInMilliseconds + + delete queueEntry.txDebug.startTime[state] + delete queueEntry.txDebug.endTime[state] + } + }, + clearDebugAwaitStrings(): void { + this.debugLastAwaitedCall = '' + this.debugLastAwaitedCallInner = '' + this.debugLastAwaitedAppCall = '' + this.debugLastAwaitedCallInnerStack = {} + this.debugLastAwaitedAppCallStack = {} + }, + getDebugProccessingStatus(): unknown { + let txDebug = '' + if (this.debugRecentQueueEntry != null) { + const app = this.app + const queueEntry = this.debugRecentQueueEntry + txDebug = `logID:${queueEntry.logID} state:${queueEntry.state} hasAll:${queueEntry.hasAll} globalMod:${queueEntry.globalModification}` + txDebug += ` qId: ${queueEntry.entryID} values: ${this.processQueue_debugAccountData( + queueEntry, + app + )} AcceptedTransaction: ${utils.stringifyReduce(queueEntry.acceptedTx)}` + } + return { + isStuckProcessing: this.isStuckProcessing, + transactionProcessingQueueRunning: this.transactionProcessingQueueRunning, + stuckProcessingCount: this.stuckProcessingCount, + stuckProcessingCyclesCount: this.stuckProcessingCyclesCount, + stuckProcessingQueueLockedCyclesCount: this.stuckProcessingQueueLockedCyclesCount, + processingLastRunTime: this.processingLastRunTime, + debugLastProcessingQueueStartTime: this.debugLastProcessingQueueStartTime, + debugLastAwaitedCall: this.debugLastAwaitedCall, + debugLastAwaitedCallInner: this.debugLastAwaitedCallInner, + debugLastAwaitedAppCall: this.debugLastAwaitedAppCall, + debugLastAwaitedCallInnerStack: this.debugLastAwaitedCallInnerStack, + debugLastAwaitedAppCallStack: this.debugLastAwaitedAppCallStack, + txDebug, + //todo get the transaction we are stuck on. what type is it? id etc. + } + }, + + clearStuckProcessingDebugVars(): void { + this.isStuckProcessing = false + this.debugLastAwaitedCall = '' + this.debugLastAwaitedCallInner = '' + this.debugLastAwaitedAppCall = '' + this.debugLastAwaitedCallInnerStack = {} + this.debugLastAwaitedAppCallStack = {} + + this.debugRecentQueueEntry = null + this.debugLastProcessingQueueStartTime = 0 + + this.stuckProcessingCount = 0 + this.stuckProcessingCyclesCount = 0 + this.stuckProcessingQueueLockedCyclesCount = 0 + }, + setDebugLastAwaitedCall(label: string, complete = DebugComplete.Incomplete): void { + this.debugLastAwaitedCall = label + (complete === DebugComplete.Completed ? 
' complete' : '') + this.debugLastAwaitedCallInner = '' + this.debugLastAwaitedAppCall = '' + }, + + setDebugLastAwaitedCallInner(label: string, complete = DebugComplete.Incomplete): void { + this.debugLastAwaitedCallInner = label + (complete === DebugComplete.Completed ? ' complete' : '') + this.debugLastAwaitedAppCall = '' + + if (complete === DebugComplete.Incomplete) { + // eslint-disable-next-line security/detect-object-injection + if (this.debugLastAwaitedCallInnerStack[label] == null) { + // eslint-disable-next-line security/detect-object-injection + this.debugLastAwaitedCallInnerStack[label] = 1 + } else { + // eslint-disable-next-line security/detect-object-injection + this.debugLastAwaitedCallInnerStack[label]++ + } + } else { + //decrement the count if it is greater than 1, delete the key if the count is 1 + // eslint-disable-next-line security/detect-object-injection + if (this.debugLastAwaitedCallInnerStack[label] != null) { + // eslint-disable-next-line security/detect-object-injection + if (this.debugLastAwaitedCallInnerStack[label] > 1) { + // eslint-disable-next-line security/detect-object-injection + this.debugLastAwaitedCallInnerStack[label]-- + } else { + // eslint-disable-next-line security/detect-object-injection + delete this.debugLastAwaitedCallInnerStack[label] + } + } + } + }, + setDebugSetLastAppAwait(label: string, complete = DebugComplete.Incomplete): void { + this.debugLastAwaitedAppCall = label + (complete === DebugComplete.Completed ? ' complete' : '') + + if (complete === DebugComplete.Incomplete) { + // eslint-disable-next-line security/detect-object-injection + if (this.debugLastAwaitedAppCallStack[label] == null) { + // eslint-disable-next-line security/detect-object-injection + this.debugLastAwaitedAppCallStack[label] = 1 + } else { + // eslint-disable-next-line security/detect-object-injection + this.debugLastAwaitedAppCallStack[label]++ + } + } else { + //decrement the count if it is greater than 1, delete the key if the count is 1 + // eslint-disable-next-line security/detect-object-injection + if (this.debugLastAwaitedAppCallStack[label] != null) { + // eslint-disable-next-line security/detect-object-injection + if (this.debugLastAwaitedAppCallStack[label] > 1) { + // eslint-disable-next-line security/detect-object-injection + this.debugLastAwaitedAppCallStack[label]-- + } else { + // eslint-disable-next-line security/detect-object-injection + delete this.debugLastAwaitedAppCallStack[label] + } + } + } + }, + getDebugQueueInfo(queueEntry: QueueEntry): any { + return { + txId: queueEntry.acceptedTx.txId, + tx: queueEntry.acceptedTx, + logID: queueEntry.logID, + nodeId: Self.id, + state: queueEntry.state, + hasAll: queueEntry.hasAll, + hasShardInfo: queueEntry.hasShardInfo, + isExecutionNode: queueEntry.isInExecutionHome, + globalModification: queueEntry.globalModification, + entryID: queueEntry.entryID, + txGroupCyle: queueEntry.txGroupCycle, + uniqueKeys: queueEntry.uniqueKeys, + collectedData: queueEntry.collectedData, + finalData: queueEntry.collectedFinalData, + preApplyResult: queueEntry.preApplyTXResult, + txAge: shardusGetTime() - queueEntry.acceptedTx.timestamp, + lastFinalDataRequestTimestamp: queueEntry.lastFinalDataRequestTimestamp, + dataSharedTimestamp: queueEntry.dataSharedTimestamp, + firstVoteTimestamp: queueEntry.firstVoteReceivedTimestamp, + lastVoteTimestamp: queueEntry.lastVoteReceivedTimestamp, + // firstConfirmationsTimestamp: queueEntry.firstConfirmOrChallengeTimestamp, + // robustBestConfirmation: 
queueEntry.receivedBestConfirmation, + // robustBestVote: queueEntry.receivedBestVote, + // robustBestChallenge: queueEntry.receivedBestChallenge, + // completedRobustVote: queueEntry.robustQueryVoteCompleted, + // completedRobustChallenge: queueEntry.robustQueryConfirmOrChallengeCompleted, + txDebug: queueEntry.txDebug, + executionDebug: queueEntry.executionDebug, + waitForReceiptOnly: queueEntry.waitForReceiptOnly, + ourVote: queueEntry.ourVote || null, + signedReceipt: this.stateManager.getSignedReceipt(queueEntry) || null, + // uniqueChallenges: queueEntry.uniqueChallengesCount, + collectedVoteCount: queueEntry.collectedVoteHashes.length, + simpleDebugStr: this.app.getSimpleTxDebugValue ? this.app.getSimpleTxDebugValue(queueEntry.acceptedTx?.data) : '', + } + } + +} \ No newline at end of file diff --git a/src/state-manager/TransactionQueue.entry.ts b/src/state-manager/TransactionQueue.entry.ts new file mode 100644 index 000000000..406c55be3 --- /dev/null +++ b/src/state-manager/TransactionQueue.entry.ts @@ -0,0 +1,1679 @@ +import { SignedObject } from '@shardeum-foundation/lib-crypto-utils' +import { StateManager as StateManagerTypes, Utils } from '@shardeum-foundation/lib-types' +import { Node } from '@shardeum-foundation/lib-types/build/src/p2p/NodeListTypes' +import { logFlags } from '../logger' +import { shardusGetTime } from '../network' +import * as Context from '../p2p/Context' +import { config as configContext } from '../p2p/Context' +import * as CycleChain from '../p2p/CycleChain' +import * as NodeList from '../p2p/NodeList' +import { activeByIdOrder, byPubKey, potentiallyRemoved } from '../p2p/NodeList' +import * as Self from '../p2p/Self' +import * as Shardus from '../shardus/shardus-types' +import { RequestReceiptForTxReqSerialized, serializeRequestReceiptForTxReq } from '../types/RequestReceiptForTxReq' +import { RequestReceiptForTxRespSerialized, deserializeRequestReceiptForTxResp } from '../types/RequestReceiptForTxResp' +import { RequestStateForTxReq, serializeRequestStateForTxReq } from '../types/RequestStateForTxReq' +import { RequestStateForTxRespSerialized, deserializeRequestStateForTxResp } from '../types/RequestStateForTxResp' +import { ResponseError } from '../types/ResponseError' +import { SpreadTxToGroupSyncingReq, serializeSpreadTxToGroupSyncingReq } from '../types/SpreadTxToGroupSyncingReq' +import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' +import * as utils from '../utils' +import { errorToStringFull } from '../utils' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { profilerInstance } from '../utils/profiler' +import ShardFunctions from './shardFunctions' +import { AcceptedTx, Proposal, QueueEntry, SignedReceipt, StringBoolObjectMap, StringNodeObjectMap } from './state-manager-types' + +import { TransactionQueueContext } from './TransactionQueue.context' + +export const entryMethods = { + + routeAndQueueAcceptedTransaction( + this: TransactionQueueContext, + acceptedTx: AcceptedTx, + sendGossip = true, + sender: Shardus.Node | null, + globalModification: boolean, + noConsensus: boolean + ): string | boolean { + // dropping these too early.. hmm we finished syncing before we had the first shard data. 
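+    // Overall flow: return 'notReady' until sync and shard data are available, build the QueueEntry with its
+    // execution/consensus group info, gossip the tx to the transaction group when appropriate, then push the entry
+    // onto the pending queue and kick the processing loop. Duplicates, bad keys or a missing cycle return false,
+    // and nodes outside the transaction group return 'out of range'.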
+ // if (this.stateManager.currentCycleShardData == null) { + // // this.preTXQueue.push(acceptedTX) + // return 'notReady' // it is too early to care about the tx + // } + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('routeAndQueueAcceptedTransaction-debug', '', `sendGossip:${sendGossip} globalModification:${globalModification} noConsensus:${noConsensus} this.readyforTXs:${this.stateManager.accountSync.readyforTXs} hasshardData:${this.stateManager.currentCycleShardData != null} acceptedTx:${utils.stringifyReduce(acceptedTx)} `) + if (this.stateManager.accountSync.readyforTXs === false) { + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction too early for TX: this.readyforTXs === false`) + return 'notReady' // it is too early to care about the tx + } + if (this.stateManager.currentCycleShardData == null) { + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction too early for TX: this.stateManager.currentCycleShardData == null`) + return 'notReady' + } + + try { + this.profiler.profileSectionStart('enqueue') + + if (this.stateManager.accountGlobals.hasknownGlobals == false) { + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction too early for TX: hasknownGlobals == false`) + return 'notReady' + } + + const keysResponse = acceptedTx.keys + const timestamp = acceptedTx.timestamp + const txId = acceptedTx.txId + + // This flag turns of consensus for all TXs for debuggging + if (this.stateManager.debugNoTxVoting === true) { + noConsensus = true + } + + if (configContext.stateManager.waitUpstreamTx) { + const keysToCheck = [] + if (acceptedTx.shardusMemoryPatterns && acceptedTx.shardusMemoryPatterns.rw) { + keysToCheck.push(...acceptedTx.shardusMemoryPatterns.rw) + } + if (acceptedTx.shardusMemoryPatterns && acceptedTx.shardusMemoryPatterns.wo) { + keysToCheck.push(...acceptedTx.shardusMemoryPatterns.wo) + } + if (keysToCheck.length === 0) { + const sourceKey = acceptedTx.keys.sourceKeys[0] + keysToCheck.push(sourceKey) + } + for (const key of keysToCheck) { + const isAccountInQueue = this.isAccountInQueue(key) + if (isAccountInQueue) { + nestedCountersInstance.countEvent( + 'stateManager', + `cancel enqueue, isAccountInQueue ${key} ${isAccountInQueue}` + ) + return false + } + } + } + + let cycleNumber = this.stateManager.currentCycleShardData.cycleNumber + if (Context.config.stateManager.deterministicTXCycleEnabled) { + cycleNumber = CycleChain.getCycleNumberFromTimestamp( + acceptedTx.timestamp - Context.config.stateManager.reduceTimeFromTxTimestamp, + true, + false + ) + if (cycleNumber > this.stateManager.currentCycleShardData.cycleNumber) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction derived txGroupCycle > currentCycleShardData.cycleNumber. 
txId:${txId} txGroupCycle:${cycleNumber} currentCycleShardData.cycleNumber:${this.stateManager.currentCycleShardData.cycleNumber}`) + nestedCountersInstance.countEvent('stateManager', 'derived txGroupCycle is larger than current cycle') + if (Context.config.stateManager.fallbackToCurrentCycleFortxGroup) { + cycleNumber = this.stateManager.currentCycleShardData.cycleNumber + } + } else if (cycleNumber < this.stateManager.currentCycleShardData.cycleNumber) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction derived txGroupCycle < currentCycleShardData.cycleNumber. txId:${txId} txGroupCycle:${cycleNumber} currentCycleShardData.cycleNumber:${this.stateManager.currentCycleShardData.cycleNumber}`) + nestedCountersInstance.countEvent('stateManager', 'derived txGroupCycle is less than current cycle') + } else if (cycleNumber === this.stateManager.currentCycleShardData.cycleNumber) { + nestedCountersInstance.countEvent('stateManager', 'derived txGroupCycle is same as current cycle') + } + } + + this.queueEntryCounter++ + const txQueueEntry: QueueEntry = { + gossipedCompleteData: false, + eligibleNodeIdsToConfirm: new Set(), + eligibleNodeIdsToVote: new Set(), + acceptedTx: acceptedTx, + uniqueTags: this.app.getUniqueAppTags?.(acceptedTx.data.tx), + txKeys: keysResponse, + executionShardKey: null, + isInExecutionHome: true, + shardusMemoryPatternSets: null, + noConsensus, + collectedData: {}, + collectedFinalData: {}, + originalData: {}, + beforeHashes: {}, + homeNodes: {}, + patchedOnNodes: new Map(), + hasShardInfo: false, + state: 'aging', + dataCollected: 0, + hasAll: false, + entryID: this.queueEntryCounter, + localKeys: {}, + localCachedData: {}, + syncCounter: 0, + didSync: false, + queuedBeforeMainSyncComplete: false, + didWakeup: false, + syncKeys: [], + logstate: '', + requests: {}, + globalModification: globalModification, + collectedVotes: [], + collectedVoteHashes: [], + pendingConfirmOrChallenge: new Map(), + pendingVotes: new Map(), + waitForReceiptOnly: false, + m2TimeoutReached: false, + debugFail_voteFlip: false, + debugFail_failNoRepair: false, + requestingReceipt: false, + cycleToRecordOn: -5, + involvedPartitions: [], + involvedGlobalPartitions: [], + shortReceiptHash: '', + requestingReceiptFailed: false, + approximateCycleAge: cycleNumber, + ourNodeInTransactionGroup: false, + ourNodeInConsensusGroup: false, + logID: '', + txGroupDebug: '', + uniqueWritableKeys: [], + txGroupCycle: 0, + updatedTxGroupCycle: 0, + updatedTransactionGroup: null, + receiptEverRequested: false, + repairStarted: false, + repairFailed: false, + hasValidFinalData: false, + pendingDataRequest: false, + queryingFinalData: false, + lastFinalDataRequestTimestamp: 0, + newVotes: false, + fromClient: sendGossip, + gossipedReceipt: false, + gossipedVote: false, + gossipedConfirmOrChallenge: false, + completedConfirmedOrChallenge: false, + uniqueChallengesCount: 0, + uniqueChallenges: {}, + archived: false, + ourTXGroupIndex: -1, + ourExGroupIndex: -1, + involvedReads: {}, + involvedWrites: {}, + txDebug: { + enqueueHrTime: process.hrtime(), + startTime: {}, + endTime: {}, + duration: {}, + startTimestamp: {}, + endTimestamp: {}, + }, + executionGroupMap: new Map(), + executionNodeIdSorted: [], + txSieveTime: 0, + debug: {}, + voteCastAge: 0, + dataSharedTimestamp: 0, + firstVoteReceivedTimestamp: 0, + firstConfirmOrChallengeTimestamp: 0, + lastVoteReceivedTimestamp: 0, + lastConfirmOrChallengeTimestamp: 0, + robustQueryVoteCompleted: false, + 
robustQueryConfirmOrChallengeCompleted: false, + acceptVoteMessage: true, + acceptConfirmOrChallenge: true, + accountDataSet: false, + topConfirmations: new Set(), + topVoters: new Set(), + hasRobustConfirmation: false, + sharedCompleteData: false, + correspondingGlobalOffset: 0, + isSenderWrappedTxGroup: {}, + isNGT: this.app.isNGT(acceptedTx.data?.tx), + } // age comes from timestamp + this.txDebugMarkStartTime(txQueueEntry, 'total_queue_time') + this.txDebugMarkStartTime(txQueueEntry, 'aging') + + // todo faster hash lookup for this maybe? + const entry = this.getQueueEntrySafe(acceptedTx.txId) // , acceptedTx.timestamp) + if (entry) { + return false // already in our queue, or temp queue + } + + txQueueEntry.logID = utils.makeShortHash(acceptedTx.txId) + + this.stateManager.debugTXHistory[txQueueEntry.logID] = 'enteredQueue' + + if (this.app.canDebugDropTx(acceptedTx.data)) { + if ( + this.stateManager.testFailChance( + this.stateManager.loseTxChance, + 'loseTxChance', + txQueueEntry.logID, + '', + logFlags.verbose + ) === true + ) { + return 'lost' + } + if ( + this.stateManager.testFailChance( + this.stateManager.voteFlipChance, + 'voteFlipChance', + txQueueEntry.logID, + '', + logFlags.verbose + ) === true + ) { + txQueueEntry.debugFail_voteFlip = true + } + + if ( + globalModification === false && + this.stateManager.testFailChance( + this.stateManager.failNoRepairTxChance, + 'failNoRepairTxChance', + txQueueEntry.logID, + '', + logFlags.verbose + ) === true + ) { + txQueueEntry.debugFail_failNoRepair = true + } + } + + try { + // use shardusGetTime() instead of Date.now as many thing depend on it + const age = shardusGetTime() - timestamp + + const keyHash: StringBoolObjectMap = {} //TODO replace with Set + for (const key of txQueueEntry.txKeys.allKeys) { + if (key == null) { + // throw new Error(`routeAndQueueAcceptedTransaction key == null ${key}`) + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction key == null ${timestamp} not putting tx in queue.`) + return false + } + + // eslint-disable-next-line security/detect-object-injection + keyHash[key] = true + } + txQueueEntry.uniqueKeys = Object.keys(keyHash) + + if (txQueueEntry.txKeys.allKeys == null || txQueueEntry.txKeys.allKeys.length === 0) { + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction allKeys == null || allKeys.length === 0 ${timestamp} not putting tx in queue.`) + return false + } + let cycleShardData = this.stateManager.currentCycleShardData + if (Context.config.stateManager.deterministicTXCycleEnabled) { + txQueueEntry.txGroupCycle = cycleNumber + cycleShardData = this.stateManager.shardValuesByCycle.get(cycleNumber) + } + txQueueEntry.txDebug.cycleSinceActivated = + cycleNumber - activeByIdOrder.find((node) => node.id === Self.id).activeCycle + + if (cycleShardData == null) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction logID:${txQueueEntry.logID} cycleShardData == null cycle:${cycleNumber} not putting tx in queue.`) + nestedCountersInstance.countEvent('stateManager', 'routeAndQueueAcceptedTransaction cycleShardData == null') + return false + } + + this.updateHomeInformation(txQueueEntry) + + //set the executionShardKey for the transaction + if (txQueueEntry.globalModification === false && this.executeInOneShard) { + //USE the first key in the list of all keys. 
Applications much carefully sort this list + //so that we start in the optimal shard. This will matter less when shard hopping is implemented + txQueueEntry.executionShardKey = txQueueEntry.txKeys.allKeys[0] + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`routeAndQueueAcceptedTransaction set executionShardKey tx:${txQueueEntry.logID} ts:${timestamp} executionShardKey: ${utils.stringifyReduce(txQueueEntry.executionShardKey)} `) + + // we were doing this in queueEntryGetTransactionGroup. moved it earlier. + const { homePartition } = ShardFunctions.addressToPartition( + cycleShardData.shardGlobals, + txQueueEntry.executionShardKey + ) + + const homeShardData = cycleShardData.parititionShardDataMap.get(homePartition) + + //set the nodes that are in the executionGroup. + //This is needed so that consensus will expect less nodes to be voting + const unRankedExecutionGroup = homeShardData.homeNodes[0].consensusNodeForOurNodeFull.slice() + if (this.usePOQo) { + txQueueEntry.executionGroup = this.orderNodesByRank(unRankedExecutionGroup, txQueueEntry.acceptedTx.txId) + } else if (this.useNewPOQ) { + txQueueEntry.executionGroup = this.orderNodesByRank(unRankedExecutionGroup, txQueueEntry.acceptedTx.txId) + } else { + txQueueEntry.executionGroup = unRankedExecutionGroup + } + // for the new FACT algorithm + txQueueEntry.executionNodeIdSorted = txQueueEntry.executionGroup.map((node) => node.id).sort() + + if (txQueueEntry.isInExecutionHome) { + txQueueEntry.ourNodeRank = BigInt(this.computeNodeRank( + cycleShardData.ourNode.id, + txQueueEntry.acceptedTx.txId + )) + } + + const minNodesToVote = 3 + const voterPercentage = configContext.stateManager.voterPercentage + const numberOfVoters = Math.max( + minNodesToVote, + Math.floor(txQueueEntry.executionGroup.length * voterPercentage) + ) + // voters are highest ranked nodes + txQueueEntry.eligibleNodeIdsToVote = new Set( + txQueueEntry.executionGroup.slice(0, numberOfVoters).map((node) => node.id) + ) + + // confirm nodes are lowest ranked nodes + txQueueEntry.eligibleNodeIdsToConfirm = new Set( + txQueueEntry.executionGroup + .slice(txQueueEntry.executionGroup.length - numberOfVoters) + .map((node) => node.id) + ) + + // calculate globalOffset for FACT + // take last 2 bytes of the txId and convert it to an integer + txQueueEntry.correspondingGlobalOffset = parseInt(txId.slice(-4), 16) + + const ourID = cycleShardData.ourNode.id + for (let idx = 0; idx < txQueueEntry.executionGroup.length; idx++) { + // eslint-disable-next-line security/detect-object-injection + const node = txQueueEntry.executionGroup[idx] + txQueueEntry.executionGroupMap.set(node.id, node) + if (node.id === ourID) { + txQueueEntry.ourExGroupIndex = idx + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: executor index ${txQueueEntry.ourExGroupIndex}:${(node as Shardus.NodeWithRank).rank}`) + } + } + if (txQueueEntry.eligibleNodeIdsToConfirm.has(Self.id)) { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: confirmator`) + } + if (txQueueEntry.eligibleNodeIdsToVote.has(Self.id)) { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: voter`) + } + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 
${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: groupsize voters ${txQueueEntry.eligibleNodeIdsToConfirm.size}`) + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: groupsize confirmators ${txQueueEntry.eligibleNodeIdsToConfirm.size}`) + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: groupsize execution ${txQueueEntry.executionGroup.length}`) + + //if we are not in the execution group then set isInExecutionHome to false + if (txQueueEntry.executionGroupMap.has(cycleShardData.ourNode.id) === false) { + txQueueEntry.isInExecutionHome = false + } + + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`routeAndQueueAcceptedTransaction info ${txQueueEntry.logID} isInExecutionHome:${txQueueEntry.isInExecutionHome} hasShardInfo:${txQueueEntry.hasShardInfo}`) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('routeAndQueueAcceptedTransaction', `routeAndQueueAcceptedTransaction info ${txQueueEntry.logID} isInExecutionHome:${txQueueEntry.isInExecutionHome} hasShardInfo:${txQueueEntry.hasShardInfo} executionShardKey:${utils.makeShortHash(txQueueEntry.executionShardKey)}`) + /* prettier-ignore */ if (this.stateManager.consensusLog) this.mainLogger.debug(`routeAndQueueAcceptedTransaction info ${txQueueEntry.logID} isInExecutionHome:${txQueueEntry.isInExecutionHome}`) + } + + // calculate information needed for receiptmap + txQueueEntry.cycleToRecordOn = CycleChain.getCycleNumberFromTimestamp(timestamp) + /* prettier-ignore */ if (logFlags.verbose) console.log('Cycle number from timestamp', timestamp, txQueueEntry.cycleToRecordOn) + if (txQueueEntry.cycleToRecordOn < 0) { + nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', 'caused Enqueue fail') + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction failed to calculate cycle ${timestamp} error code:${txQueueEntry.cycleToRecordOn}`) + return false + } + if (txQueueEntry.cycleToRecordOn == null) { + this.statemanager_fatal( + `routeAndQueueAcceptedTransaction cycleToRecordOn==null`, + `routeAndQueueAcceptedTransaction cycleToRecordOn==null ${txQueueEntry.logID} ${timestamp}` + ) + } + + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueInsertion_start', txQueueEntry.logID, `${txQueueEntry.logID} uniqueKeys:${utils.stringifyReduce(txQueueEntry.uniqueKeys)} txKeys: ${utils.stringifyReduce(txQueueEntry.txKeys)} cycleToRecordOn:${txQueueEntry.cycleToRecordOn}`) + + // Look at our keys and log which are known global accounts. Set global accounts for keys if this is a globalModification TX + for (const key of txQueueEntry.uniqueKeys) { + if (globalModification === true) { + if (this.stateManager.accountGlobals.isGlobalAccount(key)) { + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('globalAccountMap', `routeAndQueueAcceptedTransaction - has account:${utils.stringifyReduce(key)}`) + } else { + //this makes the code aware that this key is for a global account. + //is setting this here too soon? 
+ //it should be that p2p has already checked the receipt before calling shardus.push with global=true + this.stateManager.accountGlobals.setGlobalAccount(key) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('globalAccountMap', `routeAndQueueAcceptedTransaction - set account:${utils.stringifyReduce(key)}`) + } + } + } + + // slightly different flag that didsync. This is less about if our address range was done syncing (which can happen any time) + // and just a simple check to see if this was queued before the main sync phase. + // for now, just used for more detailed logging so we can sort out if problem TXs were from shortly before we were fully done + // but after a sync range was finished, (used shortly below in the age check) + txQueueEntry.queuedBeforeMainSyncComplete = this.stateManager.accountSync.dataSyncMainPhaseComplete + + // Check to see if any keys are inside of a syncing range. + // If it is a global key in a non-globalModification TX then we dont care about it + + if (age > this.stateManager.queueSitTime * 0.9) { + if (txQueueEntry.didSync === true) { + /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', `enqueue old TX didSync === true queuedBeforeMainSyncComplete:${txQueueEntry.queuedBeforeMainSyncComplete}`) + } else { + /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', `enqueue old TX didSync === false queuedBeforeMainSyncComplete:${txQueueEntry.queuedBeforeMainSyncComplete}`) + if (txQueueEntry.queuedBeforeMainSyncComplete) { + //only a fatal if it was after the main sync phase was complete. + this.statemanager_fatal( + `routeAndQueueAcceptedTransaction_olderTX`, + 'routeAndQueueAcceptedTransaction working on older tx ' + timestamp + ' age: ' + age + ) + // TODO consider throwing this out. right now it is just a warning + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_oldQueueInsertion', '', 'routeAndQueueAcceptedTransaction working on older tx ' + timestamp + ' age: ' + age) + } + } + } + + // Refine our list of which keys will be updated in this transaction : uniqueWritableKeys + for (const key of txQueueEntry.uniqueKeys) { + const isGlobalAcc = this.stateManager.accountGlobals.isGlobalAccount(key) + + // if it is a global modification and global account we can write + if (globalModification === true && isGlobalAcc === true) { + txQueueEntry.uniqueWritableKeys.push(key) + } + // if it is a normal transaction and non global account we can write + if (globalModification === false && isGlobalAcc === false) { + txQueueEntry.uniqueWritableKeys.push(key) + } + } + txQueueEntry.uniqueWritableKeys.sort() //need this list to be deterministic! + + if (txQueueEntry.hasShardInfo) { + const transactionGroup = this.queueEntryGetTransactionGroup(txQueueEntry) + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: groupsize transaction ${txQueueEntry.transactionGroup.length}`) + if (txQueueEntry.ourNodeInTransactionGroup || txQueueEntry.didSync === true) { + // go ahead and calculate this now if we are in the tx group or we are syncing this range! 
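+          // The isSenderWrappedTxGroup loop further below handles accounts whose consensus group wraps past the end
+          // of the sorted transaction group: each wrapped member is mapped to its unwrapped index in the group.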
+ this.queueEntryGetConsensusGroup(txQueueEntry) + + // populate isSenderWrappedTxGroup + for (const accountId of txQueueEntry.uniqueKeys) { + const homeNodeShardData = txQueueEntry.homeNodes[accountId] + const consensusGroupForAccount = homeNodeShardData.consensusNodeForOurNodeFull.map((n) => n.id) + const startAndEndIndices = this.getStartAndEndIndexOfTargetGroup( + consensusGroupForAccount, + txQueueEntry.transactionGroup + ) + const isWrapped = startAndEndIndices.endIndex < startAndEndIndices.startIndex + if (isWrapped === false) continue + const unwrappedEndIndex = startAndEndIndices.endIndex + txQueueEntry.transactionGroup.length + for (let i = startAndEndIndices.startIndex; i < unwrappedEndIndex; i++) { + if (i >= txQueueEntry.transactionGroup.length) { + const wrappedIndex = i - txQueueEntry.transactionGroup.length + txQueueEntry.isSenderWrappedTxGroup[txQueueEntry.transactionGroup[wrappedIndex].id] = i + } + } + } + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`routeAndQueueAcceptedTransaction isSenderWrappedTxGroup ${txQueueEntry.logID} ${utils.stringifyReduce(txQueueEntry.isSenderWrappedTxGroup)}`) + } + if (sendGossip && txQueueEntry.globalModification === false) { + try { + if (transactionGroup.length > 1) { + // should consider only forwarding in some cases? + this.stateManager.debugNodeGroup(txId, timestamp, `share to neighbors`, transactionGroup) + this.p2p.sendGossipIn( + 'spread_tx_to_group', + acceptedTx, + '', + sender, + transactionGroup, + true, + -1, + acceptedTx.txId + ) + /* prettier-ignore */ if (logFlags.verbose) console.log( 'spread_tx_to_group', txId, txQueueEntry.executionGroup.length, txQueueEntry.conensusGroup.length, txQueueEntry.transactionGroup.length ) + this.addOriginalTxDataToForward(txQueueEntry) + } + // /* prettier-ignore */ if (logFlags.playback ) this.logger.playbackLogNote('tx_homeGossip', `${txId}`, `AcceptedTransaction: ${acceptedTX}`) + } catch (ex) { + this.statemanager_fatal(`txQueueEntry_ex`, 'txQueueEntry: ' + utils.stringifyReduce(txQueueEntry)) + } + } + + if (txQueueEntry.didSync === false) { + // see if our node shard data covers any of the accounts? + if (txQueueEntry.ourNodeInTransactionGroup === false && txQueueEntry.globalModification === false) { + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_notInTxGroup', `${txQueueEntry.logID}`, ``) + return 'out of range' // we are done, not involved!!! 
+ } else { + // If we have syncing neighbors forward this TX to them + if (this.config.debug.forwardTXToSyncingNeighbors && cycleShardData.hasSyncingNeighbors === true) { + let send_spread_tx_to_group_syncing = true + //todo turn this back on if other testing goes ok + if (txQueueEntry.ourNodeInTransactionGroup === false) { + /* prettier-ignore */ nestedCountersInstance.countEvent('transactionQueue', 'spread_tx_to_group_syncing-skipped2') + send_spread_tx_to_group_syncing = false + } else if (txQueueEntry.ourTXGroupIndex > 0) { + const everyN = Math.max(1, Math.floor(txQueueEntry.transactionGroup.length * 0.4)) + const nonce = parseInt('0x' + txQueueEntry.acceptedTx.txId.substring(0, 2)) + const idxPlusNonce = txQueueEntry.ourTXGroupIndex + nonce + const idxModEveryN = idxPlusNonce % everyN + if (idxModEveryN > 0) { + /* prettier-ignore */ nestedCountersInstance.countEvent('transactionQueue', 'spread_tx_to_group_syncing-skipped') + send_spread_tx_to_group_syncing = false + } + } + if (send_spread_tx_to_group_syncing) { + /* prettier-ignore */ nestedCountersInstance.countEvent('transactionQueue', 'spread_tx_to_group_syncing-notSkipped') + + // only send non global modification TXs + if (txQueueEntry.globalModification === false) { + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`routeAndQueueAcceptedTransaction: spread_tx_to_group ${txQueueEntry.logID}`) + /* prettier-ignore */ + if (logFlags.playback) this.logger.playbackLogNote("shrd_sync_tx", `${txQueueEntry.logID}`, `txts: ${timestamp} nodes:${utils.stringifyReduce(cycleShardData.syncingNeighborsTxGroup.map((x) => x.id))}`) + + this.stateManager.debugNodeGroup( + txId, + timestamp, + `share to syncing neighbors`, + cycleShardData.syncingNeighborsTxGroup + ) + + if (logFlags.seqdiagram) { + for (const node of cycleShardData.syncingNeighborsTxGroup) { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455102 ${shardusGetTime()} tx:${acceptedTx.txId} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(node.id)}: ${'spread_tx_to_group_syncing'}`) + } + } + const request = acceptedTx as SpreadTxToGroupSyncingReq + this.p2p.tellBinary( + cycleShardData.syncingNeighborsTxGroup, + InternalRouteEnum.binary_spread_tx_to_group_syncing, + request, + serializeSpreadTxToGroupSyncingReq, + {} + ) + } else { + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`routeAndQueueAcceptedTransaction: bugfix detected. avoid forwarding txs where globalModification == true ${txQueueEntry.logID}`) + } + } + } + } + } + } else { + throw new Error('missing shard info') + } + + this.computeTxSieveTime(txQueueEntry) + + if ( + this.config.debug.useShardusMemoryPatterns && + acceptedTx.shardusMemoryPatterns != null && + acceptedTx.shardusMemoryPatterns.ro != null + ) { + txQueueEntry.shardusMemoryPatternSets = { + ro: new Set(acceptedTx.shardusMemoryPatterns.ro), + rw: new Set(acceptedTx.shardusMemoryPatterns.rw), + wo: new Set(acceptedTx.shardusMemoryPatterns.wo), + on: new Set(acceptedTx.shardusMemoryPatterns.on), + ri: new Set(acceptedTx.shardusMemoryPatterns.ri), + } + nestedCountersInstance.countEvent('transactionQueue', 'shardusMemoryPatternSets included') + } else { + nestedCountersInstance.countEvent('transactionQueue', 'shardusMemoryPatternSets not included') + } + + // This call is not awaited. It is expected to be fast and will be done in the background. 
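+      // queueEntryPrePush (defined below) pre-fetches read-immutable ('ri') accounts when enableRIAccountsCache is on,
+      // caching them via setCachedRIAccountData and attaching them to the entry with queueEntryAddData.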
+ this.queueEntryPrePush(txQueueEntry) + + this.pendingTransactionQueue.push(txQueueEntry) + this.pendingTransactionQueueByID.set(txQueueEntry.acceptedTx.txId, txQueueEntry) + + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txQueueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: pendingQ`) + + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_txPreQueued', `${txQueueEntry.logID}`, `${txQueueEntry.logID} gm:${txQueueEntry.globalModification}`) + // start the queue if needed + this.stateManager.tryStartTransactionProcessingQueue() + } catch (error) { + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_addtoqueue_rejected', `${txId}`, `AcceptedTransaction: ${txQueueEntry.logID} ts: ${txQueueEntry.txKeys.timestamp} acc: ${utils.stringifyReduce(txQueueEntry.txKeys.allKeys)}`) + this.statemanager_fatal( + `routeAndQueueAcceptedTransaction_ex`, + 'routeAndQueueAcceptedTransaction failed: ' + errorToStringFull(error) + ) + throw new Error(error) + } + return true + } finally { + this.profiler.profileSectionEnd('enqueue') + } + }, + + async queueEntryPrePush(this: TransactionQueueContext, txQueueEntry: QueueEntry): Promise { + this.profiler.profileSectionStart('queueEntryPrePush', true) + this.profiler.scopedProfileSectionStart('queueEntryPrePush', true) + // Pre fetch immutable read account data for this TX + if ( + this.config.features.enableRIAccountsCache && + txQueueEntry.shardusMemoryPatternSets && + txQueueEntry.shardusMemoryPatternSets.ri && + txQueueEntry.shardusMemoryPatternSets.ri.size > 0 + ) { + for (const key of txQueueEntry.shardusMemoryPatternSets.ri) { + /* prettier-ignore */ nestedCountersInstance.countEvent('transactionQueue', 'queueEntryPrePush_ri') + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.info(`queueEntryPrePush: fetching immutable data for tx ${txQueueEntry.acceptedTx.txId} key ${key}`) + const accountData = await this.stateManager.getLocalOrRemoteAccount(key, { + useRICache: true, + }) + if (accountData != null) { + this.app.setCachedRIAccountData([accountData]) + this.queueEntryAddData( + txQueueEntry, + { + accountId: accountData.accountId, + stateId: accountData.stateId, + data: accountData.data, + timestamp: accountData.timestamp, + syncData: accountData.syncData, + accountCreated: false, + isPartial: false, + }, + false + ) + /* prettier-ignore */ nestedCountersInstance.countEvent('transactionQueue', 'queueEntryPrePush_ri_added') + } + } + } + this.profiler.scopedProfileSectionEnd('queueEntryPrePush') + this.profiler.profileSectionStart('queueEntryPrePush', true) + }, + + /** + * getQueueEntry + * get a queue entry from the current queue + * @param txid + */ + getQueueEntry(this: TransactionQueueContext, txid: string): QueueEntry | null { + const queueEntry = this._transactionQueueByID.get(txid) + if (queueEntry === undefined) { + return null + } + return queueEntry + }, + + /** + * getQueueEntrySafe + * get a queue entry from the queue or the pending queue (but not archive queue) + * @param txid + */ + getQueueEntrySafe(this: TransactionQueueContext, txid: string): QueueEntry | null { + let queueEntry = this._transactionQueueByID.get(txid) + if (queueEntry === undefined) { + queueEntry = this.pendingTransactionQueueByID.get(txid) + if (queueEntry === undefined) { + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`getQueueEntrySafe failed to find: ${utils.stringifyReduce(txid)}`) + 
nestedCountersInstance.countEvent('getQueueEntrySafe', 'failed to find returning null') + return null + } + } + return queueEntry + }, + + /** + * getQueueEntryArchived + * get a queue entry from the archive queue only + * @param txid + * @param msg + */ + getQueueEntryArchived(this: TransactionQueueContext, txid: string, msg: string): QueueEntry | null { + const queueEntry = this.archivedQueueEntriesByID.get(txid) + if (queueEntry != null) { + return queueEntry + } + nestedCountersInstance.countRareEvent('error', `getQueueEntryArchived no entry: ${msg}`) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`getQueueEntryArchived failed to find: ${utils.stringifyReduce(txid)} ${msg} dbg:${this.stateManager.debugTXHistory[utils.stringifyReduce(txid)]}`) + return null + }, + + getArchivedQueueEntryByAccountIdAndHash(this: TransactionQueueContext, accountId: string, hash: string, msg: string): QueueEntry | null { + try { + let foundQueueEntry = false + let foundVote = false + let foundVoteMatchingHash = false + for (const queueEntry of this.archivedQueueEntriesByID.values()) { + if (queueEntry.uniqueKeys.includes(accountId)) { + foundQueueEntry = true + const signedReceipt: SignedReceipt = this.stateManager.getSignedReceipt(queueEntry) + let proposal: Proposal | null = null + if (signedReceipt != null) { + proposal = signedReceipt.proposal + if (signedReceipt.proposal) + nestedCountersInstance.countEvent( + 'getArchivedQueueEntryByAccountIdAndHash', + 'get proposal from signedReceipt' + ) + } + if (proposal == null) { + proposal = queueEntry.ourProposal + if (queueEntry.receivedBestVote) + nestedCountersInstance.countEvent( + 'getArchivedQueueEntryByAccountIdAndHash', + 'get proposal' + ' from' + ' queueEntry.ourProposal' + ) + } + if (proposal == null) { + continue + } + foundVote = true + // this node might not have a vote for this tx + for (let i = 0; i < proposal.accountIDs.length; i++) { + // eslint-disable-next-line security/detect-object-injection + if (proposal.accountIDs[i] === accountId) { + // eslint-disable-next-line security/detect-possible-timing-attacks, security/detect-object-injection + if (proposal.afterStateHashes[i] === hash) { + foundVoteMatchingHash = true + return queueEntry + } + } + } + } + } + nestedCountersInstance.countRareEvent('error', `getQueueEntryArchived no entry: ${msg}`) + nestedCountersInstance.countEvent( + 'error', + `getQueueEntryArchived no entry: ${msg}, found queue entry: ${foundQueueEntry}, found vote: ${foundVote}, found vote matching hash: ${foundVoteMatchingHash}` + ) + return null + } catch (e) { + this.statemanager_fatal(`getArchivedQueueEntryByAccountIdAndHash`, `error: ${e.message}`) + return null + } + }, + /** + * getQueueEntryArchived + * get a queue entry from the archive queue only + * @param txid + * @param msg + */ + getQueueEntryArchivedByTimestamp(this: TransactionQueueContext, timestamp: number, msg: string): QueueEntry | null { + for (const queueEntry of this.archivedQueueEntriesByID.values()) { + if (queueEntry.acceptedTx.timestamp === timestamp) { + return queueEntry + } + } + nestedCountersInstance.countRareEvent('error', `getQueueEntryArchived no entry: ${msg}`) + nestedCountersInstance.countEvent('error', `getQueueEntryArchived no entry: ${msg}`) + return null + }, + + /** + * queueEntryAddData + * add data to a queue entry + * // TODO CODEREVIEW. need to look at the use of local cache. also is the early out ok? 
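+ * Behaviour (as implemented below): duplicate data for an account is replaced only when the
+ * collectedDataFix config is on and the incoming copy has a newer timestamp, otherwise it is ignored;
+ * when signatureCheck is true the sender must sign the data, be a known node, and belong to the
+ * account's consensus group; once data exists for every uniqueKey, hasAll is set and, when the
+ * execution group has more than one node, the complete data is shared to neighbours.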
+ * @param queueEntry + * @param data + */ + queueEntryAddData(this: TransactionQueueContext, queueEntry: QueueEntry, data: Shardus.WrappedResponse, signatureCheck = false): void { + if (queueEntry.uniqueKeys == null) { + nestedCountersInstance.countEvent('queueEntryAddData', 'uniqueKeys == null') + // cant have all data yet if we dont even have unique keys. + throw new Error( + `Attempting to add data and uniqueKeys are not available yet: ${utils.stringifyReduceLimit(queueEntry, 200)}` + ) + } + if (queueEntry.collectedData[data.accountId] != null) { + if (configContext.stateManager.collectedDataFix) { + // compare the timestamps and keep the newest + const existingData = queueEntry.collectedData[data.accountId] + if (data.timestamp > existingData.timestamp) { + queueEntry.collectedData[data.accountId] = data + nestedCountersInstance.countEvent('queueEntryAddData', 'collectedDataFix replace with newer data') + } else { + nestedCountersInstance.countEvent('queueEntryAddData', 'already collected 1') + return + } + } else { + // we have already collected this data + nestedCountersInstance.countEvent('queueEntryAddData', 'already collected 2') + return + } + } + profilerInstance.profileSectionStart('queueEntryAddData', true) + // check the signature of each account data + if (signatureCheck && (data.sign == null || data.sign.owner == null || data.sign.sig == null)) { + this.mainLogger.fatal(`queueEntryAddData: data.sign == null ${utils.stringifyReduce(data)}`) + nestedCountersInstance.countEvent('queueEntryAddData', 'data.sign == null') + return + } + + if (signatureCheck) { + const dataSenderPublicKey = data.sign.owner + const dataSenderNode: Shardus.Node = byPubKey[dataSenderPublicKey] + if (dataSenderNode == null) { + nestedCountersInstance.countEvent('queueEntryAddData', 'dataSenderNode == null') + return + } + const consensusNodesForAccount = queueEntry.homeNodes[data.accountId]?.consensusNodeForOurNodeFull + if ( + consensusNodesForAccount == null || + consensusNodesForAccount.map((n) => n.id).includes(dataSenderNode.id) === false + ) { + nestedCountersInstance.countEvent( + 'queueEntryAddData', + 'data sender node is not in the consensus group of the' + ' account' + ) + return + } + + const singedData = data as SignedObject + + if (this.crypto.verify(singedData) === false) { + nestedCountersInstance.countEvent('queueEntryAddData', 'data signature verification failed') + return + } + } + + queueEntry.collectedData[data.accountId] = data + queueEntry.dataCollected = Object.keys(queueEntry.collectedData).length + + //make a deep copy of the data + queueEntry.originalData[data.accountId] = Utils.safeJsonParse(Utils.safeStringify(data)) + queueEntry.beforeHashes[data.accountId] = data.stateId + + if (queueEntry.dataCollected === queueEntry.uniqueKeys.length) { + // queueEntry.tx Keys.allKeys.length + queueEntry.hasAll = true + // this.gossipCompleteData(queueEntry) + if (queueEntry.executionGroup && queueEntry.executionGroup.length > 1) + this.shareCompleteDataToNeighbours(queueEntry) + if (logFlags.debug || this.stateManager.consensusLog) { + this.mainLogger.debug( + `queueEntryAddData hasAll: true for txId ${queueEntry.logID} ${ + queueEntry.acceptedTx.txId + } at timestamp: ${shardusGetTime()} nodeId: ${Self.id} collected ${ + Object.keys(queueEntry.collectedData).length + } uniqueKeys ${queueEntry.uniqueKeys.length}` + ) + } + } + + if (data.localCache) { + queueEntry.localCachedData[data.accountId] = data.localCache + delete data.localCache + } + + /* prettier-ignore */ if 
(logFlags.playback) this.logger.playbackLogNote('shrd_addData', `${queueEntry.logID}`, `key ${utils.makeShortHash(data.accountId)} hash: ${utils.makeShortHash(data.stateId)} hasAll:${queueEntry.hasAll} collected:${queueEntry.dataCollected} ${queueEntry.acceptedTx.timestamp}`) + profilerInstance.profileSectionStart('queueEntryAddData', true) + }, + + /** + * queueEntryHasAllData + * Test if the queueEntry has all the data it needs. + * TODO could be slightly more if it only recalculated when dirty.. but that would add more state and complexity, + * so wait for this to show up in the profiler before fixing + * @param queueEntry + */ + queueEntryHasAllData(this: TransactionQueueContext, queueEntry: QueueEntry): boolean { + if (queueEntry.hasAll === true) { + return true + } + if (queueEntry.uniqueKeys == null) { + throw new Error(`queueEntryHasAllData (queueEntry.uniqueKeys == null)`) + } + let dataCollected = 0 + for (const key of queueEntry.uniqueKeys) { + // eslint-disable-next-line security/detect-object-injection + if (queueEntry.collectedData[key] != null) { + dataCollected++ + } + } + if (dataCollected === queueEntry.uniqueKeys.length) { + // queueEntry.tx Keys.allKeys.length uniqueKeys.length + queueEntry.hasAll = true + return true + } + return false + }, + + queueEntryListMissingData(this: TransactionQueueContext, queueEntry: QueueEntry): string[] { + if (queueEntry.hasAll === true) { + return [] + } + if (queueEntry.uniqueKeys == null) { + throw new Error(`queueEntryListMissingData (queueEntry.uniqueKeys == null)`) + } + const missingAccounts = [] + for (const key of queueEntry.uniqueKeys) { + // eslint-disable-next-line security/detect-object-injection + if (queueEntry.collectedData[key] == null) { + missingAccounts.push(key) + } + } + + return missingAccounts + }, + + /** + * queueEntryRequestMissingData + * ask other nodes for data that is missing for this TX. + * normally other nodes in the network should foward data to us at the correct time. + * This is only for the case that a TX has waited too long and not received the data it needs. 
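+ * Each missing key is retried up to 5 times against randomly chosen consensus nodes for that key's
+ * home partition via the binary request_state_for_tx route; if the entry still does not have all of
+ * its data afterwards, it falls back to waitForReceiptOnly and the receipt/repair path.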
+ * @param queueEntry + */ + async queueEntryRequestMissingData(this: TransactionQueueContext, queueEntry: QueueEntry): Promise { + if (this.stateManager.currentCycleShardData == null) { + return + } + + if (queueEntry.pendingDataRequest === true) { + return + } + queueEntry.pendingDataRequest = true + + nestedCountersInstance.countEvent('processing', 'queueEntryRequestMissingData-start') + + if (!queueEntry.requests) { + queueEntry.requests = {} + } + if (queueEntry.uniqueKeys == null) { + throw new Error('queueEntryRequestMissingData queueEntry.uniqueKeys == null') + } + + const allKeys = [] + for (const key of queueEntry.uniqueKeys) { + // eslint-disable-next-line security/detect-object-injection + if (queueEntry.collectedData[key] == null) { + allKeys.push(key) + } + } + + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingData_start', `${queueEntry.acceptedTx.txId}`, `qId: ${queueEntry.entryID} AccountsMissing:${utils.stringifyReduce(allKeys)}`) + + for (const key of queueEntry.uniqueKeys) { + // eslint-disable-next-line security/detect-object-injection + if (queueEntry.collectedData[key] == null && queueEntry.requests[key] == null) { + let keepTrying = true + let triesLeft = 5 + // let triesLeft = Math.min(5, consensusGroup.length ) + // let nodeIndex = 0 + while (keepTrying) { + if (triesLeft <= 0) { + keepTrying = false + break + } + triesLeft-- + // eslint-disable-next-line security/detect-object-injection + const homeNodeShardData = queueEntry.homeNodes[key] // mark outstanding request somehow so we dont rerequest + + // let node = consensusGroup[nodeIndex] + // nodeIndex++ + + // find a random node to ask that is not us + let node = null + let randomIndex: number + let foundValidNode = false + let maxTries = 1000 + + // todo make this non random!!!. It would be better to build a list and work through each node in order and then be finished + // we have other code that does this fine. + while (foundValidNode == false) { + maxTries-- + randomIndex = this.stateManager.getRandomInt(homeNodeShardData.consensusNodeForOurNodeFull.length - 1) + // eslint-disable-next-line security/detect-object-injection + node = homeNodeShardData.consensusNodeForOurNodeFull[randomIndex] + if (maxTries < 0) { + //FAILED + this.statemanager_fatal( + `queueEntryRequestMissingData`, + `queueEntryRequestMissingData: unable to find node to ask after 1000 tries tx:${ + queueEntry.logID + } key: ${utils.makeShortHash(key)} ${utils.stringifyReduce( + homeNodeShardData.consensusNodeForOurNodeFull.map((x) => (x != null ? x.id : 'null')) + )}` + ) + break + } + if (node == null) { + continue + } + if (node.id === this.stateManager.currentCycleShardData.nodeShardData.node.id) { + continue + } + foundValidNode = true + } + + if (node == null) { + continue + } + if (node.status != 'active' || potentiallyRemoved.has(node.id)) { + continue + } + if (node === this.stateManager.currentCycleShardData.ourNode) { + continue + } + + // Todo: expand this to grab a consensus node from any of the involved consensus nodes. 
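+ // A minimal sketch of the ordered-walk alternative the TODOs above suggest (illustrative only,
+ // reusing the variables already in scope; not the selection strategy used here):
+ //   const candidates = homeNodeShardData.consensusNodeForOurNodeFull.filter(
+ //     (n) => n != null && n.id !== this.stateManager.currentCycleShardData.ourNode.id
+ //   )
+ //   for (const candidate of candidates) { /* ask each node in order; stop at the first good response */ }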
+ + for (const key2 of allKeys) { + // eslint-disable-next-line security/detect-object-injection + queueEntry.requests[key2] = node + } + + const relationString = ShardFunctions.getNodeRelation( + homeNodeShardData, + this.stateManager.currentCycleShardData.ourNode.id + ) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingData_ask', `${queueEntry.logID}`, `r:${relationString} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} AccountsMissing:${utils.stringifyReduce(allKeys)}`) + + // Node Precheck! + if ( + this.stateManager.isNodeValidForInternalMessage(node.id, 'queueEntryRequestMissingData', true, true) === + false + ) { + // if(this.tryNextDataSourceNode('queueEntryRequestMissingData') == false){ + // break + // } + continue + } + + const message = { + keys: allKeys, + txid: queueEntry.acceptedTx.txId, + timestamp: queueEntry.acceptedTx.timestamp, + } + let result = null + try { + // if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.requestStateForTxBinary) { + // GOLD-66 Error handling try/catch happens one layer outside of this function in process transactions + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455101 ${shardusGetTime()} tx:${message.txid} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(node.id)}: ${'request_state_for_tx'}`) + result = (await this.p2p.askBinary( + node, + InternalRouteEnum.binary_request_state_for_tx, + message, + serializeRequestStateForTxReq, + deserializeRequestStateForTxResp, + {} + )) as RequestStateForTxRespSerialized + // } else { + // result = (await this.p2p.ask(node, 'request_state_for_tx', message)) as RequestStateForTxResp + // } + } catch (error) { + /* prettier-ignore */ if (logFlags.error) { + if (error instanceof ResponseError) { + this.mainLogger.error( + `ASK FAIL request_state_for_tx : exception encountered where the error is ${error}` + ) + } + } + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('askBinary request_state_for_tx exception:', error) + + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`askBinary error: ${InternalRouteEnum.binary_request_state_for_tx} asked to ${node.externalIp}:${node.externalPort}:${node.id}`) + } + + if (result == null) { + if (logFlags.verbose) { + if (logFlags.error) this.mainLogger.error('ASK FAIL request_state_for_tx') + } + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingData_askfailretry', `${queueEntry.logID}`, `r:${relationString} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} `) + continue + } + if (result.success !== true) { + if (logFlags.error) this.mainLogger.error('ASK FAIL queueEntryRequestMissingData 9') + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingData_askfailretry2', `${queueEntry.logID}`, `r:${relationString} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} `) + continue + } + + let dataCountReturned = 0 + const accountIdsReturned = [] + for (const data of result.stateList) { + this.queueEntryAddData(queueEntry, data) + dataCountReturned++ + accountIdsReturned.push(utils.makeShortHash(data.accountId)) + } + + if (queueEntry.hasAll === true) { + queueEntry.logstate = 'got all missing data' + } else { + queueEntry.logstate = 'failed to get data:' + queueEntry.hasAll + //This will time out and go to reciept repair mode if it does not get more data sent to it. 
+ } + + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingData_result', `${queueEntry.logID}`, `r:${relationString} result:${queueEntry.logstate} dataCount:${dataCountReturned} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} AccountsMissing:${utils.stringifyReduce(allKeys)} AccountsReturned:${utils.stringifyReduce(accountIdsReturned)}`) + + // queueEntry.homeNodes[key] = null + for (const key2 of allKeys) { + delete queueEntry.requests[key2] + } + + if (queueEntry.hasAll === true) { + break + } + + keepTrying = false + } + } + } + + if (queueEntry.hasAll === true) { + nestedCountersInstance.countEvent('processing', 'queueEntryRequestMissingData-success') + } else { + nestedCountersInstance.countEvent('processing', 'queueEntryRequestMissingData-failed') + + //give up and wait for receipt + queueEntry.waitForReceiptOnly = true + + if (this.config.stateManager.txStateMachineChanges) { + this.updateTxState(queueEntry, 'await final data', 'missing data') + } else { + this.updateTxState(queueEntry, 'consensing') + } + + if (logFlags.debug) + this.mainLogger.debug(`queueEntryRequestMissingData failed to get all data for: ${queueEntry.logID}`) + } + }, + + /** + * queueEntryRequestMissingReceipt + * Ask other nodes for a receipt to go with this TX + * @param queueEntry + */ + async queueEntryRequestMissingReceipt(this: TransactionQueueContext, queueEntry: QueueEntry): Promise { + if (this.stateManager.currentCycleShardData == null) { + return + } + + if (queueEntry.uniqueKeys == null) { + throw new Error('queueEntryRequestMissingReceipt queueEntry.uniqueKeys == null') + } + + if (queueEntry.requestingReceipt === true) { + return + } + + queueEntry.requestingReceipt = true + queueEntry.receiptEverRequested = true + + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingReceipt_start', `${queueEntry.acceptedTx.txId}`, `qId: ${queueEntry.entryID}`) + + const consensusGroup = this.queueEntryGetConsensusGroup(queueEntry) + + this.stateManager.debugNodeGroup( + queueEntry.acceptedTx.txId, + queueEntry.acceptedTx.timestamp, + `queueEntryRequestMissingReceipt`, + consensusGroup + ) + //the outer loop here could just use the transaction group of nodes instead. 
but already had this working in a similar function + //TODO change it to loop the transaction group untill we get a good receipt + + //Note: we only need to get one good receipt, the loop on keys is in case we have to try different groups of nodes + let gotReceipt = false + for (const key of queueEntry.uniqueKeys) { + if (gotReceipt === true) { + break + } + + let keepTrying = true + let triesLeft = Math.min(5, consensusGroup.length) + let nodeIndex = 0 + while (keepTrying) { + if (triesLeft <= 0) { + keepTrying = false + break + } + triesLeft-- + // eslint-disable-next-line security/detect-object-injection + const homeNodeShardData = queueEntry.homeNodes[key] // mark outstanding request somehow so we dont rerequest + + // eslint-disable-next-line security/detect-object-injection + const node = consensusGroup[nodeIndex] + nodeIndex++ + + if (node == null) { + continue + } + if (node.status != 'active' || potentiallyRemoved.has(node.id)) { + continue + } + if (node === this.stateManager.currentCycleShardData.ourNode) { + continue + } + + const relationString = ShardFunctions.getNodeRelation( + homeNodeShardData, + this.stateManager.currentCycleShardData.ourNode.id + ) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingReceipt_ask', `${queueEntry.logID}`, `r:${relationString} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} `) + + // Node Precheck! + if ( + this.stateManager.isNodeValidForInternalMessage(node.id, 'queueEntryRequestMissingReceipt', true, true) === + false + ) { + // if(this.tryNextDataSourceNode('queueEntryRequestMissingReceipt') == false){ + // break + // } + continue + } + + const message = { txid: queueEntry.acceptedTx.txId, timestamp: queueEntry.acceptedTx.timestamp } + let result = null + // GOLD-67 to be safe this function needs a try/catch block to prevent a timeout from causing an unhandled exception + // if ( + // this.stateManager.config.p2p.useBinarySerializedEndpoints && + // this.stateManager.config.p2p.requestReceiptForTxBinary + // ) { + try { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455101 ${shardusGetTime()} tx:${message.txid} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(node.id)}: ${'request_receipt_for_tx'}`) + result = await this.p2p.askBinary( + node, + InternalRouteEnum.binary_request_receipt_for_tx, + message, + serializeRequestReceiptForTxReq, + deserializeRequestReceiptForTxResp, + {} + ) + } catch (e) { + this.statemanager_fatal(`queueEntryRequestMissingReceipt`, `error: ${e.message}`) + /* prettier-ignore */ this.mainLogger.error(`askBinary error: ${InternalRouteEnum.binary_request_receipt_for_tx} asked to ${node.externalIp}:${node.externalPort}:${node.id}`) + } + + if (result == null) { + if (logFlags.verbose) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`ASK FAIL request_receipt_for_tx ${triesLeft} ${utils.makeShortHash(node.id)}`) + } + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingReceipt_askfailretry', `${queueEntry.logID}`, `r:${relationString} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} `) + continue + } + if (result.success !== true) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`ASK FAIL queueEntryRequestMissingReceipt 9 ${triesLeft} ${utils.makeShortHash(node.id)}:${utils.makeShortHash(node.internalPort)} note:${result.note} txid:${queueEntry.logID}`) + continue + } + + /* 
prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingReceipt_result', `${queueEntry.logID}`, `r:${relationString} result:${queueEntry.logstate} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} result: ${utils.stringifyReduce(result)}`) + + if (result.success === true && result.receipt != null) { + //TODO implement this!!! + queueEntry.receivedSignedReceipt = result.receipt + keepTrying = false + gotReceipt = true + + this.mainLogger.debug( + `queueEntryRequestMissingReceipt got good receipt for: ${queueEntry.logID} from: ${utils.makeShortHash( + node.id + )}:${utils.makeShortHash(node.internalPort)}` + ) + } + } + + // break the outer loop after we are done trying. todo refactor this. + if (keepTrying == false) { + break + } + } + queueEntry.requestingReceipt = false + + if (gotReceipt === false) { + queueEntry.requestingReceiptFailed = true + } + }, + + /** + * queueEntryGetTransactionGroup + * @param {QueueEntry} queueEntry + * @returns {Node[]} + */ + queueEntryGetTransactionGroup(this: TransactionQueueContext, queueEntry: QueueEntry, tryUpdate = false): Shardus.Node[] { + let cycleShardData = this.stateManager.currentCycleShardData + if (Context.config.stateManager.deterministicTXCycleEnabled) { + cycleShardData = this.stateManager.shardValuesByCycle.get(queueEntry.txGroupCycle) + } + if (cycleShardData == null) { + throw new Error('queueEntryGetTransactionGroup: currentCycleShardData == null') + } + if (queueEntry.uniqueKeys == null) { + throw new Error('queueEntryGetTransactionGroup: queueEntry.uniqueKeys == null') + } + if (queueEntry.transactionGroup != null && tryUpdate != true) { + return queueEntry.transactionGroup + } + + const txGroup: Shardus.Node[] = [] + const uniqueNodes: StringNodeObjectMap = {} + + let hasNonGlobalKeys = false + for (const key of queueEntry.uniqueKeys) { + // eslint-disable-next-line security/detect-object-injection + const homeNode = queueEntry.homeNodes[key] + // txGroup = Array.concat(txGroup, homeNode.nodeThatStoreOurParitionFull) + if (homeNode == null) { + if (logFlags.verbose) this.mainLogger.debug('queueEntryGetTransactionGroup homenode:null') + } + if (homeNode.extendedData === false) { + ShardFunctions.computeExtendedNodePartitionData( + cycleShardData.shardGlobals, + cycleShardData.nodeShardDataMap, + cycleShardData.parititionShardDataMap, + homeNode, + cycleShardData.nodes + ) + } + + //may need to go back and sync this logic with how we decide what partition to save a record in. + + // If this is not a global TX then skip tracking of nodes for global accounts used as a reference. + if (queueEntry.globalModification === false) { + if (this.stateManager.accountGlobals.isGlobalAccount(key) === true) { + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetTransactionGroup skipping: ${utils.makeShortHash(key)} tx: ${queueEntry.logID}`) + continue + } else { + hasNonGlobalKeys = true + } + } + + for (const node of homeNode.nodeThatStoreOurParitionFull) { + // not iterable! + uniqueNodes[node.id] = node + if (node.id === Self.id) + if (logFlags.verbose) + /* prettier-ignore */ this.mainLogger.debug(`queueEntryGetTransactionGroup tx ${queueEntry.logID} our node coverage key ${key}`) + } + + const scratch1 = {} + for (const node of homeNode.nodeThatStoreOurParitionFull) { + // not iterable! 
+ scratch1[node.id] = true + } + // make sure the home node is in there in case we hit and edge case + uniqueNodes[homeNode.node.id] = homeNode.node + + // TODO STATESHARDING4 is this next block even needed: + // HOMENODEMATHS need to patch in nodes that would cover this partition! + // TODO PERF make an optimized version of this in ShardFunctions that is smarter about which node range to check and saves off the calculation + // TODO PERF Update. this will scale badly with 100s or 1000s of nodes. need a faster solution that can use the list of accounts to + // build a list of nodes. + // maybe this could go on the partitions. + const { homePartition } = ShardFunctions.addressToPartition(cycleShardData.shardGlobals, key) + if (homePartition != homeNode.homePartition) { + //loop all nodes for now + for (const nodeID of cycleShardData.nodeShardDataMap.keys()) { + const nodeShardData: StateManagerTypes.shardFunctionTypes.NodeShardData = + cycleShardData.nodeShardDataMap.get(nodeID) + const nodeStoresThisPartition = ShardFunctions.testInRange(homePartition, nodeShardData.storedPartitions) + /* eslint-disable security/detect-object-injection */ + if (nodeStoresThisPartition === true && uniqueNodes[nodeID] == null) { + //setting this will cause it to end up in the transactionGroup + uniqueNodes[nodeID] = nodeShardData.node + queueEntry.patchedOnNodes.set(nodeID, nodeShardData) + } + // build index for patched nodes based on the home node: + if (nodeStoresThisPartition === true) { + if (scratch1[nodeID] == null) { + homeNode.patchedOnNodes.push(nodeShardData.node) + scratch1[nodeID] = true + } + } + /* eslint-enable security/detect-object-injection */ + } + } + + //todo refactor this to where we insert the tx + if (queueEntry.globalModification === false && this.executeInOneShard && key === queueEntry.executionShardKey) { + //queueEntry.executionGroup = homeNode.consensusNodeForOurNodeFull.slice() + const executionKeys = [] + if (logFlags.verbose) { + for (const node of queueEntry.executionGroup) { + executionKeys.push(utils.makeShortHash(node.id) + `:${node.externalPort}`) + } + } + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetTransactionGroup executeInOneShard ${queueEntry.logID} isInExecutionHome:${queueEntry.isInExecutionHome} executionGroup:${Utils.safeStringify(executionKeys)}`) + /* prettier-ignore */ if (logFlags.playback && logFlags.verbose) this.logger.playbackLogNote('queueEntryGetTransactionGroup', `queueEntryGetTransactionGroup executeInOneShard ${queueEntry.logID} isInExecutionHome:${queueEntry.isInExecutionHome} executionGroup:${Utils.safeStringify(executionKeys)}`) + } + + } + queueEntry.ourNodeInTransactionGroup = true + if (uniqueNodes[cycleShardData.ourNode.id] == null) { + queueEntry.ourNodeInTransactionGroup = false + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetTransactionGroup not involved: hasNonG:${hasNonGlobalKeys} tx ${queueEntry.logID}`) + } + if (queueEntry.ourNodeInTransactionGroup) + if (logFlags.seqdiagram) + /* prettier-ignore */ this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: targetgroup`) + + // make sure our node is included: needed for gossip! - although we may not care about the data! 
+ // This may seem confusing, but to gossip to other nodes, we have to have our node in the list we will gossip to + // Other logic will use queueEntry.ourNodeInTransactionGroup to know what else to do with the queue entry + uniqueNodes[cycleShardData.ourNode.id] = cycleShardData.ourNode + + const values = Object.values(uniqueNodes) + for (const v of values) { + txGroup.push(v) + } + + txGroup.sort(this.stateManager._sortByIdAsc) + if (queueEntry.ourNodeInTransactionGroup) { + const ourID = cycleShardData.ourNode.id + for (let idx = 0; idx < txGroup.length; idx++) { + // eslint-disable-next-line security/detect-object-injection + const node = txGroup[idx] + if (node.id === ourID) { + queueEntry.ourTXGroupIndex = idx + break + } + } + } + if (tryUpdate != true) { + if (Context.config.stateManager.deterministicTXCycleEnabled === false) { + queueEntry.txGroupCycle = this.stateManager.currentCycleShardData.cycleNumber + } + queueEntry.transactionGroup = txGroup + } else { + queueEntry.updatedTxGroupCycle = this.stateManager.currentCycleShardData.cycleNumber + queueEntry.transactionGroup = txGroup + } + + return txGroup + }, + + /** + * queueEntryGetConsensusGroup + * Gets a merged results of all the consensus nodes for all of the accounts involved in the transaction + * Ignores global accounts if globalModification == false and the account is global + * @param {QueueEntry} queueEntry + * @returns {Node[]} + */ + queueEntryGetConsensusGroup(this: TransactionQueueContext, queueEntry: QueueEntry): Shardus.Node[] { + let cycleShardData = this.stateManager.currentCycleShardData + if (Context.config.stateManager.deterministicTXCycleEnabled) { + cycleShardData = this.stateManager.shardValuesByCycle.get(queueEntry.txGroupCycle) + } + if (cycleShardData == null) { + throw new Error('queueEntryGetConsensusGroup: currentCycleShardData == null') + } + if (queueEntry.uniqueKeys == null) { + throw new Error('queueEntryGetConsensusGroup: queueEntry.uniqueKeys == null') + } + if (queueEntry.conensusGroup != null) { + return queueEntry.conensusGroup + } + const txGroup = [] + const uniqueNodes: StringNodeObjectMap = {} + + let hasNonGlobalKeys = false + for (const key of queueEntry.uniqueKeys) { + // eslint-disable-next-line security/detect-object-injection + const homeNode = queueEntry.homeNodes[key] + if (homeNode == null) { + if (logFlags.verbose) this.mainLogger.debug('queueEntryGetConsensusGroup homenode:null') + } + if (homeNode.extendedData === false) { + ShardFunctions.computeExtendedNodePartitionData( + cycleShardData.shardGlobals, + cycleShardData.nodeShardDataMap, + cycleShardData.parititionShardDataMap, + homeNode, + cycleShardData.nodes + ) + } + + // TODO STATESHARDING4 GLOBALACCOUNTS is this next block of logic needed? + // If this is not a global TX then skip tracking of nodes for global accounts used as a reference. 
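+ // (Likely safe to skip because, as in tellCorrespondingNodes, global account data is already held
+ // locally and does not need to be shared, so a global reference should not pull extra consensus
+ // nodes into this group.)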
+ if (queueEntry.globalModification === false) { + if (this.stateManager.accountGlobals.isGlobalAccount(key) === true) { + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetConsensusGroup skipping: ${utils.makeShortHash(key)} tx: ${queueEntry.logID}`) + continue + } else { + hasNonGlobalKeys = true + } + } + + for (const node of homeNode.consensusNodeForOurNodeFull) { + uniqueNodes[node.id] = node + } + + // make sure the home node is in there in case we hit and edge case + uniqueNodes[homeNode.node.id] = homeNode.node + } + queueEntry.ourNodeInConsensusGroup = true + if (uniqueNodes[cycleShardData.ourNode.id] == null) { + queueEntry.ourNodeInConsensusGroup = false + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetConsensusGroup not involved: hasNonG:${hasNonGlobalKeys} tx ${queueEntry.logID}`) + } + + // make sure our node is included: needed for gossip! - although we may not care about the data! + uniqueNodes[cycleShardData.ourNode.id] = cycleShardData.ourNode + + const values = Object.values(uniqueNodes) + for (const v of values) { + txGroup.push(v) + } + queueEntry.conensusGroup = txGroup + return txGroup + }, + + /** + * queueEntryGetConsensusGroupForAccount + * Gets a merged results of all the consensus nodes for a specific account involved in the transaction + * Ignores global accounts if globalModification == false and the account is global + * @param {QueueEntry} queueEntry + * @returns {Node[]} + */ + queueEntryGetConsensusGroupForAccount(this: TransactionQueueContext, queueEntry: QueueEntry, accountId: string): Shardus.Node[] { + let cycleShardData = this.stateManager.currentCycleShardData + if (Context.config.stateManager.deterministicTXCycleEnabled) { + cycleShardData = this.stateManager.shardValuesByCycle.get(queueEntry.txGroupCycle) + } + if (cycleShardData == null) { + throw new Error('queueEntryGetConsensusGroup: currentCycleShardData == null') + } + if (queueEntry.uniqueKeys == null) { + throw new Error('queueEntryGetConsensusGroup: queueEntry.uniqueKeys == null') + } + if (queueEntry.conensusGroup != null) { + return queueEntry.conensusGroup + } + if (queueEntry.uniqueKeys.includes(accountId) === false) { + throw new Error(`queueEntryGetConsensusGroup: account ${accountId} is not in the queueEntry.uniqueKeys`) + } + const txGroup = [] + const uniqueNodes: StringNodeObjectMap = {} + + let hasNonGlobalKeys = false + const key = accountId + // eslint-disable-next-line security/detect-object-injection + const homeNode = queueEntry.homeNodes[key] + if (homeNode == null) { + if (logFlags.verbose) this.mainLogger.debug('queueEntryGetConsensusGroup homenode:null') + } + if (homeNode.extendedData === false) { + ShardFunctions.computeExtendedNodePartitionData( + cycleShardData.shardGlobals, + cycleShardData.nodeShardDataMap, + cycleShardData.parititionShardDataMap, + homeNode, + cycleShardData.nodes + ) + } + + // TODO STATESHARDING4 GLOBALACCOUNTS is this next block of logic needed? + // If this is not a global TX then skip tracking of nodes for global accounts used as a reference. 
+ if (queueEntry.globalModification === false) { + if (this.stateManager.accountGlobals.isGlobalAccount(key) === true) { + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetConsensusGroup skipping: ${utils.makeShortHash(key)} tx: ${queueEntry.logID}`) + } else { + hasNonGlobalKeys = true + } + } + + for (const node of homeNode.consensusNodeForOurNodeFull) { + uniqueNodes[node.id] = node + } + + // make sure the home node is in there in case we hit and edge case + uniqueNodes[homeNode.node.id] = homeNode.node + queueEntry.ourNodeInConsensusGroup = true + if (uniqueNodes[cycleShardData.ourNode.id] == null) { + queueEntry.ourNodeInConsensusGroup = false + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetConsensusGroup not involved: hasNonG:${hasNonGlobalKeys} tx ${queueEntry.logID}`) + } + + // make sure our node is included: needed for gossip! - although we may not care about the data! + uniqueNodes[cycleShardData.ourNode.id] = cycleShardData.ourNode + + const values = Object.values(uniqueNodes) + for (const v of values) { + txGroup.push(v) + } + return txGroup + }, + + /** + * removeFromQueue remove an item from the queue and place it in the archivedQueueEntries list for awhile in case we have to access it again + * @param {QueueEntry} queueEntry + * @param {number} currentIndex + */ + removeFromQueue(this: TransactionQueueContext, queueEntry: QueueEntry, currentIndex: number, archive = true): void { + // end all the pending txDebug timers + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455104 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: removed`) + for (const key in queueEntry.txDebug.startTime) { + if (queueEntry.txDebug.startTime[key] != null) { + this.txDebugMarkEndTime(queueEntry, key) + } + } + // this.txDebugMarkEndTime(queueEntry, 'total_queue_time') + this.stateManager.eventEmitter.emit('txPopped', queueEntry.acceptedTx.txId) + if (queueEntry.txDebug) this.dumpTxDebugToStatList(queueEntry) + this._transactionQueue.splice(currentIndex, 1) + this._transactionQueueByID.delete(queueEntry.acceptedTx.txId) + + if (archive === false) { + if (logFlags.debug) this.mainLogger.debug(`removeFromQueue: ${queueEntry.logID} done. No archive`) + return + } + + queueEntry.archived = true + //compact the queue entry before we push it! + queueEntry.ourVote = null + queueEntry.collectedVotes = null + + // coalesce the receipts into applied receipt. maybe not as descriptive, but save memory. + queueEntry.appliedReceipt = + queueEntry.appliedReceipt ?? + queueEntry.recievedAppliedReceipt ?? + queueEntry.appliedReceiptForRepair ?? + queueEntry.appliedReceiptFinal + queueEntry.recievedAppliedReceipt = null + queueEntry.appliedReceiptForRepair = null + queueEntry.appliedReceiptFinal = queueEntry.appliedReceipt + + delete queueEntry.recievedAppliedReceipt + delete queueEntry.appliedReceiptForRepair + + // coalesce the receipt2s into applied receipt. maybe not as descriptive, but save memory. + queueEntry.recievedAppliedReceipt2 = null + queueEntry.appliedReceiptForRepair2 = null + + delete queueEntry.recievedAppliedReceipt2 + delete queueEntry.appliedReceiptForRepair2 + + queueEntry.signedReceipt = + queueEntry.signedReceipt ?? + queueEntry.receivedSignedReceipt ?? + queueEntry.signedReceiptForRepair ?? 
+ queueEntry.signedReceiptFinal + queueEntry.receivedSignedReceipt = null + queueEntry.signedReceiptForRepair = null + queueEntry.signedReceiptFinal = queueEntry.signedReceipt + + delete queueEntry.receivedSignedReceipt + delete queueEntry.signedReceiptForRepair + + this.archivedQueueEntries.push(queueEntry) + + this.archivedQueueEntriesByID.set(queueEntry.acceptedTx.txId, queueEntry) + // period cleanup will usually get rid of these sooner if the list fills up + if (this.archivedQueueEntries.length > this.archivedQueueEntryMaxCount) { + this.archivedQueueEntriesByID.delete(this.archivedQueueEntries[0].acceptedTx.txId) + this.archivedQueueEntries.shift() + } + if (logFlags.debug) this.mainLogger.debug(`removeFromQueue: ${queueEntry.logID} and added to archive done`) + } +} \ No newline at end of file diff --git a/src/state-manager/TransactionQueue.expired.ts b/src/state-manager/TransactionQueue.expired.ts new file mode 100644 index 000000000..f04d7921b --- /dev/null +++ b/src/state-manager/TransactionQueue.expired.ts @@ -0,0 +1,45 @@ +import { logFlags } from '../logger' +import * as utils from '../utils' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { QueueEntry } from './state-manager-types' +import { TransactionQueueContext } from './TransactionQueue.context' + +export const expiredMethods = { + setTXExpired(this: TransactionQueueContext, queueEntry: QueueEntry, currentIndex: number, message: string): void { + /* prettier-ignore */ if (logFlags.verbose || this.stateManager.consensusLog) this.mainLogger.debug(`setTXExpired tx:${queueEntry.logID} ${message} ts:${queueEntry.acceptedTx.timestamp} debug:${utils.stringifyReduce(queueEntry.debug)} state: ${queueEntry.state}, isInExecution: ${queueEntry.isInExecutionHome}`) + this.updateTxState(queueEntry, 'expired') + this.removeFromQueue(queueEntry, currentIndex) + this.app.transactionReceiptFail( + queueEntry.acceptedTx.data, + queueEntry.collectedData, + queueEntry.preApplyTXResult?.applyResponse + ) + this.stateManager.eventEmitter.emit('txExpired', queueEntry.acceptedTx.txId) + + /* prettier-ignore */ nestedCountersInstance.countEvent( 'txExpired', `tx: ${this.app.getSimpleTxDebugValue(queueEntry.acceptedTx?.data)}` ) + + //This is really important. If we are going to expire a TX, then look to see if we already have a receipt for it. + //If so, then just go into async receipt repair mode for the TX AFTER it has been expired and removed from the queue + if (queueEntry.signedReceiptFinal != null) { + const startRepair = queueEntry.repairStarted === false + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`setTXExpired. ${queueEntry.logID} start repair:${startRepair}. update `) + if (startRepair) { + nestedCountersInstance.countEvent('repair1', 'setTXExpired: start repair') + queueEntry.signedReceiptForRepair = queueEntry.signedReceiptFinal + //todo any limits to how many repairs at once to allow? + this.stateManager.getTxRepair().repairToMatchReceipt(queueEntry) + } + } else { + nestedCountersInstance.countEvent('repair1', 'setTXExpired: no receipt to repair') + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`setTXExpired. 
no receipt to repair ${queueEntry.logID}`) + } + }, + + setTxAlmostExpired(this: TransactionQueueContext, queueEntry: QueueEntry, currentIndex: number, message: string): void { + /* prettier-ignore */ if (logFlags.verbose || this.stateManager.consensusLog) this.mainLogger.debug(`setTxAlmostExpired tx:${queueEntry.logID} ${message} ts:${queueEntry.acceptedTx.timestamp} debug:${utils.stringifyReduce(queueEntry.debug)}`) + // this.updateTxState(queueEntry, 'almostExpired') + queueEntry.almostExpired = true + + /* prettier-ignore */ nestedCountersInstance.countEvent("txAlmostExpired", `tx: ${this.app.getSimpleTxDebugValue(queueEntry.acceptedTx?.data)}`) + } +} \ No newline at end of file diff --git a/src/state-manager/TransactionQueue.fact.ts b/src/state-manager/TransactionQueue.fact.ts new file mode 100644 index 000000000..846dccb61 --- /dev/null +++ b/src/state-manager/TransactionQueue.fact.ts @@ -0,0 +1,1398 @@ +import { QueueEntry } from './state-manager-types' +import * as Shardus from '../shardus/shardus-types' +import { StateManager as StateManagerTypes } from '@shardeum-foundation/lib-types' +import { logFlags } from '../logger' +import * as utils from '../utils' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { shardusGetTime } from '../network' +import { Node } from '@shardeum-foundation/lib-types/build/src/p2p/NodeListTypes' +import { getCorrespondingNodes } from '../utils/fastAggregatedCorrespondingTell' +import * as Comms from '../p2p/Comms' +import { BroadcastStateReq, serializeBroadcastStateReq } from '../types/BroadcastStateReq' +import { verificationDataCombiner } from '../types/Helpers' +import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' +import * as Self from '../p2p/Self' +import { config as configContext, P2PModuleContext } from '../p2p/Context' +import * as Context from '../p2p/Context' +import { P2P as P2PTypes } from '@shardeum-foundation/lib-types' +import { StringNodeObjectMap, WrappedResponses, RequestFinalDataResp } from './state-manager-types' +import * as NodeList from '../p2p/NodeList' +import { byPubKey, nodes } from '../p2p/NodeList' +import { verifyCorrespondingSender } from '../utils/fastAggregatedCorrespondingTell' +import { BroadcastFinalStateReq, serializeBroadcastFinalStateReq } from '../types/BroadcastFinalStateReq' +import { PoqoDataAndReceiptReq, serializePoqoDataAndReceiptReq } from '../types/PoqoDataAndReceiptReq' +import { profilerInstance } from '../utils/profiler' +import ShardFunctions from './shardFunctions' +import { DebugComplete } from './TransactionQueue' +import { Utils } from '@shardeum-foundation/lib-types' +import { RequestTxAndStateReq, serializeRequestTxAndStateReq } from '../types/RequestTxAndStateReq' +import { RequestTxAndStateResp, deserializeRequestTxAndStateResp } from '../types/RequestTxAndStateResp' + +interface TransactionQueueContext { + stateManager: any + logger: any + mainLogger: any + profiler: any + app: Shardus.App + setDebugLastAwaitedCallInner: (call: string, status?: DebugComplete) => void + queueEntryAddData: (queueEntry: QueueEntry, data: any, signatureCheck?: boolean) => void + useNewPOQ: boolean + txDebugStartTiming: (queueEntry: QueueEntry, tag: string) => void + txDebugEndTiming: (queueEntry: QueueEntry, tag: string) => void + p2p: P2PModuleContext + config: Shardus.StrictServerConfiguration + factValidateCorrespondingTellSender: (queueEntry: QueueEntry, accountId: string, senderId: string) => boolean + validateCorrespondingTellSender: (queueEntry: QueueEntry, dataKey: 
string, senderNodeId: string) => boolean + crypto: any + broadcastState: (nodes: Shardus.Node[], message: { stateList: Shardus.WrappedResponse[]; txid: string }, context: string) => Promise + statemanager_fatal: (key: string, log: string) => void + getStartAndEndIndexOfTargetGroup: (targetGroup: string[], transactionGroup: any[]) => { startIndex: number; endIndex: number } + executeInOneShard: boolean + getStorageGroupForAccount: (accountId: string) => any[] + seqLogger: any +} + +export const factMethods = { + /** + * tellCorrespondingNodes + * @param queueEntry + * -sends account data to the correct involved nodees + * -loads locally available data into the queue entry + */ + async tellCorrespondingNodes(this: TransactionQueueContext, queueEntry: QueueEntry): Promise { + if (this.stateManager.currentCycleShardData == null) { + throw new Error('tellCorrespondingNodes: currentCycleShardData == null') + } + if (queueEntry.uniqueKeys == null) { + throw new Error('tellCorrespondingNodes: queueEntry.uniqueKeys == null') + } + // Report data to corresponding nodes + const ourNodeData = this.stateManager.currentCycleShardData.nodeShardData + // let correspondingEdgeNodes = [] + let correspondingAccNodes: Shardus.Node[] = [] + const dataKeysWeHave = [] + const dataValuesWeHave = [] + const datas: { [accountID: string]: Shardus.WrappedResponse } = {} + const remoteShardsByKey: { [accountID: string]: StateManagerTypes.shardFunctionTypes.NodeShardData } = {} // shard homenodes that we do not have the data for. + let loggedPartition = false + for (const key of queueEntry.uniqueKeys) { + /// test here + // let hasKey = ShardFunctions.testAddressInRange(key, ourNodeData.storedPartitions) + // todo : if this works maybe a nicer or faster version could be used + let hasKey = false + // eslint-disable-next-line security/detect-object-injection + const homeNode = queueEntry.homeNodes[key] + if (homeNode.node.id === ourNodeData.node.id) { + hasKey = true + } else { + //perf todo: this seems like a slow calculation, coult improve this + for (const node of homeNode.nodeThatStoreOurParitionFull) { + if (node.id === ourNodeData.node.id) { + hasKey = true + break + } + } + } + + // HOMENODEMATHS tellCorrespondingNodes patch the value of hasKey + // did we get patched in + if (queueEntry.patchedOnNodes.has(ourNodeData.node.id)) { + hasKey = true + } + + // for(let patchedNodeID of queueEntry.patchedOnNodes.values()){ + // } + + let isGlobalKey = false + //intercept that we have this data rather than requesting it. + if (this.stateManager.accountGlobals.isGlobalAccount(key)) { + hasKey = true + isGlobalKey = true + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('globalAccountMap', queueEntry.logID, `tellCorrespondingNodes - has`) + } + + if (hasKey === false) { + if (loggedPartition === false) { + loggedPartition = true + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tellCorrespondingNodes hasKey=false: ${utils.stringifyReduce(homeNode.nodeThatStoreOurParitionFull.map((v) => v.id))}`) + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tellCorrespondingNodes hasKey=false: full: ${utils.stringifyReduce(homeNode.nodeThatStoreOurParitionFull)}`) + } + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tellCorrespondingNodes hasKey=false key: ${utils.stringifyReduce(key)}`) + } + + if (hasKey) { + // TODO PERF is it possible that this query could be used to update our in memory cache? 
(this would save us from some slow look ups) later on + // when checking timestamps.. alternatively maybe there is a away we can note the timestamp with what is returned here in the queueEntry data + // and not have to deal with the cache. + // todo old: Detect if our node covers this paritition.. need our partition data + + this.profiler.profileSectionStart('process_dapp.getRelevantData') + this.profiler.scopedProfileSectionStart('process_dapp.getRelevantData') + /* prettier-ignore */ this.setDebugLastAwaitedCallInner('this.stateManager.transactionQueue.app.getRelevantData') + let data = await this.app.getRelevantData(key, queueEntry.acceptedTx.data, queueEntry.acceptedTx.appData) + /* prettier-ignore */ this.setDebugLastAwaitedCallInner('this.stateManager.transactionQueue.app.getRelevantData', DebugComplete.Completed) + this.profiler.scopedProfileSectionEnd('process_dapp.getRelevantData') + this.profiler.profileSectionEnd('process_dapp.getRelevantData') + + //only queue this up to share if it is not a global account. global accounts dont need to be shared. + + // not sure if it is correct to update timestamp like this. + // if(data.timestamp === 0){ + // data.timestamp = queueEntry.acceptedTx.timestamp + // } + + //if this is not freshly created data then we need to make a backup copy of it!! + //This prevents us from changing data before the commiting phase + if (data.accountCreated == false) { + data = utils.deepCopy(data) + } + + if (isGlobalKey === false) { + // eslint-disable-next-line security/detect-object-injection + datas[key] = data + dataKeysWeHave.push(key) + dataValuesWeHave.push(data) + } + + // eslint-disable-next-line security/detect-object-injection + queueEntry.localKeys[key] = true + // add this data to our own queue entry!! + this.queueEntryAddData(queueEntry, data, false) + } else { + // eslint-disable-next-line security/detect-object-injection + remoteShardsByKey[key] = queueEntry.homeNodes[key] + } + } + if (queueEntry.globalModification === true) { + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('tellCorrespondingNodes', queueEntry.logID, `tellCorrespondingNodes - globalModification = true, not telling other nodes`) + return + } + + let message: { stateList: Shardus.WrappedResponse[]; txid: string } + let edgeNodeIds = [] + let consensusNodeIds = [] + + const nodesToSendTo: StringNodeObjectMap = {} + const doOnceNodeAccPair = new Set() //can skip node+acc if it happens more than once. + + for (const key of queueEntry.uniqueKeys) { + // eslint-disable-next-line security/detect-object-injection + if (datas[key] != null) { + for (const key2 of queueEntry.uniqueKeys) { + if (key !== key2) { + // eslint-disable-next-line security/detect-object-injection + const localHomeNode = queueEntry.homeNodes[key] + // eslint-disable-next-line security/detect-object-injection + const remoteHomeNode = queueEntry.homeNodes[key2] + + const ourLocalConsensusIndex = localHomeNode.consensusNodeForOurNodeFull.findIndex( + (a) => a.id === ourNodeData.node.id + ) + if (ourLocalConsensusIndex === -1) { + continue + } + + edgeNodeIds = [] + consensusNodeIds = [] + correspondingAccNodes = [] + + const ourSendingGroupSize = localHomeNode.consensusNodeForOurNodeFull.length + + const targetConsensusGroupSize = remoteHomeNode.consensusNodeForOurNodeFull.length + const targetEdgeGroupSize = remoteHomeNode.edgeNodes.length + const pachedListSize = remoteHomeNode.patchedOnNodes.length + + // must add one to each lookup index! 
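+ // debugFastStableCorrespondingIndicies maps our 1-based position in the sending group onto
+ // 1-based positions in the target group, so coverage of the remote nodes is deterministic across
+ // senders. Rough usage shape (assumed, mirroring the calls below):
+ //   const picks = ShardFunctions.debugFastStableCorrespondingIndicies(senderSize, targetSize, ourIndex + 1)
+ //   const target = remoteHomeNode.consensusNodeForOurNodeFull[picks[0] - 1] // 1-based -> 0-based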
+ const indicies = ShardFunctions.debugFastStableCorrespondingIndicies( + ourSendingGroupSize, + targetConsensusGroupSize, + ourLocalConsensusIndex + 1 + ) + const edgeIndicies = ShardFunctions.debugFastStableCorrespondingIndicies( + ourSendingGroupSize, + targetEdgeGroupSize, + ourLocalConsensusIndex + 1 + ) + + let patchIndicies = [] + if (remoteHomeNode.patchedOnNodes.length > 0) { + patchIndicies = ShardFunctions.debugFastStableCorrespondingIndicies( + ourSendingGroupSize, + remoteHomeNode.patchedOnNodes.length, + ourLocalConsensusIndex + 1 + ) + } + + // for each remote node lets save it's id + for (const index of indicies) { + const targetNode = remoteHomeNode.consensusNodeForOurNodeFull[index - 1] // fastStableCorrespondingIndicies is one based so adjust for 0 based array + //only send data to the execution group + if (queueEntry.executionGroupMap.has(targetNode.id) === false) { + continue + } + + if (targetNode != null && targetNode.id !== ourNodeData.node.id) { + nodesToSendTo[targetNode.id] = targetNode + consensusNodeIds.push(targetNode.id) + } + } + for (const index of edgeIndicies) { + const targetNode = remoteHomeNode.edgeNodes[index - 1] // fastStableCorrespondingIndicies is one based so adjust for 0 based array + if (targetNode != null && targetNode.id !== ourNodeData.node.id) { + //only send data to the execution group + if (queueEntry.executionGroupMap.has(targetNode.id) === false) { + continue + } + nodesToSendTo[targetNode.id] = targetNode + edgeNodeIds.push(targetNode.id) + } + } + + for (const index of patchIndicies) { + const targetNode = remoteHomeNode.edgeNodes[index - 1] // fastStableCorrespondingIndicies is one based so adjust for 0 based array + //only send data to the execution group + if (queueEntry.executionGroupMap.has(targetNode.id) === false) { + continue + } + if (targetNode != null && targetNode.id !== ourNodeData.node.id) { + nodesToSendTo[targetNode.id] = targetNode + //edgeNodeIds.push(targetNode.id) + } + } + + const dataToSend = [] + // eslint-disable-next-line security/detect-object-injection + dataToSend.push(datas[key]) // only sending just this one key at a time + + // sign each account data + for (let data of dataToSend) { + data = this.crypto.sign(data) + } + + message = { stateList: dataToSend, txid: queueEntry.acceptedTx.txId } + + //build correspondingAccNodes, but filter out nodeid, account key pairs we have seen before + for (const [accountID, node] of Object.entries(nodesToSendTo)) { + const keyPair = accountID + key + if (node != null && doOnceNodeAccPair.has(keyPair) === false) { + doOnceNodeAccPair.add(keyPair) + correspondingAccNodes.push(node) + } + } + + /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('tellCorrespondingNodes', queueEntry.logID, `tellCorrespondingNodes nodesToSendTo:${Object.keys(nodesToSendTo).length} doOnceNodeAccPair:${doOnceNodeAccPair.size} indicies:${Utils.safeStringify(indicies)} edgeIndicies:${Utils.safeStringify(edgeIndicies)} patchIndicies:${Utils.safeStringify(patchIndicies)} doOnceNodeAccPair: ${Utils.safeStringify([...doOnceNodeAccPair.keys()])} ourLocalConsensusIndex:${ourLocalConsensusIndex} ourSendingGroupSize:${ourSendingGroupSize} targetEdgeGroupSize:${targetEdgeGroupSize} targetEdgeGroupSize:${targetEdgeGroupSize} pachedListSize:${pachedListSize}`) + + if (correspondingAccNodes.length > 0) { + const remoteRelation = ShardFunctions.getNodeRelation( + remoteHomeNode, + this.stateManager.currentCycleShardData.ourNode.id + ) + const localRelation = 
ShardFunctions.getNodeRelation( + localHomeNode, + this.stateManager.currentCycleShardData.ourNode.id + ) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_tellCorrespondingNodes', `${queueEntry.acceptedTx.txId}`, `remoteRel: ${remoteRelation} localrel: ${localRelation} qId: ${queueEntry.entryID} AccountBeingShared: ${utils.makeShortHash(key)} EdgeNodes:${utils.stringifyReduce(edgeNodeIds)} ConsesusNodes${utils.stringifyReduce(consensusNodeIds)}`) + + // Filter nodes before we send tell() + const filteredNodes = this.stateManager.filterValidNodesForInternalMessage( + correspondingAccNodes, + 'tellCorrespondingNodes', + true, + true + ) + if (filteredNodes.length === 0) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('tellCorrespondingNodes: filterValidNodesForInternalMessage no valid nodes left to try') + return null + } + const filterdCorrespondingAccNodes = filteredNodes + + this.broadcastState(filterdCorrespondingAccNodes, message, 'tellCorrespondingNodes') + } + } + } + } + } + }, + + async factTellCorrespondingNodes(this: TransactionQueueContext, queueEntry: QueueEntry): Promise { + try { + let cycleShardData = this.stateManager.currentCycleShardData + if (Context.config.stateManager.deterministicTXCycleEnabled) { + cycleShardData = this.stateManager.shardValuesByCycle.get(queueEntry.txGroupCycle) + } + if (cycleShardData == null) { + throw new Error('factTellCorrespondingNodes: cycleShardData == null') + } + if (queueEntry.uniqueKeys == null) { + throw new Error('factTellCorrespondingNodes: queueEntry.uniqueKeys == null') + } + const ourNodeData = cycleShardData.nodeShardData + const dataKeysWeHave = [] + const dataValuesWeHave = [] + const datas: { [accountID: string]: Shardus.WrappedResponse } = {} + const remoteShardsByKey: { [accountID: string]: StateManagerTypes.shardFunctionTypes.NodeShardData } = {} // shard homenodes that we do not have the data for. + let loggedPartition = false + for (const key of queueEntry.uniqueKeys) { + let hasKey = ShardFunctions.testAddressInRange(key, ourNodeData.storedPartitions) + + // HOMENODEMATHS factTellCorrespondingNodes patch the value of hasKey + // did we get patched in + if (queueEntry.patchedOnNodes.has(ourNodeData.node.id)) { + hasKey = true + } + + let isGlobalKey = false + //intercept that we have this data rather than requesting it. + if (this.stateManager.accountGlobals.isGlobalAccount(key)) { + hasKey = true + isGlobalKey = true + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('globalAccountMap', queueEntry.logID, `factTellCorrespondingNodes - has`) + } + + if (hasKey === false) { + if (loggedPartition === false) { + loggedPartition = true + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factTellCorrespondingNodes hasKey=false`) + } + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factTellCorrespondingNodes hasKey=false key: ${utils.stringifyReduce(key)}`) + } + + if (hasKey) { + // TODO PERF is it possible that this query could be used to update our in memory cache? (this would save us from some slow look ups) later on + // when checking timestamps.. alternatively maybe there is a away we can note the timestamp with what is returned here in the queueEntry data + // and not have to deal with the cache. + // todo old: Detect if our node covers this paritition.. 
need our partition data + + this.profiler.profileSectionStart('process_dapp.getRelevantData') + this.profiler.scopedProfileSectionStart('process_dapp.getRelevantData') + /* prettier-ignore */ this.setDebugLastAwaitedCallInner('this.stateManager.transactionQueue.app.getRelevantData') + let data = await this.app.getRelevantData(key, queueEntry.acceptedTx.data, queueEntry.acceptedTx.appData) + /* prettier-ignore */ this.setDebugLastAwaitedCallInner('this.stateManager.transactionQueue.app.getRelevantData', DebugComplete.Completed) + this.profiler.scopedProfileSectionEnd('process_dapp.getRelevantData') + this.profiler.profileSectionEnd('process_dapp.getRelevantData') + + //if this is not freshly created data then we need to make a backup copy of it!! + //This prevents us from changing data before the commiting phase + if (data.accountCreated == false) { + data = utils.deepCopy(data) + } + + //only queue this up to share if it is not a global account. global accounts dont need to be shared. + if (isGlobalKey === false) { + // eslint-disable-next-line security/detect-object-injection + datas[key] = data + dataKeysWeHave.push(key) + dataValuesWeHave.push(data) + } + + // eslint-disable-next-line security/detect-object-injection + queueEntry.localKeys[key] = true + // add this data to our own queue entry!! + this.queueEntryAddData(queueEntry, data, false) + } else { + // eslint-disable-next-line security/detect-object-injection + remoteShardsByKey[key] = queueEntry.homeNodes[key] + } + } + if (queueEntry.globalModification === true) { + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('factTellCorrespondingNodes', queueEntry.logID, `factTellCorrespondingNodes - globalModification = true, not telling other nodes`) + return + } + + const payload: { stateList: Shardus.WrappedResponse[]; txid: string } = { + stateList: [], + txid: queueEntry.acceptedTx.txId, + } + for (const key of queueEntry.uniqueKeys) { + // eslint-disable-next-line security/detect-object-injection + if (datas[key] != null) { + // eslint-disable-next-line security/detect-object-injection + payload.stateList.push(datas[key]) // only sending just this one key at a time + } + } + // sign each account data + const signedPayload = this.crypto.sign(payload) + + // prepare inputs to get corresponding indices + const ourIndexInTxGroup = queueEntry.ourTXGroupIndex + const targetGroup = queueEntry.executionNodeIdSorted + const targetGroupSize = targetGroup.length + const senderGroupSize = targetGroupSize + + // calculate target start and end indices in txGroup + const targetIndices = this.getStartAndEndIndexOfTargetGroup(targetGroup, queueEntry.transactionGroup) + const unwrappedIndex = queueEntry.isSenderWrappedTxGroup[Self.id] + + // temp logs + if (logFlags.verbose) { + this.mainLogger.debug(`factTellCorrespondingNodes: target group size`, targetGroup.length, targetGroup) + this.mainLogger.debug( + `factTellCorrespondingNodes: tx group size`, + queueEntry.transactionGroup.length, + queueEntry.transactionGroup.map((n) => n.id) + ) + this.mainLogger.debug( + `factTellCorrespondingNodes: getting corresponding indices for tx: ${queueEntry.logID}`, + ourIndexInTxGroup, + targetIndices.startIndex, + targetIndices.endIndex, + queueEntry.correspondingGlobalOffset, + targetGroupSize, + senderGroupSize, + queueEntry.transactionGroup.length + ) + this.mainLogger.debug(`factTellCorrespondingNodes: target group indices`, targetIndices) + } + + let correspondingIndices = getCorrespondingNodes( + ourIndexInTxGroup, + 
targetIndices.startIndex, + targetIndices.endIndex, + queueEntry.correspondingGlobalOffset, + targetGroupSize, + senderGroupSize, + queueEntry.transactionGroup.length + ) + let oldCorrespondingIndices: number[] = undefined + if (this.config.stateManager.correspondingTellUseUnwrapped) { + // can just find if any home nodes for the accounts we cover would say that our node is wrapped + // precalc shouldUnwrapSender check if any account we own shows that we are on the left side of a wrapped range + // can use partitions to check this + if (unwrappedIndex != null) { + const extraCorrespondingIndices = getCorrespondingNodes( + unwrappedIndex, + targetIndices.startIndex, + targetIndices.endIndex, + queueEntry.correspondingGlobalOffset, + targetGroupSize, + senderGroupSize, + queueEntry.transactionGroup.length, + queueEntry.logID + ) + if (Context.config.stateManager.concatCorrespondingTellUseUnwrapped) { + //add them + correspondingIndices = correspondingIndices.concat(extraCorrespondingIndices) + } else { + // replace them + oldCorrespondingIndices = correspondingIndices + correspondingIndices = extraCorrespondingIndices + } + //replace them + // possible optimization where we pick one or the other path based on our account index + //correspondingIndices = extraCorrespondingIndices + } + } + // check if we should avoid our index in the corresponding nodes + if (Context.config.stateManager.avoidOurIndexInFactTell && correspondingIndices.includes(ourIndexInTxGroup)) { + if (logFlags.debug) + this.mainLogger.debug( + `factTellCorrespondingNodes: avoiding our index in tx group`, + ourIndexInTxGroup, + correspondingIndices + ) + queueEntry.correspondingGlobalOffset += 1 + nestedCountersInstance.countEvent('stateManager', 'factTellCorrespondingNodes: avoiding our index in tx group') + correspondingIndices = getCorrespondingNodes( + ourIndexInTxGroup, + targetIndices.startIndex, + targetIndices.endIndex, + queueEntry.correspondingGlobalOffset, + targetGroupSize, + senderGroupSize, + queueEntry.transactionGroup.length + ) + let oldCorrespondingIndices: number[] = undefined + if (this.config.stateManager.correspondingTellUseUnwrapped) { + // can just find if any home nodes for the accounts we cover would say that our node is wrapped + // precalc shouldUnwrapSender check if any account we own shows that we are on the left side of a wrapped range + // can use partitions to check this + if (unwrappedIndex != null) { + const extraCorrespondingIndices = getCorrespondingNodes( + unwrappedIndex, + targetIndices.startIndex, + targetIndices.endIndex, + queueEntry.correspondingGlobalOffset, + targetGroupSize, + senderGroupSize, + queueEntry.transactionGroup.length, + queueEntry.logID + ) + if (Context.config.stateManager.concatCorrespondingTellUseUnwrapped) { + //add them + correspondingIndices = correspondingIndices.concat(extraCorrespondingIndices) + } else { + // replace them + oldCorrespondingIndices = correspondingIndices + correspondingIndices = extraCorrespondingIndices + } + //replace them + // possible optimization where we pick one or the other path based on our account index + //correspondingIndices = extraCorrespondingIndices + } + } + if (logFlags.debug) + this.mainLogger.debug( + `factTellCorrespondingNodes: new corresponding indices after avoiding our index in tx group`, + ourIndexInTxGroup, + correspondingIndices + ) + } + + const validCorrespondingIndices = [] + for (const targetIndex of correspondingIndices) { + validCorrespondingIndices.push(targetIndex) + + // if (logFlags.debug) { + // // 
debug verification code + // const isValid = verifyCorrespondingSender(targetIndex, ourIndexInTxGroup, queueEntry.correspondingGlobalOffset, targetGroupSize, senderGroupSize, targetIndices.startIndex, targetIndices.endIndex, queueEntry.transactionGroup.length) + // if (logFlags.debug) this.mainLogger.debug(`factTellCorrespondingNodes: debug verifyCorrespondingSender`, ourIndexInTxGroup, '->', targetIndex, isValid); + // } + } + + const correspondingNodes = [] + for (const index of validCorrespondingIndices) { + if (index === ourIndexInTxGroup) { + continue + } + const targetNode = queueEntry.transactionGroup[index] + let targetHasOurData = false + + if (this.config.stateManager.filterReceivingNodesForTXData) { + targetHasOurData = true + for (const wrappedResponse of signedPayload.stateList) { + const accountId = wrappedResponse.accountId + const targetNodeShardData = cycleShardData.nodeShardDataMap.get(targetNode.id) + if (targetNodeShardData == null) { + targetHasOurData = false + break + } + const targetHasKey = ShardFunctions.testAddressInRange(accountId, targetNodeShardData.storedPartitions) + if (targetHasKey === false) { + targetHasOurData = false + break + } + } + } + + // send only if target needs our data + if (targetHasOurData === false) { + correspondingNodes.push(targetNode) + } + } + + const callParams = { + oi: unwrappedIndex ?? ourIndexInTxGroup, + st: targetIndices.startIndex, + et: targetIndices.endIndex, + gl: queueEntry.correspondingGlobalOffset, + tg: targetGroupSize, + sg: senderGroupSize, + tn: queueEntry.transactionGroup.length, + } + + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factTellCorrespondingNodes: correspondingIndices and nodes ${queueEntry.logID}`, ourIndexInTxGroup, correspondingIndices, correspondingNodes.map(n => n.id), callParams) + queueEntry.txDebug.correspondingDebugInfo = { + ourIndex: ourIndexInTxGroup, + ourUnwrappedIndex: unwrappedIndex, + callParams, + localKeys: queueEntry.localKeys, + oldCorrespondingIndices, + correspondingIndices: correspondingIndices, + correspondingNodeIds: correspondingNodes.map((n) => n.id), + } + if (correspondingNodes.length === 0) { + nestedCountersInstance.countEvent( + 'stateManager', + 'factTellCorrespondingNodes: no corresponding nodes needed to send' + ) + return + } + // Filter nodes before we send tell() + const filteredNodes = this.stateManager.filterValidNodesForInternalMessage( + correspondingNodes, + 'factTellCorrespondingNodes', + true, + true + ) + if (filteredNodes.length === 0) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error("factTellCorrespondingNodes: filterValidNodesForInternalMessage no valid nodes left to try"); + nestedCountersInstance.countEvent( + 'stateManager', + 'factTellCorrespondingNodes: no corresponding nodes needed to send' + ) + return null + } + if (payload.stateList.length === 0) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error("factTellCorrespondingNodes: filterValidNodesForInternalMessage payload.stateList.length === 0"); + nestedCountersInstance.countEvent('stateManager', 'factTellCorrespondingNodes: payload.stateList.length === 0') + return null + } + // send payload to each node in correspondingNodes + this.broadcastState(filteredNodes, payload, 'factTellCorrespondingNodes') + } catch (error) { + /* prettier-ignore */ this.statemanager_fatal( `factTellCorrespondingNodes_ex`, 'factTellCorrespondingNodes' + utils.formatErrorMessage(error) ) + } + }, + + validateCorrespondingTellSender(this: TransactionQueueContext, 
queueEntry: QueueEntry, dataKey: string, senderNodeId: string): boolean { + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`validateCorrespondingTellSender: data key: ${dataKey} sender node id: ${senderNodeId}`) + const receiverNode = this.stateManager.currentCycleShardData.nodeShardData + if (receiverNode == null) return false + + const receiverIsInExecutionGroup = queueEntry.executionGroupMap.has(receiverNode.node.id) + + const senderNode = this.stateManager.currentCycleShardData.nodeShardDataMap.get(senderNodeId) + if (senderNode === null) return false + + const senderHasAddress = ShardFunctions.testAddressInRange(dataKey, senderNode.storedPartitions) + + if (configContext.stateManager.shareCompleteData) { + const senderIsInExecutionGroup = queueEntry.executionGroupMap.has(senderNodeId) + + // check if sender is an execution neighouring node + const neighbourNodes = utils.selectNeighbors( + queueEntry.executionGroup, + queueEntry.ourExGroupIndex, + 2 + ) as Shardus.Node[] + const neighbourNodeIds = neighbourNodes.map((node) => node.id) + if (senderIsInExecutionGroup && neighbourNodeIds.includes(senderNodeId) === false) { + this.mainLogger.error(`validateCorrespondingTellSender: sender is an execution node but not a neighbour node`) + return false + } + if (senderIsInExecutionGroup) + nestedCountersInstance.countEvent( + 'stateManager', + 'validateCorrespondingTellSender: sender is an execution node' + ) + else + nestedCountersInstance.countEvent( + 'stateManager', + 'validateCorrespondingTellSender: sender is not an execution node' + ) + + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`validateCorrespondingTellSender: data key: ${dataKey} sender node id: ${senderNodeId} senderHasAddress: ${senderHasAddress} receiverIsInExecutionGroup: ${receiverIsInExecutionGroup} senderIsInExecutionGroup: ${senderIsInExecutionGroup}`) + if (receiverIsInExecutionGroup === true || senderHasAddress === true || senderIsInExecutionGroup === true) { + return true + } + } else { + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`validateCorrespondingTellSender: data key: ${dataKey} sender node id: ${senderNodeId} senderHasAddress: ${senderHasAddress} receiverIsInExecutionGroup: ${receiverIsInExecutionGroup}`) + if (receiverIsInExecutionGroup === true || senderHasAddress === true) { + return true + } + } + + return false + }, + + factValidateCorrespondingTellSender(this: TransactionQueueContext, queueEntry: QueueEntry, dataKey: string, senderNodeId: string): boolean { + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factValidateCorrespondingTellSender: txId: ${queueEntry.acceptedTx.txId} sender node id: ${senderNodeId}, receiver id: ${Self.id}`) + let cycleShardData = this.stateManager.currentCycleShardData + if (Context.config.stateManager.deterministicTXCycleEnabled) { + cycleShardData = this.stateManager.shardValuesByCycle.get(queueEntry.txGroupCycle) + } + const receiverNodeShardData = cycleShardData.nodeShardData + if (receiverNodeShardData == null) { + this.mainLogger.error( + `factValidateCorrespondingTellSender: logID: ${queueEntry.logID} receiverNodeShardData == null, txGroupCycle: ${queueEntry.txGroupCycle}}` + ) + nestedCountersInstance.countEvent( + 'stateManager', + 'factValidateCorrespondingTellSender: receiverNodeShardData == null' + ) + return false + } + + const senderNodeShardData = cycleShardData.nodeShardDataMap.get(senderNodeId) + if (senderNodeShardData === null) { + this.mainLogger.error( + 
`factValidateCorrespondingTellSender: logID: ${queueEntry.logID} senderNodeShardData == null, txGroupCycle: ${queueEntry.txGroupCycle}}` + ) + nestedCountersInstance.countEvent( + 'stateManager', + 'factValidateCorrespondingTellSender: senderNodeShardData == null' + ) + return false + } + const senderHasAddress = ShardFunctions.testAddressInRange(dataKey, senderNodeShardData.storedPartitions) + + // check if it is a FACT sender + const receivingNodeIndex = queueEntry.ourTXGroupIndex // we are the receiver + const senderNodeIndex = queueEntry.transactionGroup.findIndex((node) => node.id === senderNodeId) + let wrappedSenderNodeIndex = null + if (queueEntry.isSenderWrappedTxGroup[senderNodeId] != null) { + wrappedSenderNodeIndex = queueEntry.isSenderWrappedTxGroup[senderNodeId] + } + const receiverGroupSize = queueEntry.executionNodeIdSorted.length + const senderGroupSize = receiverGroupSize + + const targetGroup = queueEntry.executionNodeIdSorted + const targetIndices = this.getStartAndEndIndexOfTargetGroup(targetGroup, queueEntry.transactionGroup) + + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factValidateCorrespondingTellSender: txId: ${queueEntry.acceptedTx.txId} sender node id: ${senderNodeId}, receiver id: ${Self.id} senderHasAddress: ${senderHasAddress} receivingNodeIndex: ${receivingNodeIndex} senderNodeIndex: ${senderNodeIndex} receiverGroupSize: ${receiverGroupSize} senderGroupSize: ${senderGroupSize} targetIndices: ${utils.stringifyReduce(targetIndices)}`) + + let isValidFactSender = verifyCorrespondingSender( + receivingNodeIndex, + senderNodeIndex, + queueEntry.correspondingGlobalOffset, + receiverGroupSize, + senderGroupSize, + targetIndices.startIndex, + targetIndices.endIndex, + queueEntry.transactionGroup.length, + false, + queueEntry.logID + ) + if (isValidFactSender === false && wrappedSenderNodeIndex != null && wrappedSenderNodeIndex >= 0) { + // try again with wrapped sender index + isValidFactSender = verifyCorrespondingSender( + receivingNodeIndex, + wrappedSenderNodeIndex, + queueEntry.correspondingGlobalOffset, + receiverGroupSize, + senderGroupSize, + targetIndices.startIndex, + targetIndices.endIndex, + queueEntry.transactionGroup.length, + false, + queueEntry.logID + ) + } + // it maybe a FACT sender but sender does not cover the account + if (senderHasAddress === false) { + this.mainLogger.error( + `factValidateCorrespondingTellSender: logId: ${queueEntry.logID} sender does not have the address and is not a exe neighbour` + ) + nestedCountersInstance.countEvent( + 'stateManager', + 'factValidateCorrespondingTellSender: sender does not have the address and is not a exe; neighbour' + ) + return false + } + + // it is neither a FACT corresponding node nor an exe neighbour node + if (isValidFactSender === false) { + this.mainLogger.error( + `factValidateCorrespondingTellSender: logId: ${queueEntry.logID} sender is neither a valid sender nor a neighbour node isValidSender: ${isValidFactSender}` + ) + nestedCountersInstance.countEvent( + 'stateManager', + 'factValidateCorrespondingTellSender: sender is not a valid sender or a neighbour node' + ) + return false + } + return true + }, + + getStartAndEndIndexOfTargetGroup(this: TransactionQueueContext, + targetGroup: string[], + transactionGroup: (Shardus.NodeWithRank | P2PTypes.NodeListTypes.Node)[] + ): { startIndex: number; endIndex: number } { + const targetIndexes: number[] = [] + for (let i = 0; i < transactionGroup.length; i++) { + const nodeId = transactionGroup[i].id + if 
(targetGroup.indexOf(nodeId) >= 0) { + targetIndexes.push(i) + } + } + if (logFlags.verbose) this.mainLogger.debug(`getStartAndEndIndexOfTargetGroup: all target indexes`, targetIndexes) + const n = targetIndexes.length + let startIndex = targetIndexes[0] + // Find the pivot where the circular array starts + for (let i = 1; i < n; i++) { + if (targetIndexes[i] > targetIndexes[i - 1] + 1) { + startIndex = targetIndexes[i] + break + } + } + let endIndex = startIndex + n + if (endIndex > transactionGroup.length) { + endIndex = endIndex - transactionGroup.length + } + return { startIndex, endIndex } + }, + + factTellCorrespondingNodesFinalData(this: TransactionQueueContext, queueEntry: QueueEntry): void { + profilerInstance.profileSectionStart('factTellCorrespondingNodesFinalData', true) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('factTellCorrespondingNodesFinalData', queueEntry.logID, `factTellCorrespondingNodesFinalData - start: ${queueEntry.logID}`) + + if (this.stateManager.currentCycleShardData == null) { + throw new Error('factTellCorrespondingNodesFinalData: currentCycleShardData == null') + } + if (queueEntry.uniqueKeys == null) { + throw new Error('factTellCorrespondingNodesFinalData: queueEntry.uniqueKeys == null') + } + if (queueEntry.globalModification === true) { + throw new Error('factTellCorrespondingNodesFinalData globalModification === true') + } + + if (this.executeInOneShard && queueEntry.isInExecutionHome === false) { + throw new Error('factTellCorrespondingNodesFinalData isInExecutionHome === false') + } + if (queueEntry.executionShardKey == null || queueEntry.executionShardKey == '') { + throw new Error('factTellCorrespondingNodesFinalData executionShardKey == null or empty') + } + if (queueEntry.preApplyTXResult == null) { + throw new Error('factTellCorrespondingNodesFinalData preApplyTXResult == null') + } + + const datas: { [accountID: string]: Shardus.WrappedResponse } = {} + + const applyResponse = queueEntry.preApplyTXResult.applyResponse + let wrappedStates = this.stateManager.useAccountWritesOnly ? {} : queueEntry.collectedData + const writtenAccountsMap: WrappedResponses = {} + if (applyResponse.accountWrites != null && applyResponse.accountWrites.length > 0) { + for (const writtenAccount of applyResponse.accountWrites) { + writtenAccountsMap[writtenAccount.accountId] = writtenAccount.data + writtenAccountsMap[writtenAccount.accountId].prevStateId = wrappedStates[writtenAccount.accountId] + ? wrappedStates[writtenAccount.accountId].stateId + : '' + writtenAccountsMap[writtenAccount.accountId].prevDataCopy = wrappedStates[writtenAccount.accountId] + ? 
utils.deepCopy(writtenAccount.data) + : {} + + datas[writtenAccount.accountId] = writtenAccount.data + } + //override wrapped states with writtenAccountsMap which should be more complete if it included + wrappedStates = writtenAccountsMap + } + const keysToShare = Object.keys(wrappedStates) + + let message: { stateList: Shardus.WrappedResponse[]; txid: string } + + let totalShares = 0 + const targetStartIndex = 0 + const targetEndIndex = queueEntry.transactionGroup.length + const targetGroupSize = queueEntry.transactionGroup.length + + const senderIndexInTxGroup = queueEntry.ourTXGroupIndex + const senderGroupSize = queueEntry.executionGroup.length + const unwrappedIndex = queueEntry.isSenderWrappedTxGroup[Self.id] + + let correspondingIndices = getCorrespondingNodes( + senderIndexInTxGroup, + targetStartIndex, + targetEndIndex, + queueEntry.correspondingGlobalOffset, + targetGroupSize, + senderGroupSize, + queueEntry.transactionGroup.length, + queueEntry.logID + ) + + if (this.config.stateManager.correspondingTellUseUnwrapped) { + if (unwrappedIndex != null) { + const extraCorrespondingIndices = getCorrespondingNodes( + unwrappedIndex, + targetStartIndex, + targetEndIndex, + queueEntry.correspondingGlobalOffset, + targetGroupSize, + senderGroupSize, + queueEntry.transactionGroup.length, + queueEntry.logID + ) + if (Context.config.stateManager.concatCorrespondingTellUseUnwrapped) { + correspondingIndices.concat(extraCorrespondingIndices) + } else { + correspondingIndices = extraCorrespondingIndices + } + } + } + + for (const key of keysToShare) { + // eslint-disable-next-line security/detect-object-injection + if (wrappedStates[key] != null) { + if (queueEntry.ourExGroupIndex === -1) { + throw new Error( + 'factTellCorrespondingNodesFinalData: should never get here. our sending node must be in the execution group' + ) + } + const storageNodesForAccount = this.getStorageGroupForAccount(key) + const storageNodesAccountIds = new Set(storageNodesForAccount.map((node) => node.id)) + + const correspondingNodes: P2PTypes.NodeListTypes.Node[] = [] + for (const index of correspondingIndices) { + const node = queueEntry.transactionGroup[index] + if (storageNodesAccountIds.has(node.id)) { + correspondingNodes.push(node) + } + } + + //how can we be making so many calls?? 
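+ // Note on fan-out: a corresponding index only becomes a recipient here if that node is
+ // also in this account's storage group, so each send below should carry a single
+ // account's final state only to nodes that actually store that account.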
+ /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) { + this.logger.playbackLogNote('factTellCorrespondingNodesFinalData', queueEntry.logID, `factTellCorrespondingNodesFinalData ourIndex: ${senderIndexInTxGroup} correspondingIndices:${JSON.stringify(correspondingIndices)} correspondingNodes:${JSON.stringify(correspondingNodes.map(node => node.id))} for accounts: ${key}`) + } + + const dataToSend: Shardus.WrappedResponse[] = [] + // eslint-disable-next-line security/detect-object-injection + dataToSend.push(datas[key]) // only sending just this one key at a time + message = { stateList: dataToSend, txid: queueEntry.acceptedTx.txId } + if (correspondingNodes.length > 0) { + // Filter nodes before we send tell() + const filteredNodes = this.stateManager.filterValidNodesForInternalMessage( + correspondingNodes, + 'factTellCorrespondingNodesFinalData', + true, + true + ) + if (filteredNodes.length === 0) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('factTellCorrespondingNodesFinalData: filterValidNodesForInternalMessage no valid nodes left to try') + //return null + continue + } + const filterdCorrespondingAccNodes = filteredNodes + const filterNodesIpPort = filterdCorrespondingAccNodes.map( + (node) => node.externalIp + ':' + node.externalPort + ) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.debug('tellcorrernodingnodesfinaldata', queueEntry.logID, ` : filterValidNodesForInternalMessage ${filterNodesIpPort} for accounts: ${utils.stringifyReduce(message.stateList)}`) + // convert legacy message to binary supported type + const request = message as BroadcastFinalStateReq + if (logFlags.seqdiagram) { + for (const node of filterdCorrespondingAccNodes) { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455102 ${shardusGetTime()} tx:${message.txid} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(node.id)}: ${'broadcast_finalstate'}`) + } + } + + // if (this.usePOQo) { + // && this.config.p2p.useBinarySerializedEndpoints && Context.config.p2p.poqoDataAndReceiptBinary) { + Comms.tellBinary( + filterdCorrespondingAccNodes, + InternalRouteEnum.binary_poqo_data_and_receipt, + { + finalState: message, + receipt: queueEntry.signedReceipt, + txGroupCycle: queueEntry.txGroupCycle, + }, + serializePoqoDataAndReceiptReq, + {} + ) + // } else if (this.usePOQo) { + // this.p2p.tell( + // filterdCorrespondingAccNodes, + // 'poqo-data-and-receipt', + // { + // finalState: message, + // receipt: queueEntry.appliedReceipt2 + // } + // ) + // } else //if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.broadcastFinalStateBinary) { + // this.p2p.tellBinary( + // filterdCorrespondingAccNodes, + // InternalRouteEnum.binary_broadcast_finalstate, + // request, + // serializeBroadcastFinalStateReq, + // { + // verification_data: verificationDataCombiner( + // message.txid, + // message.stateList.length.toString() + // ), + // } + // ) + // } else { + // this.p2p.tell(filterdCorrespondingAccNodes, 'broadcast_finalstate', message) + // } + totalShares++ + } + } + } + + nestedCountersInstance.countEvent('factTellCorrespondingNodesFinalData', 'totalShares', totalShares) + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factTellCorrespondingNodesFinalData - end: ${queueEntry.logID} totalShares:${totalShares}`) + profilerInstance.profileSectionEnd('factTellCorrespondingNodesFinalData', true) + }, + + factValidateCorrespondingTellFinalDataSender(this: TransactionQueueContext, 
queueEntry: QueueEntry, senderNodeId: string): boolean { + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factValidateCorrespondingTellFinalDataSender: txId: ${queueEntry.acceptedTx.txId} sender node id: ${senderNodeId}, receiver id: ${Self.id}`) + const senderNode = NodeList.nodes.get(senderNodeId) + if (senderNode === null) { + /* prettier-ignore */ if(logFlags.error) this.mainLogger.error(`factValidateCorrespondingTellFinalDataSender: logId: ${queueEntry.logID} sender node is null`) + nestedCountersInstance.countEvent( + 'stateManager', + 'factValidateCorrespondingTellFinalDataSender: sender node is null' + ) + return false + } + const senderIsInExecutionGroup = queueEntry.executionGroupMap.has(senderNodeId) + + if (senderIsInExecutionGroup === false) { + /* prettier-ignore */ if(logFlags.error) this.mainLogger.error(`factValidateCorrespondingTellFinalDataSender: logId: ${queueEntry.logID} sender is not in the execution group sender:${senderNodeId}`) + nestedCountersInstance.countEvent( + 'stateManager', + 'factValidateCorrespondingTellFinalDataSender: sender is not in the execution group' + ) + return false + } + + let senderNodeIndex = queueEntry.transactionGroup.findIndex((node) => node.id === senderNodeId) + if (queueEntry.isSenderWrappedTxGroup[senderNodeId] != null) { + senderNodeIndex = queueEntry.isSenderWrappedTxGroup[senderNodeId] + } + const senderGroupSize = queueEntry.executionGroup.length + + const targetNodeIndex = queueEntry.ourTXGroupIndex // we are the receiver + const targetGroupSize = queueEntry.transactionGroup.length + const targetStartIndex = 0 // start of tx group + const targetEndIndex = queueEntry.transactionGroup.length // end of tx group + + // check if it is a FACT sender + const isValidFactSender = verifyCorrespondingSender( + targetNodeIndex, + senderNodeIndex, + queueEntry.correspondingGlobalOffset, + targetGroupSize, + senderGroupSize, + targetStartIndex, + targetEndIndex, + queueEntry.transactionGroup.length + ) + + // it is not a FACT corresponding node + if (isValidFactSender === false) { + /* prettier-ignore */ if(logFlags.error) this.mainLogger.error(`factValidateCorrespondingTellFinalDataSender: logId: ${queueEntry.logID} sender is not a valid sender isValidSender: ${isValidFactSender}`); + nestedCountersInstance.countEvent( + 'stateManager', + 'factValidateCorrespondingTellFinalDataSender: sender is not a valid sender or a neighbour node' + ) + return false + } + return true + }, + async shareCompleteDataToNeighbours(this: TransactionQueueContext, queueEntry: QueueEntry): Promise { + if (configContext.stateManager.shareCompleteData === false) { + return + } + if (queueEntry.hasAll === false || queueEntry.sharedCompleteData) { + return + } + if (queueEntry.isInExecutionHome === false) { + return + } + const dataToShare: WrappedResponses = {} + const stateList: Shardus.WrappedResponse[] = [] + for (const accountId in queueEntry.collectedData) { + const data = queueEntry.collectedData[accountId] + const riCacheResult = await this.app.getCachedRIAccountData([accountId]) + if (riCacheResult != null && riCacheResult.length > 0) { + nestedCountersInstance.countEvent('shareCompleteDataToNeighbours', 'riCacheResult, skipping') + continue + } else { + dataToShare[accountId] = data + stateList.push(data) + } + } + const payload = { txid: queueEntry.acceptedTx.txId, stateList } + const neighboursNodes = utils.selectNeighbors(queueEntry.executionGroup, queueEntry.ourExGroupIndex, 2) + if (stateList.length > 0) { + 
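+ // Only accounts that were not served from the RI cache are shared, and only to the
+ // execution-group neighbours selected above; sharedCompleteData is then set so this
+ // queue entry does not re-broadcast the same data on a later pass.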
this.broadcastState(neighboursNodes, payload, 'shareCompleteDataToNeighbours') + + queueEntry.sharedCompleteData = true + nestedCountersInstance.countEvent( + `queueEntryAddData`, + `sharedCompleteData stateList: ${stateList.length} neighbours: ${neighboursNodes.length}` + ) + if (logFlags.debug || this.stateManager.consensusLog) { + this.mainLogger.debug( + `shareCompleteDataToNeighbours: shared complete data for txId ${ + queueEntry.logID + } at timestamp: ${shardusGetTime()} nodeId: ${Self.id} to neighbours: ${Utils.safeStringify( + neighboursNodes.map((node) => node.id) + )}` + ) + } + } + }, + + async gossipCompleteData(queueEntry: QueueEntry): Promise { + if (queueEntry.hasAll === false || queueEntry.gossipedCompleteData) { + return + } + if (configContext.stateManager.gossipCompleteData === false) { + return + } + const dataToGossip: WrappedResponses = {} + const stateList: Shardus.WrappedResponse[] = [] + for (const accountId in queueEntry.collectedData) { + const data = queueEntry.collectedData[accountId] + const riCacheResult = await this.app.getCachedRIAccountData([accountId]) + if (riCacheResult != null && riCacheResult.length > 0) { + nestedCountersInstance.countEvent('gossipCompleteData', 'riCacheResult, skipping') + continue + } else { + dataToGossip[accountId] = data + stateList.push(data) + } + } + const payload = { txid: queueEntry.acceptedTx.txId, stateList } + if (stateList.length > 0) { + Comms.sendGossip( + 'broadcast_state_complete_data', // deprecated + payload, + '', + Self.id, + queueEntry.executionGroup, + true, + 6, + queueEntry.acceptedTx.txId + ) + queueEntry.gossipedCompleteData = true + nestedCountersInstance.countEvent('gossipCompleteData', `stateList: ${stateList.length}`) + if (logFlags.debug || this.stateManager.consensusLog) { + this.mainLogger.debug( + `gossipQueueEntryData: gossiped data for txId ${queueEntry.logID} at timestamp: ${shardusGetTime()} nodeId: ${ + Self.id + }` + ) + } + } + }, + async broadcastState( + nodes: Shardus.Node[], + message: { stateList: Shardus.WrappedResponse[]; txid: string }, + context: string + ): Promise { + // if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.broadcastStateBinary) { + // convert legacy message to binary supported type + const request = message as BroadcastStateReq + if (logFlags.seqdiagram) { + for (const node of nodes) { + if (context == 'tellCorrespondingNodes') { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455102 ${shardusGetTime()} tx:${message.txid} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(node.id)}: ${'broadcast_state_nodes'}`) + } else { + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455102 ${shardusGetTime()} tx:${message.txid} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(node.id)}: ${'broadcast_state_neighbour'}`) + } + } + } + Comms.tellBinary( + nodes, + InternalRouteEnum.binary_broadcast_state, + request, + serializeBroadcastStateReq, + { + verification_data: verificationDataCombiner( + message.txid, + message.stateList.length.toString(), + request.stateList[0].accountId + ), + } + ) + // return + // } + // this.p2p.tell(nodes, 'broadcast_state', message) + }, + + // eslint-disable-next-line @typescript-eslint/explicit-function-return-type + async requestFinalData( + queueEntry: QueueEntry, + accountIds: string[], + nodesToAskKeys: string[] | null = null, + includeAppReceiptData = false + ): Promise { + 
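+ // Rough flow: pick one node at random (from nodesToAskKeys when provided, otherwise from
+ // the execution group), ask it for this tx's final account data, and accept only entries
+ // whose stateId matches the afterStateHashes in the signed receipt; when
+ // includeAppReceiptData is set, the returned appReceiptData hash is also checked against
+ // the receipt proposal before the request is considered successful.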
profilerInstance.profileSectionStart('requestFinalData') + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`requestFinalData: txid: ${queueEntry.logID} accountIds: ${utils.stringifyReduce(accountIds)}`); + const message = { txid: queueEntry.acceptedTx.txId, accountIds, includeAppReceiptData } + let success = false + let successCount = 0 + let validAppReceiptData = includeAppReceiptData === false ? true : false + + // first check if we have received final data + for (const accountId of accountIds) { + // eslint-disable-next-line security/detect-object-injection + if (queueEntry.collectedFinalData[accountId] != null) { + successCount++ + } + } + if (successCount === accountIds.length && includeAppReceiptData === false) { + nestedCountersInstance.countEvent('stateManager', 'requestFinalDataAlreadyReceived') + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`requestFinalData: txid: ${queueEntry.logID} already received all data`) + // no need to request data + return + } + + try { + let nodeToAsk: Shardus.Node + if (nodesToAskKeys && nodesToAskKeys.length > 0) { + const randomIndex = Math.floor(Math.random() * nodesToAskKeys.length) + // eslint-disable-next-line security/detect-object-injection + const randomNodeToAskKey = nodesToAskKeys[randomIndex] + nodeToAsk = byPubKey.get(randomNodeToAskKey) + } else { + const randomIndex = Math.floor(Math.random() * queueEntry.executionGroup.length) + // eslint-disable-next-line security/detect-object-injection + const randomExeNode = queueEntry.executionGroup[randomIndex] + nodeToAsk = nodes.get(randomExeNode.id) + } + + if (!nodeToAsk) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('requestFinalData: could not find node from execution group') + throw new Error('requestFinalData: could not find node from execution group') + } + + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug( `requestFinalData: txid: ${queueEntry.acceptedTx.txId} accountIds: ${utils.stringifyReduce( accountIds )}, asking node: ${nodeToAsk.id} ${nodeToAsk.externalPort} at timestamp ${shardusGetTime()}` ) + + // if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.requestTxAndStateBinary) { + const requestMessage = message as RequestTxAndStateReq + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455101 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(nodeToAsk.id)}: ${'request_tx_and_state'}`) + const response = await Comms.askBinary( + nodeToAsk, + InternalRouteEnum.binary_request_tx_and_state, + requestMessage, + serializeRequestTxAndStateReq, + deserializeRequestTxAndStateResp, + {} + ) + // } else response = await Comms.ask(nodeToAsk, 'request_tx_and_state', message) + + if (response && response.stateList && response.stateList.length > 0) { + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`requestFinalData: txid: ${queueEntry.logID} received data for ${response.stateList.length} accounts`) + } else { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`requestFinalData: txid: ${queueEntry.logID} response is null`) + nestedCountersInstance.countEvent( + 'stateManager', + 'requestFinalData: failed: response or response.stateList null or statelist length 0' + ) + return + } + + for (const data of response.stateList) { + if (data == null) { + /* prettier-ignore */ + if (logFlags.error && logFlags.debug) this.mainLogger.error(`requestFinalData data == null for tx 
${queueEntry.logID}`); + success = false + break + } + const indexInVote = queueEntry.signedReceipt.proposal.accountIDs.indexOf(data.accountId) + if (indexInVote === -1) continue + const afterStateIdFromVote = queueEntry.signedReceipt.proposal.afterStateHashes[indexInVote] + if (data.stateId !== afterStateIdFromVote) { + nestedCountersInstance.countEvent('stateManager', 'requestFinalDataMismatch') + continue + } + if (queueEntry.collectedFinalData[data.accountId] == null) { + // todo: check the state hashes and verify + queueEntry.collectedFinalData[data.accountId] = data + successCount++ + /* prettier-ignore */ + if (logFlags.debug) this.mainLogger.debug(`requestFinalData: txid: ${queueEntry.logID} success accountId: ${data.accountId} stateId: ${data.stateId}`); + } + } + if (includeAppReceiptData && response.appReceiptData) { + const receivedAppReceiptDataHash = this.crypto.hash(response.appReceiptData) + const receipt2 = this.stateManager.getSignedReceipt(queueEntry) + if (receipt2 != null) { + validAppReceiptData = receivedAppReceiptDataHash === receipt2.proposal.appReceiptDataHash + } + } + if (successCount === accountIds.length && validAppReceiptData === true) { + success = true + + //setting this for completeness. if our node is awaiting final data it will utilize what was looked up here + queueEntry.hasValidFinalData = true + return { wrappedResponses: queueEntry.collectedFinalData, appReceiptData: response.appReceiptData } + } else { + nestedCountersInstance.countEvent( + 'stateManager', + `requestFinalData: failed: did not get enough data: ${successCount} < ${accountIds.length}` + ) + } + } catch (e) { + nestedCountersInstance.countEvent('stateManager', 'requestFinalData: failed: Error') + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`requestFinalData: txid: ${queueEntry.logID} error: ${e.message}`) + } finally { + if (success === false) { + nestedCountersInstance.countEvent('stateManager', 'requestFinalData: failed: success === false') + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`requestFinalData: txid: ${queueEntry.logID} failed. 
successCount: ${successCount} accountIds: ${accountIds.length}`); + } + } + profilerInstance.profileSectionEnd('requestFinalData') + }, + + async requestInitialData(queueEntry: QueueEntry, accountIds: string[]): Promise { + profilerInstance.profileSectionStart('requestInitialData') + this.mainLogger.debug( + `requestInitialData: txid: ${queueEntry.logID} accountIds: ${utils.stringifyReduce(accountIds)}` + ) + const message = { txid: queueEntry.acceptedTx.txId, accountIds } + let success = false + let successCount = 0 + let retries = 0 + const maxRetry = 3 + const triedNodes = new Set() + + if (queueEntry.executionGroup == null) return + + while (retries < maxRetry) { + const executionNodeIds = queueEntry.executionGroup.map((node) => node.id) + const randomExeNodeId = utils.getRandom(executionNodeIds, 1)[0] + if (triedNodes.has(randomExeNodeId)) continue + if (randomExeNodeId === Self.id) continue + const nodeToAsk = nodes.get(randomExeNodeId) + if (!nodeToAsk) { + if (logFlags.error) this.mainLogger.error('requestInitialData: could not find node from execution group') + throw new Error('requestInitialData: could not find node from execution group') + } + triedNodes.add(randomExeNodeId) + retries++ + try { + if (logFlags.debug) + this.mainLogger.debug( + `requestInitialData: txid: ${queueEntry.acceptedTx.txId} accountIds: ${utils.stringifyReduce( + accountIds + )}, asking node: ${nodeToAsk.id} ${nodeToAsk.externalPort} at timestamp ${shardusGetTime()}` + ) + + const requestMessage = message as RequestTxAndStateReq + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455101 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(nodeToAsk.id)}: ${'request_tx_and_state'}`) + const response = await Comms.askBinary( + nodeToAsk, + InternalRouteEnum.binary_request_tx_and_state_before, + requestMessage, + serializeRequestTxAndStateReq, + deserializeRequestTxAndStateResp, + {} + ) + + if (response && response.stateList && response.stateList.length === accountIds.length) { + this.mainLogger.debug( + `requestInitialData: txid: ${queueEntry.logID} received data for ${response.stateList.length} accounts` + ) + } else { + this.mainLogger.error(`requestInitialData: txid: ${queueEntry.logID} response is null or incomplete`) + continue + } + + const results: WrappedResponses = {} + const receipt2 = this.stateManager.getSignedReceipt(queueEntry) + if (receipt2 == null) { + return + } + if (receipt2.proposal.accountIDs.length !== response.stateList.length) { + if (logFlags.error && logFlags.debug) + this.mainLogger.error(`requestInitialData data.length not matching for tx ${queueEntry.logID}`) + return + } + for (const data of response.stateList) { + if (data == null) { + /* prettier-ignore */ + if (logFlags.error && logFlags.debug) this.mainLogger.error(`requestInitialData data == null for tx ${queueEntry.logID}`); + success = false + break + } + const indexInVote = receipt2.proposal.accountIDs.indexOf(data.accountId) + if (data.stateId === receipt2.proposal.beforeStateHashes[indexInVote]) { + successCount++ + results[data.accountId] = data + /* prettier-ignore */ + if (logFlags.debug) this.mainLogger.debug(`requestInitialData: txid: ${queueEntry.logID} success accountId: ${data.accountId} stateId: ${data.stateId}`); + } + } + return results + } catch (e) { + nestedCountersInstance.countEvent('stateManager', 'requestInitialDataError') + /* prettier-ignore */ if (logFlags.error) 
this.mainLogger.error(`requestInitialData: txid: ${queueEntry.logID} error: ${e.message}`) + } + } + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`requestInitialData: txid: ${queueEntry.logID} failed. successCount: ${successCount} accountIds: ${accountIds.length}`); + profilerInstance.profileSectionEnd('requestInitialData') + } +} \ No newline at end of file diff --git a/src/state-manager/TransactionQueue.handlers.ts b/src/state-manager/TransactionQueue.handlers.ts new file mode 100644 index 000000000..890bdc0b9 --- /dev/null +++ b/src/state-manager/TransactionQueue.handlers.ts @@ -0,0 +1,506 @@ +import { P2P as P2PTypes } from '@shardeum-foundation/lib-types' +import { InternalBinaryHandler } from '../types/Handler' +import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { profilerInstance, cUninitializedSize } from '../utils/profiler' +import { RequestErrorEnum } from '../types/enum/RequestErrorEnum' +import { requestErrorHandler, getStreamWithTypeCheck, verificationDataSplitter } from '../types/Helpers' +import { TypeIdentifierEnum } from '../types/enum/TypeIdentifierEnum' +import { deserializeBroadcastStateReq } from '../types/BroadcastStateReq' +import * as utils from '../utils' +import { logFlags } from '../logger' +import * as Shardus from '../shardus/shardus-types' +import { config as configContext, P2PModuleContext as P2P } from '../p2p/Context' +import * as Self from '../p2p/Self' +import { SpreadTxToGroupSyncingReq, deserializeSpreadTxToGroupSyncingReq } from '../types/SpreadTxToGroupSyncingReq' +import { verifyPayload } from '../types/ajv/Helpers' +import { AJVSchemaEnum } from '../types/enum/AJVSchemaEnum' +import { Node } from '@shardeum-foundation/lib-types/build/src/p2p/NodeListTypes' +import { RequestStateForTxRespSerialized, serializeRequestStateForTxResp } from '../types/RequestStateForTxResp' +import { deserializeRequestStateForTxReq } from '../types/RequestStateForTxReq' +import { errorToStringFull, inRangeOfCurrentTime } from '../utils' +import { QueueEntry, AcceptedTx, ArchiverReceipt, SignedReceipt } from './state-manager-types' +import { isInternalTxAllowed, networkMode } from '../p2p/Modes' +import { shardusGetTime } from '../network' +import * as Archivers from '../p2p/Archivers' +import { network as networkContext } from '../p2p/Context' +import { Utils } from '@shardeum-foundation/lib-types' + +interface TransactionQueueContext { + getQueueEntrySafe: (txId: string) => QueueEntry | null + getQueueEntryArchived: (txId: string, route: string) => QueueEntry | null + queueEntryAddData: (queueEntry: QueueEntry, data: any, signatureCheck?: boolean) => void + logger: any + mainLogger: any + stateManager: any + p2p: P2P + profiler: any + crypto: any + app: Shardus.App + archivedQueueEntriesByID: Map + _transactionQueueByID: Map + getArchiverReceiptFromQueueEntry: (queueEntry: QueueEntry) => Promise + factValidateCorrespondingTellSender: (queueEntry: QueueEntry, accountId: string, senderId: string) => boolean + validateCorrespondingTellSender: (queueEntry: QueueEntry, accountId: string, senderId: string) => boolean + handleSharedTX: (tx: Shardus.TimestampedTx, appData: unknown, sender: Shardus.Node) => QueueEntry + queueEntryGetTransactionGroup: (queueEntry: QueueEntry) => Shardus.Node[] + addOriginalTxDataToForward: (queueEntry: QueueEntry) => void + routeAndQueueAcceptedTransaction: (acceptedTx: AcceptedTx, sendGossip: boolean, sender: Shardus.Node | null, 
globalModification: boolean, noConsensus: boolean) => string | boolean + statemanager_fatal: (key: string, log: string) => void + config: Shardus.StrictServerConfiguration +} + +export const handlers = { + setupHandlers(this: TransactionQueueContext): void { + const broadcastStateRoute: P2PTypes.P2PTypes.Route> = { + name: InternalRouteEnum.binary_broadcast_state, + // eslint-disable-next-line @typescript-eslint/no-unused-vars + handler: (payload, respond, header, sign) => { + const route = InternalRouteEnum.binary_broadcast_state + nestedCountersInstance.countEvent('internal', route) + profilerInstance.scopedProfileSectionStart(route, false, payload.length) + const errorHandler = ( + errorType: RequestErrorEnum, + opts?: { customErrorLog?: string; customCounterSuffix?: string } + ): void => requestErrorHandler(route, errorType, header, opts) + + try { + const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cBroadcastStateReq) + if (!requestStream) { + return errorHandler(RequestErrorEnum.InvalidRequest) + } + // verification data checks + if (header.verification_data == null) { + return errorHandler(RequestErrorEnum.MissingVerificationData) + } + const verificationDataParts = verificationDataSplitter(header.verification_data) + if (verificationDataParts.length !== 3) { + return errorHandler(RequestErrorEnum.InvalidVerificationData) + } + const [vTxId, vStateSize, vStateAddress] = verificationDataParts + const queueEntry = this.getQueueEntrySafe(vTxId) + //It is okay to ignore this transaction if the txId is not found in the queue. + if (queueEntry == null) { + /* prettier-ignore */ if (logFlags.error && logFlags.verbose) this.mainLogger.error(`${route} cant find queueEntry for: ${utils.makeShortHash(vTxId)}`) + return errorHandler(RequestErrorEnum.InvalidVerificationData, { + customCounterSuffix: 'queueEntryNotFound', + }) + } + + const req = deserializeBroadcastStateReq(requestStream) + if (req.txid !== vTxId) { + return errorHandler(RequestErrorEnum.InvalidVerificationData) + } + + if (req.stateList.length !== parseInt(vStateSize)) { + return errorHandler(RequestErrorEnum.InvalidVerificationData) + } + /* prettier-ignore */ if (logFlags.verbose && logFlags.console) console.log(`${route}: txId: ${req.txid} stateSize: ${req.stateList.length} stateAddress: ${vStateAddress}`) + + const senderNodeId = header.sender_id + let isSenderOurExeNeighbour = false + const senderIsInExecutionGroup = queueEntry.executionGroupMap.has(senderNodeId) + const neighbourNodes = utils.selectNeighbors( + queueEntry.executionGroup, + queueEntry.ourExGroupIndex, + 2 + ) as Shardus.Node[] + const neighbourNodeIds = neighbourNodes.map((node) => node.id) + isSenderOurExeNeighbour = senderIsInExecutionGroup && neighbourNodeIds.includes(senderNodeId) + + // sender verification loop + for (let i = 0; i < req.stateList.length; i++) { + // eslint-disable-next-line security/detect-object-injection + const state = req.stateList[i] + let isSenderValid = false + if (configContext.p2p.useFactCorrespondingTell) { + // check if it is a neighbour exe node sharing data + if (configContext.stateManager.shareCompleteData) { + if (isSenderOurExeNeighbour) { + nestedCountersInstance.countEvent( + 'stateManager', + 'factValidateCorrespondingTellSender: sender is an execution node and a neighbour node' + ) + isSenderValid = true + } else { + // check if it is a corresponding tell sender + isSenderValid = this.factValidateCorrespondingTellSender(queueEntry, state.accountId, senderNodeId) + } + } else { + // check if 
it is a corresponding tell sender + isSenderValid = this.factValidateCorrespondingTellSender(queueEntry, state.accountId, senderNodeId) + } + } else { + isSenderValid = this.validateCorrespondingTellSender(queueEntry, state.accountId, senderNodeId) + } + + if ( + this.stateManager.testFailChance( + configContext.debug.ignoreDataTellChance, + 'ignoreDataTellChance', + queueEntry.logID, + '', + logFlags.verbose + ) === true + ) { + isSenderValid = false + } + + if (isSenderValid === false) { + this.mainLogger.error(`${route} validateCorrespondingTellSender failed for ${state.accountId}`) + nestedCountersInstance.countEvent('processing', 'validateCorrespondingTellSender failed') + return errorHandler(RequestErrorEnum.InvalidSender) + } + } + // update loop + for (let i = 0; i < req.stateList.length; i++) { + // eslint-disable-next-line security/detect-object-injection + const state = req.stateList[i] + + if (configContext.stateManager.collectedDataFix && configContext.stateManager.rejectSharedDataIfCovered) { + const consensusNodes = this.stateManager.transactionQueue.getConsenusGroupForAccount(state.accountId) + const coveredByUs = consensusNodes.map((node) => node.id).includes(Self.id) + if (coveredByUs) { + nestedCountersInstance.countEvent('processing', 'broadcast_state_coveredByUs') + /* prettier-ignore */ if (logFlags.verbose) console.log(`broadcast_state: coveredByUs: ${state.accountId} no need to accept this data`) + continue + } else { + this.queueEntryAddData(queueEntry, state) + } + } else { + this.queueEntryAddData(queueEntry, state) + } + if (queueEntry.state === 'syncing') { + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_gotBroadcastData', `${queueEntry.acceptedTx.txId}`, ` qId: ${queueEntry.entryID} data:${state.accountId}`) + } + } + } catch (e) { + nestedCountersInstance.countEvent('internal', `${route}-exception`) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route}: Exception executing request: ${errorToStringFull(e)}`) + } finally { + profilerInstance.scopedProfileSectionEnd(route, payload.length) + } + }, + } + + this.p2p.registerInternalBinary(broadcastStateRoute.name, broadcastStateRoute.handler) + + const spreadTxToGroupSyncingBinaryHandler: P2PTypes.P2PTypes.Route> = { + name: InternalRouteEnum.binary_spread_tx_to_group_syncing, + // eslint-disable-next-line @typescript-eslint/no-unused-vars + handler: async (payload, respond, header, sign) => { + const route = InternalRouteEnum.binary_spread_tx_to_group_syncing + nestedCountersInstance.countEvent('internal', route) + this.profiler.scopedProfileSectionStart(route, false, payload.length) + const errorHandler = ( + errorType: RequestErrorEnum, + opts?: { customErrorLog?: string; customCounterSuffix?: string } + ): void => requestErrorHandler(route, errorType, header, opts) + + try { + const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cSpreadTxToGroupSyncingReq) + if (!requestStream) { + return errorHandler(RequestErrorEnum.InvalidRequest) + } + + const req: SpreadTxToGroupSyncingReq = deserializeSpreadTxToGroupSyncingReq(requestStream) + + const ajvErrors = verifyPayload(AJVSchemaEnum.SpreadTxToGroupSyncingReq, req) + if (ajvErrors && ajvErrors.length > 0) { + this.mainLogger.error(`${route}: request validation errors: ${ajvErrors}`) + return errorHandler(RequestErrorEnum.InvalidPayload) + } + + const node = this.p2p.state.getNode(header.sender_id) + this.handleSharedTX(req.data, req.appData, node) + } catch (e) { + 
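+ // Count and log the failure but do not rethrow; a malformed payload from one peer
+ // should not bring down this internal route handler.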
nestedCountersInstance.countEvent('internal', `${route}-exception`) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route}: Exception executing request: ${errorToStringFull(e)}`) + } finally { + this.profiler.scopedProfileSectionEnd(route) + } + }, + } + + this.p2p.registerInternalBinary( + spreadTxToGroupSyncingBinaryHandler.name, + spreadTxToGroupSyncingBinaryHandler.handler + ) + + this.p2p.registerGossipHandler( + 'spread_tx_to_group', + async ( + payload: { data: Shardus.TimestampedTx; appData: unknown }, + sender: Node, + tracker: string, + msgSize: number + ) => { + profilerInstance.scopedProfileSectionStart('spread_tx_to_group', false, msgSize) + let respondSize = cUninitializedSize + try { + // Place tx in queue (if younger than m) + // gossip 'spread_tx_to_group' to transaction group + + //handleSharedTX will also validate fields. payload is an AcceptedTX so must pass in the .data as the rawTX + const queueEntry = this.handleSharedTX(payload.data, payload.appData, sender) + if (queueEntry == null) { + return + } + + // get transaction group + const transactionGroup = this.queueEntryGetTransactionGroup(queueEntry) + if (queueEntry.ourNodeInTransactionGroup === false) { + return + } + if (transactionGroup.length > 1) { + this.stateManager.debugNodeGroup( + queueEntry.acceptedTx.txId, + queueEntry.acceptedTx.timestamp, + `spread_tx_to_group transactionGroup:`, + transactionGroup + ) + respondSize = await this.p2p.sendGossipIn( + 'spread_tx_to_group', + payload, + tracker, + sender, + transactionGroup, + false, + -1, + queueEntry.acceptedTx.txId + ) + /* prettier-ignore */ if (logFlags.verbose) console.log( 'queueEntry.isInExecutionHome', queueEntry.acceptedTx.txId, queueEntry.isInExecutionHome ) + // If our node is in the execution group, forward this raw tx to the subscribed archivers + if (queueEntry.isInExecutionHome === true) { + this.addOriginalTxDataToForward(queueEntry) + } + } + } finally { + profilerInstance.scopedProfileSectionEnd('spread_tx_to_group', respondSize) + } + } + ) + + const requestStateForTxRoute: P2PTypes.P2PTypes.Route> = { + name: InternalRouteEnum.binary_request_state_for_tx, + handler: (payload, respond) => { + const route = InternalRouteEnum.binary_request_state_for_tx + profilerInstance.scopedProfileSectionStart(route) + nestedCountersInstance.countEvent('internal', route) + + const response: RequestStateForTxRespSerialized = { + stateList: [], + beforeHashes: {}, + note: '', + success: false, + } + try { + const responseStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cRequestStateForTxReq) + if (!responseStream) { + this.mainLogger.error(`${route}: Invalid request`) + respond(response, serializeRequestStateForTxResp) + return + } + const req = deserializeRequestStateForTxReq(responseStream) + if (req.txid == null) { + throw new Error('Txid is null') + } + let queueEntry = this.getQueueEntrySafe(req.txid) + if (queueEntry == null) { + queueEntry = this.getQueueEntryArchived(req.txid, InternalRouteEnum.binary_request_state_for_tx) + } + + if (queueEntry == null) { + response.note = `failed to find queue entry: ${utils.stringifyReduce(req.txid)} ${req.timestamp} dbg:${ + this.stateManager.debugTXHistory[utils.stringifyReduce(req.txid)] + }` + respond(response, serializeRequestStateForTxResp) + // if a node cant get data it will have to get repaired by the patcher since we can only keep stuff en the archive queue for so long + // due to memory concerns + return + } + + for (const key of req.keys) { + // 
eslint-disable-next-line security/detect-object-injection + const data = queueEntry.originalData[key] // collectedData + if (data) { + response.stateList.push(data) + } + } + response.success = true + respond(response, serializeRequestStateForTxResp) + } catch (e) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${InternalRouteEnum.binary_request_state_for_tx}: Exception executing request: ${errorToStringFull(e)}`) + nestedCountersInstance.countEvent('internal', `${route}-exception`) + respond(response, serializeRequestStateForTxResp) + } finally { + profilerInstance.scopedProfileSectionEnd(InternalRouteEnum.binary_request_state_for_tx) + } + }, + } + + this.p2p.registerInternalBinary(requestStateForTxRoute.name, requestStateForTxRoute.handler) + + networkContext.registerExternalPost('get-tx-receipt', async (req, res) => { + let result: { success: boolean; receipt?: ArchiverReceipt | SignedReceipt; reason?: string } + try { + let error = utils.validateTypes(req.body, { + txId: 's', + timestamp: 'n', + full_receipt: 'b', + sign: 'o', + }) + if (error) { + res.json((result = { success: false, reason: error })) + return + } + error = utils.validateTypes(req.body.sign, { + owner: 's', + sig: 's', + }) + if (error) { + res.json((result = { success: false, reason: error })) + return + } + + const { txId, timestamp, full_receipt, sign } = req.body + const isReqFromArchiver = Archivers.archivers.has(sign.owner) + if (!isReqFromArchiver) { + result = { success: false, reason: 'Request not from Archiver.' } + } else { + const isValidSignature = this.crypto.verify(req.body, sign.owner) + if (isValidSignature) { + let queueEntry: QueueEntry + if ( + this.archivedQueueEntriesByID.has(txId) && + this.archivedQueueEntriesByID.get(txId)?.acceptedTx?.timestamp === timestamp + ) { + if (logFlags.verbose) console.log('get-tx-receipt: ', txId, timestamp, 'archived') + queueEntry = this.archivedQueueEntriesByID.get(txId) + } else if ( + this._transactionQueueByID.has(txId) && + this._transactionQueueByID.get(txId)?.state === 'commiting' && + this._transactionQueueByID.get(txId)?.acceptedTx?.timestamp === timestamp + ) { + if (logFlags.verbose) console.log('get-tx-receipt: ', txId, timestamp, 'commiting') + queueEntry = this._transactionQueueByID.get(txId) + } + if (!queueEntry) { + res.status(400).json({ success: false, reason: 'Receipt Not Found.' }) + return + } + if (full_receipt) { + const fullReceipt: ArchiverReceipt = await this.getArchiverReceiptFromQueueEntry(queueEntry) + if (fullReceipt === null) { + res.status(400).json({ success: false, reason: 'Receipt Not Found.' }) + return + } + result = Utils.safeJsonParse(Utils.safeStringify({ success: true, receipt: fullReceipt })) + } else { + result = { success: true, receipt: this.stateManager.getSignedReceipt(queueEntry) } + } + } else { + result = { success: false, reason: 'Invalid Signature.' 
} + } + } + res.json(result) + } catch (e) { + console.log('Error caught in /get-tx-receipt: ', e) + res.json((result = { success: false, reason: e })) + } + }) + }, + handleSharedTX(this: TransactionQueueContext, tx: Shardus.TimestampedTx, appData: unknown, sender: Shardus.Node): QueueEntry { + profilerInstance.profileSectionStart('handleSharedTX') + const internalTx = this.app.isInternalTx(tx) + if ((internalTx && !isInternalTxAllowed()) || (!internalTx && networkMode !== 'processing')) { + profilerInstance.profileSectionEnd('handleSharedTX') + // Block invalid txs in case a node maliciously relays them to other nodes + return null + } + if (!internalTx && !this.config.p2p.allowEndUserTxnInjections) { + profilerInstance.profileSectionEnd('handleSharedTX') + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('tx_non_internal_tx_paused', '', 'execution paused for non-internal tx') + return null + } + // Perform fast validation of the transaction fields + profilerInstance.scopedProfileSectionStart('handleSharedTX_validateTX') + const validateResult = this.app.validate(tx, appData) + profilerInstance.scopedProfileSectionEnd('handleSharedTX_validateTX') + if (validateResult.success === false) { + this.statemanager_fatal( + `spread_tx_to_group_validateTX`, + `spread_tx_to_group validateTxnFields failed: ${utils.stringifyReduce(validateResult)}` + ) + profilerInstance.profileSectionEnd('handleSharedTX') + return null + } + + // Ask App to crack open tx and return timestamp, id (hash), and keys + const { timestamp, id, keys, shardusMemoryPatterns } = this.app.crack(tx, appData) + + // Check if we already have this tx in our queue + let queueEntry = this.getQueueEntrySafe(id) // , payload.timestamp) + if (queueEntry) { + profilerInstance.profileSectionEnd('handleSharedTX') + return null + } + + // Need to review these timeouts before main net. what bad things can happen by setting a timestamp too far in the future or past. 
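    // Illustrative sketch only (not part of this handler): the age check below combines two
    // config-derived windows. Assuming, hypothetically, queueSitTime = 6000 ms and
    // transactionExpireTime = 32 (seconds):
    //   const mostOfQueueSitTimeMs = 6000 * 0.9   // 5400 ms
    //   const txExpireTimeMs       = 32 * 1000    // 32000 ms
    // inRangeOfCurrentTime(timestamp, 5400, 32000) then bounds how far the tx timestamp may
    // deviate from shardusGetTime(); a tx outside that window is dropped here (return null)
    // instead of being queued.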
+ // only a subset of transactions can have timestamp set by the sender while others use independent consensus (askTxnTimestampFromNode) + // but that is up to the dapp + const mostOfQueueSitTimeMs = this.stateManager.queueSitTime * 0.9 + const txExpireTimeMs = this.config.transactionExpireTime * 1000 + const age = shardusGetTime() - timestamp + if (inRangeOfCurrentTime(timestamp, mostOfQueueSitTimeMs, txExpireTimeMs) === false) { + /* prettier-ignore */ if (logFlags.verbose) this.statemanager_fatal( `spread_tx_to_group_OldTx_or_tooFuture`, 'spread_tx_to_group cannot accept tx with age: ' + age ) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_spread_tx_to_groupToOldOrTooFuture', '', 'spread_tx_to_group working on tx with age: ' + age) + profilerInstance.profileSectionEnd('handleSharedTX') + return null + } + + // Pack into AcceptedTx for routeAndQueueAcceptedTransaction + const acceptedTx: AcceptedTx = { + timestamp, + txId: id, + keys, + data: tx, + appData, + shardusMemoryPatterns, + } + + const noConsensus = false // this can only be true for a set command which will never come from an endpoint + const added = this.routeAndQueueAcceptedTransaction( + acceptedTx, + /*sendGossip*/ false, + sender, + /*globalModification*/ false, + noConsensus + ) + if (added === 'lost') { + profilerInstance.profileSectionEnd('handleSharedTX') + return null // we are faking that the message got lost so bail here + } + if (added === 'out of range') { + profilerInstance.profileSectionEnd('handleSharedTX') + return null + } + if (added === 'notReady') { + profilerInstance.profileSectionEnd('handleSharedTX') + return null + } + queueEntry = this.getQueueEntrySafe(id) //, payload.timestamp) // now that we added it to the queue, it should be possible to get the queueEntry now + + if (queueEntry == null) { + // do not gossip this, we are not involved + // downgrading, this does not seem to be fatal, but may need further logs/testing + //this.statemanager_fatal(`spread_tx_to_group_noQE`, `spread_tx_to_group failed: cant find queueEntry for: ${utils.makeShortHash(payload.id)}`) + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('spread_tx_to_group_noQE', '', `spread_tx_to_group failed: cant find queueEntry for: ${utils.makeShortHash(id)}`) + profilerInstance.profileSectionEnd('handleSharedTX') + return null + } + + profilerInstance.profileSectionEnd('handleSharedTX') + return queueEntry + } +} \ No newline at end of file diff --git a/src/state-manager/TransactionQueue.nonce.ts b/src/state-manager/TransactionQueue.nonce.ts new file mode 100644 index 000000000..ef585d572 --- /dev/null +++ b/src/state-manager/TransactionQueue.nonce.ts @@ -0,0 +1,148 @@ +import { NonceQueueItem } from './state-manager-types' +import { logFlags } from '../logger' +import * as utils from '../utils' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { shardusGetTime, getNetworkTimeOffset } from '../network' +import * as NodeList from '../p2p/NodeList' +import * as Self from '../p2p/Self' +import * as Shardus from '../shardus/shardus-types' + +interface TransactionQueueContext { + nonceQueue: Map + mainLogger: any + seqLogger: any + app: Shardus.App + _timestampAndQueueTransaction: (tx: any, appData: any, txId: string) => Promise + stateManager: any +} + +export const nonceMethods = { + isTxInPendingNonceQueue(this: TransactionQueueContext, accountId: string, txId: string): boolean { + /* prettier-ignore */ if (logFlags.verbose) 
this.mainLogger.debug(`isTxInPendingNonceQueue ${accountId} ${txId}`, this.nonceQueue) + const queue = this.nonceQueue.get(accountId) + if (queue == null) { + return false + } + for (const item of queue) { + if (item.txId === txId) { + return true + } + } + return false + }, + + getPendingCountInNonceQueue(this: TransactionQueueContext): { totalQueued: number; totalAccounts: number; avgQueueLength: number } { + let totalQueued = 0 + let totalAccounts = 0 + for (const queue of this.nonceQueue.values()) { + totalQueued += queue.length + totalAccounts++ + } + const avgQueueLength = totalQueued / totalAccounts + return { totalQueued, totalAccounts, avgQueueLength } + }, + + addTransactionToNonceQueue(this: TransactionQueueContext, nonceQueueEntry: NonceQueueItem): { + success: boolean + reason?: string + alreadyAdded?: boolean + } { + try { + let queue = this.nonceQueue.get(nonceQueueEntry.accountId) + if (queue == null || (Array.isArray(queue) && queue.length === 0)) { + queue = [nonceQueueEntry] + this.nonceQueue.set(nonceQueueEntry.accountId, queue) + if (logFlags.debug) + this.mainLogger.debug( + `adding new nonce tx: ${nonceQueueEntry.txId} ${nonceQueueEntry.accountId} with nonce ${nonceQueueEntry.nonce}` + ) + } else if (queue && queue.length > 0) { + const index = utils.binarySearch(queue, nonceQueueEntry, (a, b) => Number(a.nonce) - Number(b.nonce)) + + if (index >= 0) { + // there is existing item with the same nonce. replace it with the new one + queue[index] = nonceQueueEntry + this.nonceQueue.set(nonceQueueEntry.accountId, queue) + nestedCountersInstance.countEvent('processing', 'replaceExistingNonceTx') + if (logFlags.debug) + this.mainLogger.debug( + `replace existing nonce tx ${nonceQueueEntry.accountId} with nonce ${nonceQueueEntry.nonce}, txId: ${nonceQueueEntry.txId}` + ) + return { success: true, reason: 'Replace existing pending nonce tx', alreadyAdded: true } + } + // add new item to the queue + utils.insertSorted(queue, nonceQueueEntry, (a, b) => Number(a.nonce) - Number(b.nonce)) + this.nonceQueue.set(nonceQueueEntry.accountId, queue) + } + /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${nonceQueueEntry.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: pause_nonceQ`) + nestedCountersInstance.countEvent('processing', 'addTransactionToNonceQueue') + if (logFlags.debug) + this.mainLogger.debug( + `Added tx to nonce queue for ${nonceQueueEntry.accountId} with nonce ${nonceQueueEntry.nonce} nonceQueue: ${queue.length}` + ) + return { success: true, reason: `Nonce queue size for account: ${queue.length}`, alreadyAdded: false } + } catch (e) { + nestedCountersInstance.countEvent('processing', 'addTransactionToNonceQueueError') + this.mainLogger.error( + `Error adding tx to nonce queue: ${e.message}, tx: ${utils.stringifyReduce(nonceQueueEntry)}` + ) + return { success: false, reason: e.message, alreadyAdded: false } + } + }, + async processNonceQueue(this: TransactionQueueContext, accounts: Shardus.WrappedData[]): Promise { + for (const account of accounts) { + const queue = this.nonceQueue.get(account.accountId) + if (queue == null) { + continue + } + for (const item of queue) { + const accountNonce = await this.app.getAccountNonce(account.accountId, account) + if (item.nonce === accountNonce) { + nestedCountersInstance.countEvent('processing', 'processNonceQueue foundMatchingNonce') + if (logFlags.debug) + this.mainLogger.debug( + `Found matching nonce in queue or ${account.accountId} with nonce 
${item.nonce}`, + item + ) + item.appData.requestNewTimestamp = true + + // start of timestamp logging + if (logFlags.important_as_error) { + const txTimestamp = this.app.getTimestampFromTransaction(item.tx, item.appData) + const nowNodeTimestamp = shardusGetTime() + const delta = nowNodeTimestamp - txTimestamp + const ntpOffset = getNetworkTimeOffset() + /* prettier-ignore */ console.log(`TxnTS: pre _timestampAndQueueTransaction txTimestamp=${txTimestamp}, nowNodeTimestamp=${nowNodeTimestamp}, delta=${delta}, ntpOffset=${ntpOffset}, txID=${item.txId}`) + } + // end of timestamp logging. + + await this.stateManager.shardus._timestampAndQueueTransaction( + item.tx, + item.appData, + item.global, + item.noConsensus, + 'nonceQueue' + ) + + // start of timestamp logging + if (logFlags.important_as_error) { + const txTimestamp = this.app.getTimestampFromTransaction(item.tx, item.appData) + const nowNodeTimestamp = shardusGetTime() + const delta = nowNodeTimestamp - txTimestamp + const ntpOffset = getNetworkTimeOffset() + /* prettier-ignore */ console.log(`TxnTS: post _timestampAndQueueTransaction txTimestamp=${txTimestamp}, nowNodeTimestamp=${nowNodeTimestamp}, delta=${delta}, ntpOffset=${ntpOffset}, txID=${item.txId}`) + } + // end of timestamp logging. + + // remove the item from the queue + const index = queue.indexOf(item) + queue.splice(index, 1) + + //we should break here. we keep looking up account values after we go to the step needed. + //this assumes we will not put two TXs with the same nonce value in the queue. + break + } + } + } + } +} \ No newline at end of file diff --git a/src/state-manager/TransactionQueue.seen.ts b/src/state-manager/TransactionQueue.seen.ts new file mode 100644 index 000000000..390bdfab2 --- /dev/null +++ b/src/state-manager/TransactionQueue.seen.ts @@ -0,0 +1,226 @@ +import { SeenAccounts, QueueEntry } from './state-manager-types' +import { nestedCountersInstance } from '../utils/nestedCounters' +import { logFlags } from '../logger' + +interface TransactionQueueContext { + config: any + queueReads: Set + queueWrites: Set + queueReadWritesOld: Set + mainLogger: any + processQueue_accountSeen2: (seenAccounts: SeenAccounts, queueEntry: QueueEntry) => boolean + processQueue_markAccountsSeen2: (seenAccounts: SeenAccounts, queueEntry: QueueEntry) => void +} + +export const seenMethods = { + /** + * processQueue_accountSeen + * Helper for processQueue to detect if this queueEntry has any accounts that are already blocked because they were seen upstream + * a seen account is a an account that is involved in a TX that is upstream(older) in the queue + * @param seenAccounts + * @param queueEntry + */ + processQueue_accountSeen(this: TransactionQueueContext, seenAccounts: SeenAccounts, queueEntry: QueueEntry): boolean { + if (this.config.debug.useShardusMemoryPatterns && queueEntry.shardusMemoryPatternSets != null) { + return this.processQueue_accountSeen2(seenAccounts, queueEntry) + } + + if (queueEntry.uniqueKeys == null) { + //TSConversion double check if this needs extra logging + return false + } + for (const key of queueEntry.uniqueKeys) { + // eslint-disable-next-line security/detect-object-injection + if (seenAccounts[key] != null) { + return true + } + } + return false + }, + + processQueue_getUpstreamTx(this: TransactionQueueContext, seenAccounts: SeenAccounts, queueEntry: QueueEntry): QueueEntry | null { + if (this.config.debug.useShardusMemoryPatterns && queueEntry.shardusMemoryPatternSets != null) { + return null + } + if (queueEntry.uniqueKeys == null) { 
+ //TSConversion double check if this needs extra logging + return null + } + for (const key of queueEntry.uniqueKeys) { + // eslint-disable-next-line security/detect-object-injection + if (seenAccounts[key] != null) { + return seenAccounts[key] + } + } + return null + }, + + /** + * processQueue_markAccountsSeen + * Helper for processQueue to mark accounts as seen. + * note only operates on writeable accounts. a read only account should not block downstream operations + * a seen account is a an account that is involved in a TX that is upstream(older) in the queue + * @param seenAccounts + * @param queueEntry + */ + processQueue_markAccountsSeen(this: TransactionQueueContext, seenAccounts: SeenAccounts, queueEntry: QueueEntry): void { + if (this.config.debug.useShardusMemoryPatterns && queueEntry.shardusMemoryPatternSets != null) { + this.processQueue_markAccountsSeen2(seenAccounts, queueEntry) + return + } + + if (queueEntry.uniqueWritableKeys == null) { + //TSConversion double check if this needs extra logging + return + } + // only mark writeable keys as seen but we will check/clear against all keys + /* eslint-disable security/detect-object-injection */ + for (const key of queueEntry.uniqueWritableKeys) { + if (seenAccounts[key] == null) { + seenAccounts[key] = queueEntry + } + } + /* eslint-enable security/detect-object-injection */ + }, + + // this.queueReads = new Set() + // this.queueWrites = new Set() + processQueue_accountSeen2(this: TransactionQueueContext, seenAccounts: SeenAccounts, queueEntry: QueueEntry): boolean { + if (queueEntry.uniqueKeys == null) { + //TSConversion double check if this needs extra logging + return false + } + + if (queueEntry.shardusMemoryPatternSets != null) { + //normal blocking for read write + for (const id of queueEntry.shardusMemoryPatternSets.rw) { + if (this.queueWrites.has(id)) { + // nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen rw queue_write') + // nestedCountersInstance.countEvent('stateManager', `shrd_accountSeen rw queue_write ${id}`) + return true + } + if (this.queueReadWritesOld.has(id)) { + // nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen rw old queue_write') + // nestedCountersInstance.countEvent('stateManager', `shrd_accountSeen rw old queue_write ${id}`) + return true + } + //also blocked by upstream reads + if (this.queueReads.has(id)) { + // nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen rw queue_read') + // nestedCountersInstance.countEvent('stateManager', `shrd_accountSeen rw queue_read ${id}`) + return true + } + } + // in theory write only is not blocked by upstream writes + // but has to wait its turn if there is an uptream read + for (const id of queueEntry.shardusMemoryPatternSets.wo) { + //also blocked by upstream reads + if (this.queueReads.has(id)) { + // nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen wo queue_read') + // nestedCountersInstance.countEvent('stateManager', `shrd_accountSeen wo queue_read ${id}`) + return true + } + if (this.queueReadWritesOld.has(id)) { + // nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen wo queue_read_write_old') + // nestedCountersInstance.countEvent('stateManager', `shrd_accountSeen wo queue_read_write_old ${id}`) + return true + } + } + + // write once... 
also not blocked in theory, because the first op is a write + // this is a special case for something like code bytes that are written once + // and then immutable + // for (const id of queueEntry.shardusMemoryPatternSets.on) { + // if(this.queueWrites.has(id)){ + // return true + // } + // if(this.queueWritesOld.has(id)){ + // return true + // } + // } + + //read only blocks for upstream writes + for (const id of queueEntry.shardusMemoryPatternSets.ro) { + if (this.queueWrites.has(id)) { + nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen ro queue_write') + return true + } + if (this.queueReadWritesOld.has(id)) { + nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen ro queue_read_write_old') + return true + } + //note blocked by upstream reads, because this read only operation + //will not impact the upstream read + } + + //we made it, not blocked + return false + } + + for (const key of queueEntry.uniqueKeys) { + // eslint-disable-next-line security/detect-object-injection + if (seenAccounts[key] != null) { + return true + } + } + + return false + }, + + processQueue_markAccountsSeen2(this: TransactionQueueContext, seenAccounts: SeenAccounts, queueEntry: QueueEntry): void { + if (queueEntry.uniqueWritableKeys == null) { + //TSConversion double check if this needs extra logging + return + } + + if (queueEntry.shardusMemoryPatternSets != null) { + for (const id of queueEntry.shardusMemoryPatternSets.rw) { + this.queueWrites.add(id) + this.queueReads.add(id) + } + for (const id of queueEntry.shardusMemoryPatternSets.wo) { + this.queueWrites.add(id) + } + for (const id of queueEntry.shardusMemoryPatternSets.on) { + this.queueWrites.add(id) + } + for (const id of queueEntry.shardusMemoryPatternSets.ro) { + this.queueReads.add(id) + } + return + } + + // only mark writeable keys as seen but we will check/clear against all keys + /* eslint-disable security/detect-object-injection */ + for (const key of queueEntry.uniqueWritableKeys) { + if (seenAccounts[key] == null) { + seenAccounts[key] = queueEntry + } + //old style memory access is treated as RW: + this.queueReadWritesOld.add(key) + } + /* eslint-enable security/detect-object-injection */ + }, + + /** + * processQueue_clearAccountsSeen + * Helper for processQueue to clear accounts that were marked as seen. 
+ * a seen account is a an account that is involved in a TX that is upstream(older) in the queue + * @param seenAccounts + * @param queueEntry + */ + processQueue_clearAccountsSeen(this: TransactionQueueContext, seenAccounts: SeenAccounts, queueEntry: QueueEntry): void { + if (queueEntry.uniqueKeys == null) { + //TSConversion double check if this needs extra logging + return + } + /* eslint-disable security/detect-object-injection */ + for (const key of queueEntry.uniqueKeys) { + if (seenAccounts[key] != null && seenAccounts[key].logID === queueEntry.logID) { + if (logFlags.verbose) this.mainLogger.debug(`${new Date()}}clearing key ${key} for tx ${queueEntry.logID}`) + seenAccounts[key] = null + } + } + /* eslint-enable security/detect-object-injection */ + } +} \ No newline at end of file diff --git a/src/state-manager/TransactionQueue.ts b/src/state-manager/TransactionQueue.ts index f2738ccd2..634be633d 100644 --- a/src/state-manager/TransactionQueue.ts +++ b/src/state-manager/TransactionQueue.ts @@ -1,91 +1,61 @@ -import * as Context from '../p2p/Context' -import { P2P as P2PTypes, StateManager as StateManagerTypes } from '@shardeum-foundation/lib-types' +import { P2P as P2PTypes, StateManager as StateManagerTypes, Utils } from '@shardeum-foundation/lib-types' +import { Logger as L4jsLogger } from 'log4js' import StateManager from '.' import Crypto from '../crypto' import Logger, { logFlags } from '../logger' +import { shardusGetTime } from '../network' import * as Apoptosis from '../p2p/Apoptosis' import * as Archivers from '../p2p/Archivers' -import { P2PModuleContext as P2P, network as networkContext, config as configContext } from '../p2p/Context' +import * as Comms from '../p2p/Comms' +import * as Context from '../p2p/Context' +import { P2PModuleContext as P2P, config as configContext } from '../p2p/Context' import * as CycleChain from '../p2p/CycleChain' -import { nodes, byPubKey, potentiallyRemoved, activeByIdOrder } from '../p2p/NodeList' +import { getGlobalTxReceipt } from '../p2p/GlobalAccounts' +import * as NodeList from '../p2p/NodeList' +import { byPubKey, nodes } from '../p2p/NodeList' +import * as Self from '../p2p/Self' +import { isNodeInRotationBounds } from '../p2p/Utils' import * as Shardus from '../shardus/shardus-types' import Storage from '../storage' +import { BroadcastStateReq, serializeBroadcastStateReq } from '../types/BroadcastStateReq' +import { + verificationDataCombiner +} from '../types/Helpers' +import { RequestTxAndStateReq, serializeRequestTxAndStateReq } from '../types/RequestTxAndStateReq' +import { RequestTxAndStateResp, deserializeRequestTxAndStateResp } from '../types/RequestTxAndStateResp' +import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' import * as utils from '../utils' -import { getCorrespondingNodes, verifyCorrespondingSender } from '../utils/fastAggregatedCorrespondingTell' -import { Signature, SignedObject } from '@shardeum-foundation/lib-crypto-utils' -import { errorToStringFull, inRangeOfCurrentTime, withTimeout, XOR } from '../utils' -import { Utils } from '@shardeum-foundation/lib-types' -import * as Self from '../p2p/Self' -import * as Comms from '../p2p/Comms' import { nestedCountersInstance } from '../utils/nestedCounters' -import Profiler, { cUninitializedSize, profilerInstance } from '../utils/profiler' +import Profiler, { profilerInstance } from '../utils/profiler' +import { XOR } from '../utils/functions/general' import ShardFunctions from './shardFunctions' -import * as NodeList from '../p2p/NodeList' import { 
AcceptedTx, AccountFilter, + ArchiverReceipt, CommitConsensedTransactionResult, + NonceQueueItem, PreApplyAcceptedTransactionResult, ProcessQueueStats, QueueCountsResult, QueueEntry, - RequestReceiptForTxResp_old, - RequestStateForTxReq, - RequestStateForTxResp, - SeenAccounts, + RequestFinalDataResp, + SignedReceipt, SimpleNumberStats, - StringBoolObjectMap, - StringNodeObjectMap, TxDebug, WrappedResponses, - ArchiverReceipt, - NonceQueueItem, - SignedReceipt, - Proposal, - RequestFinalDataResp, + SeenAccounts, } from './state-manager-types' -import { isInternalTxAllowed, networkMode } from '../p2p/Modes' -import { Node } from '@shardeum-foundation/lib-types/build/src/p2p/NodeListTypes' -import { Logger as L4jsLogger } from 'log4js' -import { getNetworkTimeOffset, ipInfo, shardusGetTime } from '../network' -import { InternalBinaryHandler } from '../types/Handler' -import { BroadcastStateReq, deserializeBroadcastStateReq, serializeBroadcastStateReq } from '../types/BroadcastStateReq' -import { - getStreamWithTypeCheck, - requestErrorHandler, - verificationDataCombiner, - verificationDataSplitter, -} from '../types/Helpers' -import { RequestErrorEnum } from '../types/enum/RequestErrorEnum' -import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' -import { TypeIdentifierEnum } from '../types/enum/TypeIdentifierEnum' -import { - BroadcastFinalStateReq, - deserializeBroadcastFinalStateReq, - serializeBroadcastFinalStateReq, -} from '../types/BroadcastFinalStateReq' -import { verifyPayload } from '../types/ajv/Helpers' -import { - SpreadTxToGroupSyncingReq, - deserializeSpreadTxToGroupSyncingReq, - serializeSpreadTxToGroupSyncingReq, -} from '../types/SpreadTxToGroupSyncingReq' -import { RequestTxAndStateReq, serializeRequestTxAndStateReq } from '../types/RequestTxAndStateReq' -import { RequestTxAndStateResp, deserializeRequestTxAndStateResp } from '../types/RequestTxAndStateResp' -import { deserializeRequestStateForTxReq, serializeRequestStateForTxReq } from '../types/RequestStateForTxReq' -import { - deserializeRequestStateForTxResp, - RequestStateForTxRespSerialized, - serializeRequestStateForTxResp, -} from '../types/RequestStateForTxResp' -import { deserializeRequestReceiptForTxResp, RequestReceiptForTxRespSerialized } from '../types/RequestReceiptForTxResp' -import { RequestReceiptForTxReqSerialized, serializeRequestReceiptForTxReq } from '../types/RequestReceiptForTxReq' -import { isNodeInRotationBounds } from '../p2p/Utils' -import { BadRequest, ResponseError, serializeResponseError } from '../types/ResponseError' -import { error } from 'console' -import { PoqoDataAndReceiptReq, serializePoqoDataAndReceiptReq } from '../types/PoqoDataAndReceiptReq' -import { AJVSchemaEnum } from '../types/enum/AJVSchemaEnum' -import { getGlobalTxReceipt } from '../p2p/GlobalAccounts' + +import { archiverMethods } from './TransactionQueue.archiver' +import { coreMethods } from './TransactionQueue.core' +import { debugMethods } from './TransactionQueue.debug' +import { entryMethods } from './TransactionQueue.entry' +import { expiredMethods } from './TransactionQueue.expired' +import { factMethods } from './TransactionQueue.fact' +import { handlers } from './TransactionQueue.handlers' +import { nonceMethods } from './TransactionQueue.nonce' +import { seenMethods } from './TransactionQueue.seen' interface Receipt { tx: AcceptedTx @@ -99,7 +69,6 @@ export enum DebugComplete { Incomplete = 0, Completed = 1, } - class TransactionQueue { app: Shardus.App crypto: Crypto @@ -266,919 +235,6 @@ class 
TransactionQueue { this.debugRecentQueueEntry = null } - /*** - * ######## ## ## ######## ######## ####### #### ## ## ######## ###### - * ## ### ## ## ## ## ## ## ## ## ### ## ## ## ## - * ## #### ## ## ## ## ## ## ## ## #### ## ## ## - * ###### ## ## ## ## ## ######## ## ## ## ## ## ## ## ###### - * ## ## #### ## ## ## ## ## ## ## #### ## ## - * ## ## ### ## ## ## ## ## ## ## ### ## ## ## - * ######## ## ## ######## ## ####### #### ## ## ## ###### - */ - - setupHandlers(): void { - // this.p2p.registerInternal( - // 'broadcast_state', - // async (payload: { txid: string; stateList: Shardus.WrappedResponse[] }) => { - // profilerInstance.scopedProfileSectionStart('broadcast_state') - // try { - // // Save the wrappedAccountState with the rest our queue data - // // let message = { stateList: datas, txid: queueEntry.acceptedTX.id } - // // this.p2p.tell([correspondingEdgeNode], 'broadcast_state', message) - - // // make sure we have it - // const queueEntry = this.getQueueEntrySafe(payload.txid) // , payload.timestamp) - // //It is okay to ignore this transaction if the txId is not found in the queue. - // if (queueEntry == null) { - // //In the past we would enqueue the TX, expecially if syncing but that has been removed. - // //The normal mechanism of sharing TXs is good enough. - // nestedCountersInstance.countEvent('processing', 'broadcast_state_noQueueEntry') - // return - // } - // // add the data in - // for (const data of payload.stateList) { - // if ( - // configContext.stateManager.collectedDataFix && - // configContext.stateManager.rejectSharedDataIfCovered - // ) { - // const consensusNodes = this.stateManager.transactionQueue.getConsenusGroupForAccount( - // data.accountId - // ) - // const coveredByUs = consensusNodes.map((node) => node.id).includes(Self.id) - // if (coveredByUs) { - // nestedCountersInstance.countEvent('processing', 'broadcast_state_coveredByUs') - // /* prettier-ignore */ if (logFlags.verbose) console.log(`broadcast_state: coveredByUs: ${data.accountId} no need to accept this data`) - // continue - // } else { - // this.queueEntryAddData(queueEntry, data) - // } - // } else { - // this.queueEntryAddData(queueEntry, data) - // } - - // if (queueEntry.state === 'syncing') { - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_gotBroadcastData', `${queueEntry.acceptedTx.txId}`, ` qId: ${queueEntry.entryID} data:${data.accountId}`) - // } - // } - // } finally { - // profilerInstance.scopedProfileSectionEnd('broadcast_state') - // } - // } - // ) - - // this.p2p.registerInternal( - // 'broadcast_state_complete_data', - // async (payload: { txid: string; stateList: Shardus.WrappedResponse[] }) => { - // profilerInstance.scopedProfileSectionStart('broadcast_state_complete_data') - // try { - // const queueEntry = this.getQueueEntrySafe(payload.txid) // , payload.timestamp) - // if (queueEntry == null) { - // nestedCountersInstance.countEvent('processing', 'broadcast_state_complete_data_noQueueEntry') - // return - // } - // if (queueEntry.gossipedCompleteData === true) { - // return - // } - // for (const data of payload.stateList) { - // if (configContext.stateManager.collectedDataFix && configContext.stateManager.rejectSharedDataIfCovered) { - // const consensusNodes = this.stateManager.transactionQueue.getConsenusGroupForAccount(data.accountId) - // const coveredByUs = consensusNodes.map((node) => node.id).includes(Self.id) - // if (coveredByUs) { - // nestedCountersInstance.countEvent('processing', 
'broadcast_state_coveredByUs') - // /* prettier-ignore */ if (logFlags.verbose) console.log(`broadcast_state: coveredByUs: ${data.accountId} no need to accept this data`) - // continue - // } else { - // this.queueEntryAddData(queueEntry, data) - // } - // } else { - // this.queueEntryAddData(queueEntry, data) - // } - // } - // Comms.sendGossip( - // 'broadcast_state_complete_data', - // payload, - // undefined, - // undefined, - // queueEntry.executionGroup, - // false, - // 6, - // queueEntry.acceptedTx.txId - // ) - // queueEntry.gossipedCompleteData = true - // } finally { - // profilerInstance.scopedProfileSectionEnd('broadcast_state_complete_data') - // } - // } - // ) - - const broadcastStateRoute: P2PTypes.P2PTypes.Route> = { - name: InternalRouteEnum.binary_broadcast_state, - // eslint-disable-next-line @typescript-eslint/no-unused-vars - handler: (payload, respond, header, sign) => { - const route = InternalRouteEnum.binary_broadcast_state - nestedCountersInstance.countEvent('internal', route) - profilerInstance.scopedProfileSectionStart(route, false, payload.length) - const errorHandler = ( - errorType: RequestErrorEnum, - opts?: { customErrorLog?: string; customCounterSuffix?: string } - ): void => requestErrorHandler(route, errorType, header, opts) - - try { - const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cBroadcastStateReq) - if (!requestStream) { - return errorHandler(RequestErrorEnum.InvalidRequest) - } - // verification data checks - if (header.verification_data == null) { - return errorHandler(RequestErrorEnum.MissingVerificationData) - } - const verificationDataParts = verificationDataSplitter(header.verification_data) - if (verificationDataParts.length !== 3) { - return errorHandler(RequestErrorEnum.InvalidVerificationData) - } - const [vTxId, vStateSize, vStateAddress] = verificationDataParts - const queueEntry = this.getQueueEntrySafe(vTxId) - //It is okay to ignore this transaction if the txId is not found in the queue. 
- if (queueEntry == null) { - /* prettier-ignore */ if (logFlags.error && logFlags.verbose) this.mainLogger.error(`${route} cant find queueEntry for: ${utils.makeShortHash(vTxId)}`) - return errorHandler(RequestErrorEnum.InvalidVerificationData, { - customCounterSuffix: 'queueEntryNotFound', - }) - } - - const req = deserializeBroadcastStateReq(requestStream) - if (req.txid !== vTxId) { - return errorHandler(RequestErrorEnum.InvalidVerificationData) - } - - if (req.stateList.length !== parseInt(vStateSize)) { - return errorHandler(RequestErrorEnum.InvalidVerificationData) - } - /* prettier-ignore */ if (logFlags.verbose && logFlags.console) console.log(`${route}: txId: ${req.txid} stateSize: ${req.stateList.length} stateAddress: ${vStateAddress}`) - - const senderNodeId = header.sender_id - let isSenderOurExeNeighbour = false - const senderIsInExecutionGroup = queueEntry.executionGroupMap.has(senderNodeId) - const neighbourNodes = utils.selectNeighbors( - queueEntry.executionGroup, - queueEntry.ourExGroupIndex, - 2 - ) as Shardus.Node[] - const neighbourNodeIds = neighbourNodes.map((node) => node.id) - isSenderOurExeNeighbour = senderIsInExecutionGroup && neighbourNodeIds.includes(senderNodeId) - - // sender verification loop - for (let i = 0; i < req.stateList.length; i++) { - // eslint-disable-next-line security/detect-object-injection - const state = req.stateList[i] - let isSenderValid = false - if (configContext.p2p.useFactCorrespondingTell) { - // check if it is a neighbour exe node sharing data - if (configContext.stateManager.shareCompleteData) { - if (isSenderOurExeNeighbour) { - nestedCountersInstance.countEvent( - 'stateManager', - 'factValidateCorrespondingTellSender: sender is an execution node and a neighbour node' - ) - isSenderValid = true - } else { - // check if it is a corresponding tell sender - isSenderValid = this.factValidateCorrespondingTellSender(queueEntry, state.accountId, senderNodeId) - } - } else { - // check if it is a corresponding tell sender - isSenderValid = this.factValidateCorrespondingTellSender(queueEntry, state.accountId, senderNodeId) - } - } else { - isSenderValid = this.validateCorrespondingTellSender(queueEntry, state.accountId, senderNodeId) - } - - if ( - this.stateManager.testFailChance( - configContext.debug.ignoreDataTellChance, - 'ignoreDataTellChance', - queueEntry.logID, - '', - logFlags.verbose - ) === true - ) { - isSenderValid = false - } - - if (isSenderValid === false) { - this.mainLogger.error(`${route} validateCorrespondingTellSender failed for ${state.accountId}`) - nestedCountersInstance.countEvent('processing', 'validateCorrespondingTellSender failed') - return errorHandler(RequestErrorEnum.InvalidSender) - } - } - // update loop - for (let i = 0; i < req.stateList.length; i++) { - // eslint-disable-next-line security/detect-object-injection - const state = req.stateList[i] - - if (configContext.stateManager.collectedDataFix && configContext.stateManager.rejectSharedDataIfCovered) { - const consensusNodes = this.stateManager.transactionQueue.getConsenusGroupForAccount(state.accountId) - const coveredByUs = consensusNodes.map((node) => node.id).includes(Self.id) - if (coveredByUs) { - nestedCountersInstance.countEvent('processing', 'broadcast_state_coveredByUs') - /* prettier-ignore */ if (logFlags.verbose) console.log(`broadcast_state: coveredByUs: ${state.accountId} no need to accept this data`) - continue - } else { - this.queueEntryAddData(queueEntry, state) - } - } else { - this.queueEntryAddData(queueEntry, state) - } - if 
(queueEntry.state === 'syncing') { - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_gotBroadcastData', `${queueEntry.acceptedTx.txId}`, ` qId: ${queueEntry.entryID} data:${state.accountId}`) - } - } - } catch (e) { - nestedCountersInstance.countEvent('internal', `${route}-exception`) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route}: Exception executing request: ${errorToStringFull(e)}`) - } finally { - profilerInstance.scopedProfileSectionEnd(route, payload.length) - } - }, - } - - this.p2p.registerInternalBinary(broadcastStateRoute.name, broadcastStateRoute.handler) - - // const broadcastFinalStateRoute: P2PTypes.P2PTypes.Route> = { - // name: InternalRouteEnum.binary_broadcast_finalstate, - // // eslint-disable-next-line @typescript-eslint/no-unused-vars - // handler: (payload, response, header, sign) => { - // const route = InternalRouteEnum.binary_broadcast_finalstate - // nestedCountersInstance.countEvent('internal', route) - // profilerInstance.scopedProfileSectionStart(route, false, payload.length) - // const errorHandler = ( - // errorType: RequestErrorEnum, - // opts?: { customErrorLog?: string; customCounterSuffix?: string } - // ): void => requestErrorHandler(route, errorType, header, opts) - - // try { - // const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cBroadcastFinalStateReq) - // if (!requestStream) { - // return errorHandler(RequestErrorEnum.InvalidRequest) - // } - - // // verification data checks - // if (header.verification_data == null) { - // return errorHandler(RequestErrorEnum.MissingVerificationData) - // } - // const verificationDataParts = verificationDataSplitter(header.verification_data) - // if (verificationDataParts.length !== 2) { - // return errorHandler(RequestErrorEnum.InvalidVerificationData) - // } - // const [vTxId, vStateSize] = verificationDataParts - // const queueEntry = this.getQueueEntrySafe(vTxId) - // //It is okay to ignore this transaction if the txId is not found in the queue. 
- // if (queueEntry == null) { - // /* prettier-ignore */ if (logFlags.error && logFlags.verbose) this.mainLogger.error(`${route} cant find queueEntry for: ${utils.makeShortHash(vTxId)}`) - // return errorHandler(RequestErrorEnum.InvalidVerificationData, { - // customCounterSuffix: 'queueEntryNotFound', - // }) - // } - - // // deserialization - // const req = deserializeBroadcastFinalStateReq(requestStream) - // if (req.txid !== vTxId) { - // return errorHandler(RequestErrorEnum.InvalidVerificationData) - // } - - // if (req.stateList.length !== parseInt(vStateSize)) { - // return errorHandler(RequestErrorEnum.InvalidVerificationData) - // } - - // /* prettier-ignore */ if (logFlags.verbose && logFlags.console) console.log(`${route}: txId: ${req.txid} stateSize: ${req.stateList.length}`) - // let saveSomething = false - // for (const data of req.stateList) { - // //let wrappedResponse = data as Shardus.WrappedResponse - // //this.queueEntryAddData(queueEntry, data) - // if (data == null) { - // /* prettier-ignore */ if (logFlags.error && logFlags.verbose) this.mainLogger.error(`broadcast_finalstate data == null`) - // continue - // } - // const isValidFinalDataSender = this.factValidateCorrespondingTellFinalDataSender(queueEntry, header.sender_id) - // if (isValidFinalDataSender === false) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`broadcast_finalstate invalid sender ${header.sender_id} for data: ${data.accountId}`) - // return errorHandler(RequestErrorEnum.InvalidSender); - // } - // } - // for (const data of req.stateList) { - // if (data == null) { - // /* prettier-ignore */ if (logFlags.error && logFlags.verbose) this.mainLogger.error(`broadcast_finalstate data == null`) - // continue - // } - // if (queueEntry.collectedFinalData[data.accountId] == null) { - // queueEntry.collectedFinalData[data.accountId] = data - // saveSomething = true - // /* prettier-ignore */ if (logFlags.playback && logFlags.verbose) this.logger.playbackLogNote('broadcast_finalstate', `${queueEntry.logID}`, `broadcast_finalstate addFinalData qId: ${queueEntry.entryID} data:${utils.makeShortHash(data.accountId)} collected keys: ${utils.stringifyReduce(Object.keys(queueEntry.collectedFinalData))}`) - // } - // } - // } catch (e) { - // nestedCountersInstance.countEvent('internal', `${route}-exception`) - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route}: Exception executing request: ${errorToStringFull(e)}`) - // } finally { - // profilerInstance.scopedProfileSectionEnd(route, payload.length) - // } - // }, - // } - - // this.p2p.registerInternalBinary(broadcastFinalStateRoute.name, broadcastFinalStateRoute.handler) - - // this.p2p.registerInternal( - // 'spread_tx_to_group_syncing', - // async (payload: Shardus.AcceptedTx, _respondWrapped: unknown, sender: Node) => { - // profilerInstance.scopedProfileSectionStart('spread_tx_to_group_syncing') - // try { - // //handleSharedTX will also validate fields - // this.handleSharedTX(payload.data, payload.appData, sender) - // } finally { - // profilerInstance.scopedProfileSectionEnd('spread_tx_to_group_syncing') - // } - // } - // ) - - const spreadTxToGroupSyncingBinaryHandler: P2PTypes.P2PTypes.Route> = { - name: InternalRouteEnum.binary_spread_tx_to_group_syncing, - // eslint-disable-next-line @typescript-eslint/no-unused-vars - handler: async (payload, respond, header, sign) => { - const route = InternalRouteEnum.binary_spread_tx_to_group_syncing - nestedCountersInstance.countEvent('internal', route) - 
this.profiler.scopedProfileSectionStart(route, false, payload.length) - const errorHandler = ( - errorType: RequestErrorEnum, - opts?: { customErrorLog?: string; customCounterSuffix?: string } - ): void => requestErrorHandler(route, errorType, header, opts) - - try { - const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cSpreadTxToGroupSyncingReq) - if (!requestStream) { - return errorHandler(RequestErrorEnum.InvalidRequest) - } - - const req: SpreadTxToGroupSyncingReq = deserializeSpreadTxToGroupSyncingReq(requestStream) - - const ajvErrors = verifyPayload(AJVSchemaEnum.SpreadTxToGroupSyncingReq, req) - if (ajvErrors && ajvErrors.length > 0) { - this.mainLogger.error(`${route}: request validation errors: ${ajvErrors}`) - return errorHandler(RequestErrorEnum.InvalidPayload) - } - - const node = this.p2p.state.getNode(header.sender_id) - this.handleSharedTX(req.data, req.appData, node) - } catch (e) { - nestedCountersInstance.countEvent('internal', `${route}-exception`) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${route}: Exception executing request: ${errorToStringFull(e)}`) - } finally { - this.profiler.scopedProfileSectionEnd(route) - } - }, - } - - this.p2p.registerInternalBinary( - spreadTxToGroupSyncingBinaryHandler.name, - spreadTxToGroupSyncingBinaryHandler.handler - ) - - this.p2p.registerGossipHandler( - 'spread_tx_to_group', - async ( - payload: { data: Shardus.TimestampedTx; appData: unknown }, - sender: Node, - tracker: string, - msgSize: number - ) => { - profilerInstance.scopedProfileSectionStart('spread_tx_to_group', false, msgSize) - let respondSize = cUninitializedSize - try { - // Place tx in queue (if younger than m) - // gossip 'spread_tx_to_group' to transaction group - - //handleSharedTX will also validate fields. 
payload is an AcceptedTX so must pass in the .data as the rawTX - const queueEntry = this.handleSharedTX(payload.data, payload.appData, sender) - if (queueEntry == null) { - return - } - - // get transaction group - const transactionGroup = this.queueEntryGetTransactionGroup(queueEntry) - if (queueEntry.ourNodeInTransactionGroup === false) { - return - } - if (transactionGroup.length > 1) { - this.stateManager.debugNodeGroup( - queueEntry.acceptedTx.txId, - queueEntry.acceptedTx.timestamp, - `spread_tx_to_group transactionGroup:`, - transactionGroup - ) - respondSize = await this.p2p.sendGossipIn( - 'spread_tx_to_group', - payload, - tracker, - sender, - transactionGroup, - false, - -1, - queueEntry.acceptedTx.txId - ) - /* prettier-ignore */ if (logFlags.verbose) console.log( 'queueEntry.isInExecutionHome', queueEntry.acceptedTx.txId, queueEntry.isInExecutionHome ) - // If our node is in the execution group, forward this raw tx to the subscribed archivers - if (queueEntry.isInExecutionHome === true) { - this.addOriginalTxDataToForward(queueEntry) - } - } - } finally { - profilerInstance.scopedProfileSectionEnd('spread_tx_to_group', respondSize) - } - } - ) - - // THIS HANDLER IS NOT USED ANYMORE - // this.p2p.registerGossipHandler( - // 'gossip-final-state', - // async ( - // payload: { txid: string; stateList: Shardus.WrappedResponse[], txGroupCycle?: number }, - // sender: Node, - // tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('gossip-final-state', false, msgSize) - // const respondSize = cUninitializedSize - // try { - // // make sure we have it - // const queueEntry = this.getQueueEntrySafe(payload.txid) // , payload.timestamp) - // //It is okay to ignore this transaction if the txId is not found in the queue. - // if (queueEntry == null) { - // //In the past we would enqueue the TX, expecially if syncing but that has been removed. - // //The normal mechanism of sharing TXs is good enough. 
- // nestedCountersInstance.countEvent('processing', 'gossip-final-state_noQueueEntry') - // return - // } - // if (payload.txGroupCycle) { - // if (queueEntry.txGroupCycle !== payload.txGroupCycle) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`gossip-final-state mismatch txGroupCycle for txid: ${payload.txid}, sender's txGroupCycle: ${payload.txGroupCycle}, our txGroupCycle: ${queueEntry.txGroupCycle}`) - // nestedCountersInstance.countEvent( - // 'processing', - // 'gossip-final-state: mismatch txGroupCycle for txid ' + payload.txid - // ) - // } - // delete payload.txGroupCycle - // } - // if (logFlags.debug) - // this.mainLogger.debug(`gossip-final-state ${queueEntry.logID}, ${Utils.safeStringify(payload.stateList)}`) - // // add the data in - // let saveSomething = false - // for (const data of payload.stateList) { - // //let wrappedResponse = data as Shardus.WrappedResponse - // //this.queueEntryAddData(queueEntry, data) - // if (data == null) { - // /* prettier-ignore */ if (logFlags.error && logFlags.verbose) this.mainLogger.error(`broadcast_finalstate data == null`) - // continue - // } - // if (queueEntry.collectedFinalData[data.accountId] == null) { - // queueEntry.collectedFinalData[data.accountId] = data - // saveSomething = true - // /* prettier-ignore */ if (logFlags.playback && logFlags.verbose) this.logger.playbackLogNote('broadcast_finalstate', `${queueEntry.logID}`, `broadcast_finalstate addFinalData qId: ${queueEntry.entryID} data:${utils.makeShortHash(data.accountId)} collected keys: ${utils.stringifyReduce(Object.keys(queueEntry.collectedFinalData))}`) - // } - // - // // if (queueEntry.state === 'syncing') { - // // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_gotBroadcastfinalstate', `${queueEntry.acceptedTx.txId}`, ` qId: ${queueEntry.entryID} data:${data.accountId}`) - // // } - // } - // if (saveSomething) { - // const nodesToSendTo: Set = new Set() - // for (const data of payload.stateList) { - // if (data == null) { - // continue - // } - // const storageNodes = this.stateManager.transactionQueue.getStorageGroupForAccount(data.accountId) - // for (const node of storageNodes) { - // nodesToSendTo.add(node) - // } - // } - // if (nodesToSendTo.size > 0) { - // payload.txGroupCycle = queueEntry.txGroupCycle - // Comms.sendGossip( - // 'gossip-final-state', - // payload, - // undefined, - // undefined, - // Array.from(nodesToSendTo), - // false, - // 4, - // queueEntry.acceptedTx.txId - // ) - // nestedCountersInstance.countEvent(`processing`, `forwarded final data to storage nodes`) - // } - // } - // } finally { - // profilerInstance.scopedProfileSectionEnd('gossip-final-state', respondSize) - // } - // } - // ) - - /** - * request_state_for_tx - * used by the transaction queue when a queue entry needs to ask for missing state - */ - // this.p2p.registerInternal( - // 'request_state_for_tx', - // async (payload: RequestStateForTxReq, respond: (arg0: RequestStateForTxResp) => unknown) => { - // profilerInstance.scopedProfileSectionStart('request_state_for_tx') - // try { - // const response: RequestStateForTxResp = { - // stateList: [], - // beforeHashes: {}, - // note: '', - // success: false, - // } - // // app.getRelevantData(accountId, tx) -> wrappedAccountState for local accounts - // let queueEntry = this.getQueueEntrySafe(payload.txid) // , payload.timestamp) - // if (queueEntry == null) { - // queueEntry = this.getQueueEntryArchived(payload.txid, 'request_state_for_tx') // , 
payload.timestamp) - // } - - // if (queueEntry == null) { - // response.note = `failed to find queue entry: ${utils.stringifyReduce(payload.txid)} ${ - // payload.timestamp - // } dbg:${this.stateManager.debugTXHistory[utils.stringifyReduce(payload.txid)]}` - // await respond(response) - // // if a node cant get data it will have to get repaired by the patcher since we can only keep stuff en the archive queue for so long - // // due to memory concerns - // return - // } - - // for (const key of payload.keys) { - // // eslint-disable-next-line security/detect-object-injection - // const data = queueEntry.originalData[key] // collectedData - // if (data) { - // //response.stateList.push(JSON.parse(data)) - // response.stateList.push(data) - // } - // } - // response.success = true - // await respond(response) - // } finally { - // profilerInstance.scopedProfileSectionEnd('request_state_for_tx') - // } - // } - // ) - - const requestStateForTxRoute: P2PTypes.P2PTypes.Route> = { - name: InternalRouteEnum.binary_request_state_for_tx, - handler: (payload, respond) => { - const route = InternalRouteEnum.binary_request_state_for_tx - profilerInstance.scopedProfileSectionStart(route) - nestedCountersInstance.countEvent('internal', route) - - const response: RequestStateForTxRespSerialized = { - stateList: [], - beforeHashes: {}, - note: '', - success: false, - } - try { - const responseStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cRequestStateForTxReq) - if (!responseStream) { - this.mainLogger.error(`${route}: Invalid request`) - respond(response, serializeRequestStateForTxResp) - return - } - const req = deserializeRequestStateForTxReq(responseStream) - if (req.txid == null) { - throw new Error('Txid is null') - } - let queueEntry = this.getQueueEntrySafe(req.txid) - if (queueEntry == null) { - queueEntry = this.getQueueEntryArchived(req.txid, InternalRouteEnum.binary_request_state_for_tx) - } - - if (queueEntry == null) { - response.note = `failed to find queue entry: ${utils.stringifyReduce(req.txid)} ${req.timestamp} dbg:${ - this.stateManager.debugTXHistory[utils.stringifyReduce(req.txid)] - }` - respond(response, serializeRequestStateForTxResp) - // if a node cant get data it will have to get repaired by the patcher since we can only keep stuff en the archive queue for so long - // due to memory concerns - return - } - - for (const key of req.keys) { - // eslint-disable-next-line security/detect-object-injection - const data = queueEntry.originalData[key] // collectedData - if (data) { - response.stateList.push(data) - } - } - response.success = true - respond(response, serializeRequestStateForTxResp) - } catch (e) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`${InternalRouteEnum.binary_request_state_for_tx}: Exception executing request: ${errorToStringFull(e)}`) - nestedCountersInstance.countEvent('internal', `${route}-exception`) - respond(response, serializeRequestStateForTxResp) - } finally { - profilerInstance.scopedProfileSectionEnd(InternalRouteEnum.binary_request_state_for_tx) - } - }, - } - - this.p2p.registerInternalBinary(requestStateForTxRoute.name, requestStateForTxRoute.handler) - - networkContext.registerExternalPost('get-tx-receipt', async (req, res) => { - let result: { success: boolean; receipt?: ArchiverReceipt | SignedReceipt; reason?: string } - try { - let error = utils.validateTypes(req.body, { - txId: 's', - timestamp: 'n', - full_receipt: 'b', - sign: 'o', - }) - if (error) { - res.json((result = { success: false, reason: error 
})) - return - } - error = utils.validateTypes(req.body.sign, { - owner: 's', - sig: 's', - }) - if (error) { - res.json((result = { success: false, reason: error })) - return - } - - const { txId, timestamp, full_receipt, sign } = req.body - const isReqFromArchiver = Archivers.archivers.has(sign.owner) - if (!isReqFromArchiver) { - result = { success: false, reason: 'Request not from Archiver.' } - } else { - const isValidSignature = this.crypto.verify(req.body, sign.owner) - if (isValidSignature) { - let queueEntry: QueueEntry - if ( - this.archivedQueueEntriesByID.has(txId) && - this.archivedQueueEntriesByID.get(txId)?.acceptedTx?.timestamp === timestamp - ) { - if (logFlags.verbose) console.log('get-tx-receipt: ', txId, timestamp, 'archived') - queueEntry = this.archivedQueueEntriesByID.get(txId) - } else if ( - this._transactionQueueByID.has(txId) && - this._transactionQueueByID.get(txId)?.state === 'commiting' && - this._transactionQueueByID.get(txId)?.acceptedTx?.timestamp === timestamp - ) { - if (logFlags.verbose) console.log('get-tx-receipt: ', txId, timestamp, 'commiting') - queueEntry = this._transactionQueueByID.get(txId) - } - if (!queueEntry) { - res.status(400).json({ success: false, reason: 'Receipt Not Found.' }) - return - } - if (full_receipt) { - const fullReceipt: ArchiverReceipt = await this.getArchiverReceiptFromQueueEntry(queueEntry) - if (fullReceipt === null) { - res.status(400).json({ success: false, reason: 'Receipt Not Found.' }) - return - } - result = Utils.safeJsonParse(Utils.safeStringify({ success: true, receipt: fullReceipt })) - } else { - result = { success: true, receipt: this.stateManager.getSignedReceipt(queueEntry) } - } - } else { - result = { success: false, reason: 'Invalid Signature.' } - } - } - res.json(result) - } catch (e) { - console.log('Error caught in /get-tx-receipt: ', e) - res.json((result = { success: false, reason: e })) - } - }) - } - - isTxInPendingNonceQueue(accountId: string, txId: string): boolean { - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`isTxInPendingNonceQueue ${accountId} ${txId}`, this.nonceQueue) - const queue = this.nonceQueue.get(accountId) - if (queue == null) { - return false - } - for (const item of queue) { - if (item.txId === txId) { - return true - } - } - return false - } - - getPendingCountInNonceQueue(): { totalQueued: number; totalAccounts: number; avgQueueLength: number } { - let totalQueued = 0 - let totalAccounts = 0 - for (const queue of this.nonceQueue.values()) { - totalQueued += queue.length - totalAccounts++ - } - const avgQueueLength = totalQueued / totalAccounts - return { totalQueued, totalAccounts, avgQueueLength } - } - - addTransactionToNonceQueue(nonceQueueEntry: NonceQueueItem): { - success: boolean - reason?: string - alreadyAdded?: boolean - } { - try { - let queue = this.nonceQueue.get(nonceQueueEntry.accountId) - if (queue == null || (Array.isArray(queue) && queue.length === 0)) { - queue = [nonceQueueEntry] - this.nonceQueue.set(nonceQueueEntry.accountId, queue) - if (logFlags.debug) - this.mainLogger.debug( - `adding new nonce tx: ${nonceQueueEntry.txId} ${nonceQueueEntry.accountId} with nonce ${nonceQueueEntry.nonce}` - ) - } else if (queue && queue.length > 0) { - const index = utils.binarySearch(queue, nonceQueueEntry, (a, b) => Number(a.nonce) - Number(b.nonce)) - - if (index >= 0) { - // there is existing item with the same nonce. 
replace it with the new one - queue[index] = nonceQueueEntry - this.nonceQueue.set(nonceQueueEntry.accountId, queue) - nestedCountersInstance.countEvent('processing', 'replaceExistingNonceTx') - if (logFlags.debug) - this.mainLogger.debug( - `replace existing nonce tx ${nonceQueueEntry.accountId} with nonce ${nonceQueueEntry.nonce}, txId: ${nonceQueueEntry.txId}` - ) - return { success: true, reason: 'Replace existing pending nonce tx', alreadyAdded: true } - } - // add new item to the queue - utils.insertSorted(queue, nonceQueueEntry, (a, b) => Number(a.nonce) - Number(b.nonce)) - this.nonceQueue.set(nonceQueueEntry.accountId, queue) - } - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455106 ${shardusGetTime()} tx:${nonceQueueEntry.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: pause_nonceQ`) - nestedCountersInstance.countEvent('processing', 'addTransactionToNonceQueue') - if (logFlags.debug) - this.mainLogger.debug( - `Added tx to nonce queue for ${nonceQueueEntry.accountId} with nonce ${nonceQueueEntry.nonce} nonceQueue: ${queue.length}` - ) - return { success: true, reason: `Nonce queue size for account: ${queue.length}`, alreadyAdded: false } - } catch (e) { - nestedCountersInstance.countEvent('processing', 'addTransactionToNonceQueueError') - this.mainLogger.error( - `Error adding tx to nonce queue: ${e.message}, tx: ${utils.stringifyReduce(nonceQueueEntry)}` - ) - return { success: false, reason: e.message, alreadyAdded: false } - } - } - async processNonceQueue(accounts: Shardus.WrappedData[]): Promise { - for (const account of accounts) { - const queue = this.nonceQueue.get(account.accountId) - if (queue == null) { - continue - } - for (const item of queue) { - const accountNonce = await this.app.getAccountNonce(account.accountId, account) - if (item.nonce === accountNonce) { - nestedCountersInstance.countEvent('processing', 'processNonceQueue foundMatchingNonce') - if (logFlags.debug) - this.mainLogger.debug( - `Found matching nonce in queue or ${account.accountId} with nonce ${item.nonce}`, - item - ) - item.appData.requestNewTimestamp = true - - // start of timestamp logging - if (logFlags.important_as_error) { - const txTimestamp = this.app.getTimestampFromTransaction(item.tx, item.appData) - const nowNodeTimestamp = shardusGetTime() - const delta = nowNodeTimestamp - txTimestamp - const ntpOffset = getNetworkTimeOffset() - /* prettier-ignore */ console.log(`TxnTS: pre _timestampAndQueueTransaction txTimestamp=${txTimestamp}, nowNodeTimestamp=${nowNodeTimestamp}, delta=${delta}, ntpOffset=${ntpOffset}, txID=${item.txId}`) - } - // end of timestamp logging. - - await this.stateManager.shardus._timestampAndQueueTransaction( - item.tx, - item.appData, - item.global, - item.noConsensus, - 'nonceQueue' - ) - - // start of timestamp logging - if (logFlags.important_as_error) { - const txTimestamp = this.app.getTimestampFromTransaction(item.tx, item.appData) - const nowNodeTimestamp = shardusGetTime() - const delta = nowNodeTimestamp - txTimestamp - const ntpOffset = getNetworkTimeOffset() - /* prettier-ignore */ console.log(`TxnTS: post _timestampAndQueueTransaction txTimestamp=${txTimestamp}, nowNodeTimestamp=${nowNodeTimestamp}, delta=${delta}, ntpOffset=${ntpOffset}, txID=${item.txId}`) - } - // end of timestamp logging. - - // remove the item from the queue - const index = queue.indexOf(item) - queue.splice(index, 1) - - //we should break here. we keep looking up account values after we go to the step needed. 
- //this assumes we will not put two TXs with the same nonce value in the queue. - break - } - } - } - } - handleSharedTX(tx: Shardus.TimestampedTx, appData: unknown, sender: Shardus.Node): QueueEntry { - profilerInstance.profileSectionStart('handleSharedTX') - const internalTx = this.app.isInternalTx(tx) - if ((internalTx && !isInternalTxAllowed()) || (!internalTx && networkMode !== 'processing')) { - profilerInstance.profileSectionEnd('handleSharedTX') - // Block invalid txs in case a node maliciously relays them to other nodes - return null - } - if (!internalTx && !this.config.p2p.allowEndUserTxnInjections) { - profilerInstance.profileSectionEnd('handleSharedTX') - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('tx_non_internal_tx_paused', '', 'execution paused for non-internal tx') - return null - } - // Perform fast validation of the transaction fields - profilerInstance.scopedProfileSectionStart('handleSharedTX_validateTX') - const validateResult = this.app.validate(tx, appData) - profilerInstance.scopedProfileSectionEnd('handleSharedTX_validateTX') - if (validateResult.success === false) { - this.statemanager_fatal( - `spread_tx_to_group_validateTX`, - `spread_tx_to_group validateTxnFields failed: ${utils.stringifyReduce(validateResult)}` - ) - profilerInstance.profileSectionEnd('handleSharedTX') - return null - } - - // Ask App to crack open tx and return timestamp, id (hash), and keys - const { timestamp, id, keys, shardusMemoryPatterns } = this.app.crack(tx, appData) - - // Check if we already have this tx in our queue - let queueEntry = this.getQueueEntrySafe(id) // , payload.timestamp) - if (queueEntry) { - profilerInstance.profileSectionEnd('handleSharedTX') - return null - } - - // Need to review these timeouts before main net. what bad things can happen by setting a timestamp too far in the future or past. 
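// A condensed sketch of the fast-path checks handleSharedTX performs before a relayed tx
// is queued: app-level validation, cracking out the tx id, and a duplicate check. The App
// interface below is a simplified stand-in for the real application callbacks (the real
// crack() also returns timestamp, keys, and memory patterns).
interface SharedTxApp {
  validate(tx: unknown, appData: unknown): { success: boolean; reason?: string }
  crack(tx: unknown, appData: unknown): { timestamp: number; id: string; keys: string[] }
}

function intakeSharedTx(app: SharedTxApp, seenTxIds: Set<string>, tx: unknown, appData: unknown): string | null {
  const validation = app.validate(tx, appData)
  if (validation.success === false) return null      // reject malformed txs before any queue work
  const { id } = app.crack(tx, appData)               // id (hash) is derived by the app
  if (seenTxIds.has(id)) return null                  // already in the queue: nothing to do
  seenTxIds.add(id)
  return id                                           // caller would now build and enqueue a QueueEntry
}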
- // only a subset of transactions can have timestamp set by the sender while others use independent consensus (askTxnTimestampFromNode) - // but that is up to the dapp - const mostOfQueueSitTimeMs = this.stateManager.queueSitTime * 0.9 - const txExpireTimeMs = this.config.transactionExpireTime * 1000 - const age = shardusGetTime() - timestamp - if (inRangeOfCurrentTime(timestamp, mostOfQueueSitTimeMs, txExpireTimeMs) === false) { - /* prettier-ignore */ if (logFlags.verbose) this.statemanager_fatal( `spread_tx_to_group_OldTx_or_tooFuture`, 'spread_tx_to_group cannot accept tx with age: ' + age ) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_spread_tx_to_groupToOldOrTooFuture', '', 'spread_tx_to_group working on tx with age: ' + age) - profilerInstance.profileSectionEnd('handleSharedTX') - return null - } - - // Pack into AcceptedTx for routeAndQueueAcceptedTransaction - const acceptedTx: AcceptedTx = { - timestamp, - txId: id, - keys, - data: tx, - appData, - shardusMemoryPatterns, - } - - const noConsensus = false // this can only be true for a set command which will never come from an endpoint - const added = this.routeAndQueueAcceptedTransaction( - acceptedTx, - /*sendGossip*/ false, - sender, - /*globalModification*/ false, - noConsensus - ) - if (added === 'lost') { - profilerInstance.profileSectionEnd('handleSharedTX') - return null // we are faking that the message got lost so bail here - } - if (added === 'out of range') { - profilerInstance.profileSectionEnd('handleSharedTX') - return null - } - if (added === 'notReady') { - profilerInstance.profileSectionEnd('handleSharedTX') - return null - } - queueEntry = this.getQueueEntrySafe(id) //, payload.timestamp) // now that we added it to the queue, it should be possible to get the queueEntry now - - if (queueEntry == null) { - // do not gossip this, we are not involved - // downgrading, this does not seem to be fatal, but may need further logs/testing - //this.statemanager_fatal(`spread_tx_to_group_noQE`, `spread_tx_to_group failed: cant find queueEntry for: ${utils.makeShortHash(payload.id)}`) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('spread_tx_to_group_noQE', '', `spread_tx_to_group failed: cant find queueEntry for: ${utils.makeShortHash(id)}`) - profilerInstance.profileSectionEnd('handleSharedTX') - return null - } - - profilerInstance.profileSectionEnd('handleSharedTX') - return queueEntry - } - /*** * ### ######## ######## ###### ######## ### ######## ######## * ## ## ## ## ## ## ## ## ## ## ## ## ## @@ -1835,6563 +891,90 @@ class TransactionQueue { } /* eslint-enable security/detect-object-injection */ - /*** - * ######## ## ## ####### ## ## ######## ## ## ######## - * ## ### ## ## ## ## ## ## ## ## ## - * ## #### ## ## ## ## ## ## ## ## ## - * ###### ## ## ## ## ## ## ## ###### ## ## ###### - * ## ## #### ## ## ## ## ## ## ## ## ## - * ## ## ### ## ## ## ## ## ## ## ## - * ######## ## ## ##### ## ####### ######## ####### ######## - */ - routeAndQueueAcceptedTransaction( - acceptedTx: AcceptedTx, - sendGossip = true, - sender: Shardus.Node | null, - globalModification: boolean, - noConsensus: boolean - ): string | boolean { - // dropping these too early.. hmm we finished syncing before we had the first shard data. 
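// An illustrative version of the age window applied above: a tx is accepted only if its
// timestamp is no further in the future than roughly 90% of the queue sit time, and no
// older than the configured expiry. The helper name and exact semantics of the real
// inRangeOfCurrentTime may differ; this is a sketch of the check as described here.
function isTimestampAcceptable(
  txTimestamp: number,
  nowMs: number,
  maxFutureMs: number,   // e.g. queueSitTime * 0.9
  maxPastMs: number      // e.g. transactionExpireTime * 1000
): boolean {
  const age = nowMs - txTimestamp
  if (age > maxPastMs) return false      // too old: it would expire while sitting in the queue
  if (-age > maxFutureMs) return false   // too far in the future relative to this node's clock
  return true
}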
- // if (this.stateManager.currentCycleShardData == null) { - // // this.preTXQueue.push(acceptedTX) - // return 'notReady' // it is too early to care about the tx - // } - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('routeAndQueueAcceptedTransaction-debug', '', `sendGossip:${sendGossip} globalModification:${globalModification} noConsensus:${noConsensus} this.readyforTXs:${this.stateManager.accountSync.readyforTXs} hasshardData:${this.stateManager.currentCycleShardData != null} acceptedTx:${utils.stringifyReduce(acceptedTx)} `) - if (this.stateManager.accountSync.readyforTXs === false) { - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction too early for TX: this.readyforTXs === false`) - return 'notReady' // it is too early to care about the tx - } - if (this.stateManager.currentCycleShardData == null) { - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction too early for TX: this.stateManager.currentCycleShardData == null`) - return 'notReady' - } + // compute the rand of the node where rank = node_id XOR hash(tx_id + tx_ts) + computeNodeRank(nodeId: string, txId: string, txTimestamp: number): bigint { + if (nodeId == null || txId == null || txTimestamp == null) return BigInt(0) + const hash = this.crypto.hash([txId, txTimestamp]) + return BigInt(XOR(nodeId, hash)) + } - try { - this.profiler.profileSectionStart('enqueue') + // sort the nodeList by rank, in descending order + orderNodesByRank(nodeList: Shardus.Node[], queueEntry: QueueEntry): Shardus.NodeWithRank[] { + const nodeListWithRankData: Shardus.NodeWithRank[] = [] - if (this.stateManager.accountGlobals.hasknownGlobals == false) { - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction too early for TX: hasknownGlobals == false`) - return 'notReady' + for (let i = 0; i < nodeList.length; i++) { + const node: Shardus.Node = nodeList[i] + const rank = this.computeNodeRank(node.id, queueEntry.acceptedTx.txId, queueEntry.acceptedTx.timestamp) + const nodeWithRank: Shardus.NodeWithRank = { + rank, + id: node.id, + status: node.status, + publicKey: node.publicKey, + externalIp: node.externalIp, + externalPort: node.externalPort, + internalIp: node.internalIp, + internalPort: node.internalPort, } + nodeListWithRankData.push(nodeWithRank) + } + return nodeListWithRankData.sort((a: Shardus.NodeWithRank, b: Shardus.NodeWithRank) => { + return b.rank > a.rank ? 
1 : -1 + }) + } - const keysResponse = acceptedTx.keys - const timestamp = acceptedTx.timestamp - const txId = acceptedTx.txId + resetReceiptsToForward(): void { + const MAX_RECEIPT_AGE_MS = 15000 // 15s + const now = shardusGetTime() + // Clear receipts that are older than MAX_RECEIPT_AGE_MS + for (const [key] of this.forwardedReceiptsByTimestamp) { + if (now - key > MAX_RECEIPT_AGE_MS) { + this.forwardedReceiptsByTimestamp.delete(key) + } + } + } - // This flag turns of consensus for all TXs for debuggging - if (this.stateManager.debugNoTxVoting === true) { - noConsensus = true + /** + * Helper for processQueue to dump debug info + * @param queueEntry + * @param app + */ + processQueue_debugAccountData(queueEntry: QueueEntry, app: Shardus.App): string { + let debugStr = '' + //if (logFlags.verbose) { //this function is always verbose + if (queueEntry.uniqueKeys == null) { + //TSConversion double check if this needs extra logging + return queueEntry.logID + ' uniqueKeys empty error' + } + /* eslint-disable security/detect-object-injection */ + for (const key of queueEntry.uniqueKeys) { + if (queueEntry.collectedData[key] != null) { + debugStr += utils.makeShortHash(key) + ' : ' + app.getAccountDebugValue(queueEntry.collectedData[key]) + ', ' } + } + /* eslint-enable security/detect-object-injection */ + //} + return debugStr + } - if (configContext.stateManager.waitUpstreamTx) { - const keysToCheck = [] - if (acceptedTx.shardusMemoryPatterns && acceptedTx.shardusMemoryPatterns.rw) { - keysToCheck.push(...acceptedTx.shardusMemoryPatterns.rw) - } - if (acceptedTx.shardusMemoryPatterns && acceptedTx.shardusMemoryPatterns.wo) { - keysToCheck.push(...acceptedTx.shardusMemoryPatterns.wo) - } - if (keysToCheck.length === 0) { - const sourceKey = acceptedTx.keys.sourceKeys[0] - keysToCheck.push(sourceKey) - } - for (const key of keysToCheck) { - const isAccountInQueue = this.isAccountInQueue(key) - if (isAccountInQueue) { - nestedCountersInstance.countEvent( - 'stateManager', - `cancel enqueue, isAccountInQueue ${key} ${isAccountInQueue}` - ) - return false - } - } - } - - let cycleNumber = this.stateManager.currentCycleShardData.cycleNumber - if (Context.config.stateManager.deterministicTXCycleEnabled) { - cycleNumber = CycleChain.getCycleNumberFromTimestamp( - acceptedTx.timestamp - Context.config.stateManager.reduceTimeFromTxTimestamp, - true, - false - ) - if (cycleNumber > this.stateManager.currentCycleShardData.cycleNumber) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction derived txGroupCycle > currentCycleShardData.cycleNumber. txId:${txId} txGroupCycle:${cycleNumber} currentCycleShardData.cycleNumber:${this.stateManager.currentCycleShardData.cycleNumber}`) - nestedCountersInstance.countEvent('stateManager', 'derived txGroupCycle is larger than current cycle') - if (Context.config.stateManager.fallbackToCurrentCycleFortxGroup) { - cycleNumber = this.stateManager.currentCycleShardData.cycleNumber - } - } else if (cycleNumber < this.stateManager.currentCycleShardData.cycleNumber) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction derived txGroupCycle < currentCycleShardData.cycleNumber. 
txId:${txId} txGroupCycle:${cycleNumber} currentCycleShardData.cycleNumber:${this.stateManager.currentCycleShardData.cycleNumber}`) - nestedCountersInstance.countEvent('stateManager', 'derived txGroupCycle is less than current cycle') - } else if (cycleNumber === this.stateManager.currentCycleShardData.cycleNumber) { - nestedCountersInstance.countEvent('stateManager', 'derived txGroupCycle is same as current cycle') - } - } - - this.queueEntryCounter++ - const txQueueEntry: QueueEntry = { - gossipedCompleteData: false, - eligibleNodeIdsToConfirm: new Set(), - eligibleNodeIdsToVote: new Set(), - acceptedTx: acceptedTx, - uniqueTags: this.app.getUniqueAppTags?.(acceptedTx.data.tx), - txKeys: keysResponse, - executionShardKey: null, - isInExecutionHome: true, - shardusMemoryPatternSets: null, - noConsensus, - collectedData: {}, - collectedFinalData: {}, - originalData: {}, - beforeHashes: {}, - homeNodes: {}, - patchedOnNodes: new Map(), - hasShardInfo: false, - state: 'aging', - dataCollected: 0, - hasAll: false, - entryID: this.queueEntryCounter, - localKeys: {}, - localCachedData: {}, - syncCounter: 0, - didSync: false, - queuedBeforeMainSyncComplete: false, - didWakeup: false, - syncKeys: [], - logstate: '', - requests: {}, - globalModification: globalModification, - collectedVotes: [], - collectedVoteHashes: [], - pendingConfirmOrChallenge: new Map(), - pendingVotes: new Map(), - waitForReceiptOnly: false, - m2TimeoutReached: false, - debugFail_voteFlip: false, - debugFail_failNoRepair: false, - requestingReceipt: false, - cycleToRecordOn: -5, - involvedPartitions: [], - involvedGlobalPartitions: [], - shortReceiptHash: '', - requestingReceiptFailed: false, - approximateCycleAge: cycleNumber, - ourNodeInTransactionGroup: false, - ourNodeInConsensusGroup: false, - logID: '', - txGroupDebug: '', - uniqueWritableKeys: [], - txGroupCycle: 0, - updatedTxGroupCycle: 0, - updatedTransactionGroup: null, - receiptEverRequested: false, - repairStarted: false, - repairFailed: false, - hasValidFinalData: false, - pendingDataRequest: false, - queryingFinalData: false, - lastFinalDataRequestTimestamp: 0, - newVotes: false, - fromClient: sendGossip, - gossipedReceipt: false, - gossipedVote: false, - gossipedConfirmOrChallenge: false, - completedConfirmedOrChallenge: false, - uniqueChallengesCount: 0, - uniqueChallenges: {}, - archived: false, - ourTXGroupIndex: -1, - ourExGroupIndex: -1, - involvedReads: {}, - involvedWrites: {}, - txDebug: { - enqueueHrTime: process.hrtime(), - startTime: {}, - endTime: {}, - duration: {}, - startTimestamp: {}, - endTimestamp: {}, - }, - executionGroupMap: new Map(), - executionNodeIdSorted: [], - txSieveTime: 0, - debug: {}, - voteCastAge: 0, - dataSharedTimestamp: 0, - firstVoteReceivedTimestamp: 0, - firstConfirmOrChallengeTimestamp: 0, - lastVoteReceivedTimestamp: 0, - lastConfirmOrChallengeTimestamp: 0, - robustQueryVoteCompleted: false, - robustQueryConfirmOrChallengeCompleted: false, - acceptVoteMessage: true, - acceptConfirmOrChallenge: true, - accountDataSet: false, - topConfirmations: new Set(), - topVoters: new Set(), - hasRobustConfirmation: false, - sharedCompleteData: false, - correspondingGlobalOffset: 0, - isSenderWrappedTxGroup: {}, - isNGT: this.app.isNGT(acceptedTx.data?.tx), - } // age comes from timestamp - this.txDebugMarkStartTime(txQueueEntry, 'total_queue_time') - this.txDebugMarkStartTime(txQueueEntry, 'aging') - - // todo faster hash lookup for this maybe? 
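// Stepping back to the deterministicTXCycleEnabled branch above: the tx group cycle is
// derived from the tx timestamp (minus a configured offset) rather than from the node's
// current cycle, with an optional fallback when the derived cycle runs ahead of the cycle
// this node has shard data for. getCycleNumberFromTimestamp below is a stand-in for the
// CycleChain helper; this is a sketch under that assumption.
function deriveTxGroupCycle(
  txTimestamp: number,
  reduceTimeFromTxTimestampMs: number,
  currentCycle: number,
  fallbackToCurrentCycle: boolean,
  getCycleNumberFromTimestamp: (ts: number) => number
): number {
  let cycle = getCycleNumberFromTimestamp(txTimestamp - reduceTimeFromTxTimestampMs)
  if (cycle > currentCycle && fallbackToCurrentCycle) {
    // derived cycle is ahead of this node's view; fall back so shard data exists for it
    cycle = currentCycle
  }
  return cycle
}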
- const entry = this.getQueueEntrySafe(acceptedTx.txId) // , acceptedTx.timestamp) - if (entry) { - return false // already in our queue, or temp queue - } - - txQueueEntry.logID = utils.makeShortHash(acceptedTx.txId) - - this.stateManager.debugTXHistory[txQueueEntry.logID] = 'enteredQueue' - - if (this.app.canDebugDropTx(acceptedTx.data)) { - if ( - this.stateManager.testFailChance( - this.stateManager.loseTxChance, - 'loseTxChance', - txQueueEntry.logID, - '', - logFlags.verbose - ) === true - ) { - return 'lost' - } - if ( - this.stateManager.testFailChance( - this.stateManager.voteFlipChance, - 'voteFlipChance', - txQueueEntry.logID, - '', - logFlags.verbose - ) === true - ) { - txQueueEntry.debugFail_voteFlip = true - } - - if ( - globalModification === false && - this.stateManager.testFailChance( - this.stateManager.failNoRepairTxChance, - 'failNoRepairTxChance', - txQueueEntry.logID, - '', - logFlags.verbose - ) === true - ) { - txQueueEntry.debugFail_failNoRepair = true - } - } - - try { - // use shardusGetTime() instead of Date.now as many thing depend on it - const age = shardusGetTime() - timestamp - - const keyHash: StringBoolObjectMap = {} //TODO replace with Set - for (const key of txQueueEntry.txKeys.allKeys) { - if (key == null) { - // throw new Error(`routeAndQueueAcceptedTransaction key == null ${key}`) - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction key == null ${timestamp} not putting tx in queue.`) - return false - } - - // eslint-disable-next-line security/detect-object-injection - keyHash[key] = true - } - txQueueEntry.uniqueKeys = Object.keys(keyHash) - - if (txQueueEntry.txKeys.allKeys == null || txQueueEntry.txKeys.allKeys.length === 0) { - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction allKeys == null || allKeys.length === 0 ${timestamp} not putting tx in queue.`) - return false - } - let cycleShardData = this.stateManager.currentCycleShardData - if (Context.config.stateManager.deterministicTXCycleEnabled) { - txQueueEntry.txGroupCycle = cycleNumber - cycleShardData = this.stateManager.shardValuesByCycle.get(cycleNumber) - } - txQueueEntry.txDebug.cycleSinceActivated = - cycleNumber - activeByIdOrder.find((node) => node.id === Self.id).activeCycle - - if (cycleShardData == null) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction logID:${txQueueEntry.logID} cycleShardData == null cycle:${cycleNumber} not putting tx in queue.`) - nestedCountersInstance.countEvent('stateManager', 'routeAndQueueAcceptedTransaction cycleShardData == null') - return false - } - - this.updateHomeInformation(txQueueEntry) - - //set the executionShardKey for the transaction - if (txQueueEntry.globalModification === false && this.executeInOneShard) { - //USE the first key in the list of all keys. Applications much carefully sort this list - //so that we start in the optimal shard. This will matter less when shard hopping is implemented - txQueueEntry.executionShardKey = txQueueEntry.txKeys.allKeys[0] - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`routeAndQueueAcceptedTransaction set executionShardKey tx:${txQueueEntry.logID} ts:${timestamp} executionShardKey: ${utils.stringifyReduce(txQueueEntry.executionShardKey)} `) - - // we were doing this in queueEntryGetTransactionGroup. moved it earlier. 
- const { homePartition } = ShardFunctions.addressToPartition( - cycleShardData.shardGlobals, - txQueueEntry.executionShardKey - ) - - const homeShardData = cycleShardData.parititionShardDataMap.get(homePartition) - - //set the nodes that are in the executionGroup. - //This is needed so that consensus will expect less nodes to be voting - const unRankedExecutionGroup = homeShardData.homeNodes[0].consensusNodeForOurNodeFull.slice() - if (this.usePOQo) { - txQueueEntry.executionGroup = this.orderNodesByRank(unRankedExecutionGroup, txQueueEntry) - } else if (this.useNewPOQ) { - txQueueEntry.executionGroup = this.orderNodesByRank(unRankedExecutionGroup, txQueueEntry) - } else { - txQueueEntry.executionGroup = unRankedExecutionGroup - } - // for the new FACT algorithm - txQueueEntry.executionNodeIdSorted = txQueueEntry.executionGroup.map((node) => node.id).sort() - - if (txQueueEntry.isInExecutionHome) { - txQueueEntry.ourNodeRank = this.computeNodeRank( - cycleShardData.ourNode.id, - txQueueEntry.acceptedTx.txId, - txQueueEntry.acceptedTx.timestamp - ) - } - - const minNodesToVote = 3 - const voterPercentage = configContext.stateManager.voterPercentage - const numberOfVoters = Math.max( - minNodesToVote, - Math.floor(txQueueEntry.executionGroup.length * voterPercentage) - ) - // voters are highest ranked nodes - txQueueEntry.eligibleNodeIdsToVote = new Set( - txQueueEntry.executionGroup.slice(0, numberOfVoters).map((node) => node.id) - ) - - // confirm nodes are lowest ranked nodes - txQueueEntry.eligibleNodeIdsToConfirm = new Set( - txQueueEntry.executionGroup - .slice(txQueueEntry.executionGroup.length - numberOfVoters) - .map((node) => node.id) - ) - - // calculate globalOffset for FACT - // take last 2 bytes of the txId and convert it to an integer - txQueueEntry.correspondingGlobalOffset = parseInt(txId.slice(-4), 16) - - const ourID = cycleShardData.ourNode.id - for (let idx = 0; idx < txQueueEntry.executionGroup.length; idx++) { - // eslint-disable-next-line security/detect-object-injection - const node = txQueueEntry.executionGroup[idx] - txQueueEntry.executionGroupMap.set(node.id, node) - if (node.id === ourID) { - txQueueEntry.ourExGroupIndex = idx - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: executor index ${txQueueEntry.ourExGroupIndex}:${(node as Shardus.NodeWithRank).rank}`) - } - } - if (txQueueEntry.eligibleNodeIdsToConfirm.has(Self.id)) { - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: confirmator`) - } - if (txQueueEntry.eligibleNodeIdsToVote.has(Self.id)) { - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: voter`) - } - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: groupsize voters ${txQueueEntry.eligibleNodeIdsToConfirm.size}`) - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: groupsize confirmators ${txQueueEntry.eligibleNodeIdsToConfirm.size}`) - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txId} Note over 
${NodeList.activeIdToPartition.get(Self.id)}: groupsize execution ${txQueueEntry.executionGroup.length}`) - - //if we are not in the execution group then set isInExecutionHome to false - if (txQueueEntry.executionGroupMap.has(cycleShardData.ourNode.id) === false) { - txQueueEntry.isInExecutionHome = false - } - - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`routeAndQueueAcceptedTransaction info ${txQueueEntry.logID} isInExecutionHome:${txQueueEntry.isInExecutionHome} hasShardInfo:${txQueueEntry.hasShardInfo}`) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('routeAndQueueAcceptedTransaction', `routeAndQueueAcceptedTransaction info ${txQueueEntry.logID} isInExecutionHome:${txQueueEntry.isInExecutionHome} hasShardInfo:${txQueueEntry.hasShardInfo} executionShardKey:${utils.makeShortHash(txQueueEntry.executionShardKey)}`) - /* prettier-ignore */ if (this.stateManager.consensusLog) this.mainLogger.debug(`routeAndQueueAcceptedTransaction info ${txQueueEntry.logID} isInExecutionHome:${txQueueEntry.isInExecutionHome}`) - } - - // calculate information needed for receiptmap - txQueueEntry.cycleToRecordOn = CycleChain.getCycleNumberFromTimestamp(timestamp) - /* prettier-ignore */ if (logFlags.verbose) console.log('Cycle number from timestamp', timestamp, txQueueEntry.cycleToRecordOn) - if (txQueueEntry.cycleToRecordOn < 0) { - nestedCountersInstance.countEvent('getCycleNumberFromTimestamp', 'caused Enqueue fail') - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`routeAndQueueAcceptedTransaction failed to calculate cycle ${timestamp} error code:${txQueueEntry.cycleToRecordOn}`) - return false - } - if (txQueueEntry.cycleToRecordOn == null) { - this.statemanager_fatal( - `routeAndQueueAcceptedTransaction cycleToRecordOn==null`, - `routeAndQueueAcceptedTransaction cycleToRecordOn==null ${txQueueEntry.logID} ${timestamp}` - ) - } - - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueInsertion_start', txQueueEntry.logID, `${txQueueEntry.logID} uniqueKeys:${utils.stringifyReduce(txQueueEntry.uniqueKeys)} txKeys: ${utils.stringifyReduce(txQueueEntry.txKeys)} cycleToRecordOn:${txQueueEntry.cycleToRecordOn}`) - - // Look at our keys and log which are known global accounts. Set global accounts for keys if this is a globalModification TX - for (const key of txQueueEntry.uniqueKeys) { - if (globalModification === true) { - if (this.stateManager.accountGlobals.isGlobalAccount(key)) { - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('globalAccountMap', `routeAndQueueAcceptedTransaction - has account:${utils.stringifyReduce(key)}`) - } else { - //this makes the code aware that this key is for a global account. - //is setting this here too soon? - //it should be that p2p has already checked the receipt before calling shardus.push with global=true - this.stateManager.accountGlobals.setGlobalAccount(key) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('globalAccountMap', `routeAndQueueAcceptedTransaction - set account:${utils.stringifyReduce(key)}`) - } - } - } - - // slightly different flag that didsync. This is less about if our address range was done syncing (which can happen any time) - // and just a simple check to see if this was queued before the main sync phase. 
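// A self-contained sketch of the rank-based role assignment set up a bit earlier
// (computeNodeRank / eligibleNodeIdsToVote / eligibleNodeIdsToConfirm): each node's rank is
// nodeId XOR hash(txId, timestamp), the execution group is sorted by rank descending, the
// highest-ranked slice (with a floor of 3) votes and the lowest-ranked slice confirms.
// hashHex and xorHex are illustrative stand-ins for the project's crypto/XOR helpers and
// assume non-empty hex-string ids.
import { createHash } from 'crypto'

const hashHex = (txId: string, ts: number): string =>
  createHash('sha256').update(`${txId}:${ts}`).digest('hex')

const xorHex = (a: string, b: string): bigint => BigInt('0x' + a) ^ BigInt('0x' + b)

function assignRoles(nodeIds: string[], txId: string, ts: number, voterPercentage: number) {
  const txHash = hashHex(txId, ts)
  const ranked = nodeIds
    .map((id) => ({ id, rank: xorHex(id, txHash) }))
    .sort((a, b) => (b.rank > a.rank ? 1 : -1))                       // descending by rank
  const numberOfVoters = Math.max(3, Math.floor(ranked.length * voterPercentage))
  return {
    voters: new Set(ranked.slice(0, numberOfVoters).map((n) => n.id)),                        // highest ranks
    confirmators: new Set(ranked.slice(ranked.length - numberOfVoters).map((n) => n.id)),     // lowest ranks
  }
}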
- // for now, just used for more detailed logging so we can sort out if problem TXs were from shortly before we were fully done - // but after a sync range was finished, (used shortly below in the age check) - txQueueEntry.queuedBeforeMainSyncComplete = this.stateManager.accountSync.dataSyncMainPhaseComplete - - // Check to see if any keys are inside of a syncing range. - // If it is a global key in a non-globalModification TX then we dont care about it - - // COMPLETE HACK!!!!!!!!! - // for (let key of txQueueEntry.uniqueKeys) { - // let syncTracker = this.stateManager.accountSync.getSyncTracker(key) - // // only look at syncing for accounts that are changed. - // // if the sync range is for globals and the tx is not a global modifier then skip it! - - // // todo take another look at this condition and syncTracker.globalAddressMap - // if (syncTracker != null && (syncTracker.isGlobalSyncTracker === false || txQueueEntry.globalModification === true)) { - // if (this.stateManager.accountSync.softSync_noSyncDelay === true) { - // //no delay means that don't pause the TX in state = 'syncing' - // } else { - // txQueueEntry.state = 'syncing' - // txQueueEntry.syncCounter++ - // syncTracker.queueEntries.push(txQueueEntry) // same tx may get pushed in multiple times. that's ok. - // syncTracker.keys[key] = true //mark this key for fast testing later - // } - - // txQueueEntry.didSync = true // mark that this tx had to sync, this flag should never be cleared, we will use it later to not through stuff away. - // txQueueEntry.syncKeys.push(key) // used later to instruct what local data we should JIT load - // txQueueEntry.localKeys[key] = true // used for the filter. TODO review why this is set true here!!! seems like this may flag some keys not owned by this node! - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_queued_and_set_syncing', `${txQueueEntry.logID}`, `${txQueueEntry.logID} qId: ${txQueueEntry.entryID} account:${utils.stringifyReduce(key)}`) - // } - // } - - if (age > this.stateManager.queueSitTime * 0.9) { - if (txQueueEntry.didSync === true) { - /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', `enqueue old TX didSync === true queuedBeforeMainSyncComplete:${txQueueEntry.queuedBeforeMainSyncComplete}`) - } else { - /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', `enqueue old TX didSync === false queuedBeforeMainSyncComplete:${txQueueEntry.queuedBeforeMainSyncComplete}`) - if (txQueueEntry.queuedBeforeMainSyncComplete) { - //only a fatal if it was after the main sync phase was complete. - this.statemanager_fatal( - `routeAndQueueAcceptedTransaction_olderTX`, - 'routeAndQueueAcceptedTransaction working on older tx ' + timestamp + ' age: ' + age - ) - // TODO consider throwing this out. 
right now it is just a warning - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_oldQueueInsertion', '', 'routeAndQueueAcceptedTransaction working on older tx ' + timestamp + ' age: ' + age) - } - } - } - - // Refine our list of which keys will be updated in this transaction : uniqueWritableKeys - for (const key of txQueueEntry.uniqueKeys) { - const isGlobalAcc = this.stateManager.accountGlobals.isGlobalAccount(key) - - // if it is a global modification and global account we can write - if (globalModification === true && isGlobalAcc === true) { - txQueueEntry.uniqueWritableKeys.push(key) - } - // if it is a normal transaction and non global account we can write - if (globalModification === false && isGlobalAcc === false) { - txQueueEntry.uniqueWritableKeys.push(key) - } - } - txQueueEntry.uniqueWritableKeys.sort() //need this list to be deterministic! - - if (txQueueEntry.hasShardInfo) { - const transactionGroup = this.queueEntryGetTransactionGroup(txQueueEntry) - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: groupsize transaction ${txQueueEntry.transactionGroup.length}`) - if (txQueueEntry.ourNodeInTransactionGroup || txQueueEntry.didSync === true) { - // go ahead and calculate this now if we are in the tx group or we are syncing this range! - this.queueEntryGetConsensusGroup(txQueueEntry) - - // populate isSenderWrappedTxGroup - for (const accountId of txQueueEntry.uniqueKeys) { - const homeNodeShardData = txQueueEntry.homeNodes[accountId] - const consensusGroupForAccount = homeNodeShardData.consensusNodeForOurNodeFull.map((n) => n.id) - const startAndEndIndices = this.getStartAndEndIndexOfTargetGroup( - consensusGroupForAccount, - txQueueEntry.transactionGroup - ) - const isWrapped = startAndEndIndices.endIndex < startAndEndIndices.startIndex - if (isWrapped === false) continue - const unwrappedEndIndex = startAndEndIndices.endIndex + txQueueEntry.transactionGroup.length - for (let i = startAndEndIndices.startIndex; i < unwrappedEndIndex; i++) { - if (i >= txQueueEntry.transactionGroup.length) { - const wrappedIndex = i - txQueueEntry.transactionGroup.length - txQueueEntry.isSenderWrappedTxGroup[txQueueEntry.transactionGroup[wrappedIndex].id] = i - } - } - } - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`routeAndQueueAcceptedTransaction isSenderWrappedTxGroup ${txQueueEntry.logID} ${utils.stringifyReduce(txQueueEntry.isSenderWrappedTxGroup)}`) - } - if (sendGossip && txQueueEntry.globalModification === false) { - try { - if (transactionGroup.length > 1) { - // should consider only forwarding in some cases? 
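// A compact illustration of the wrap-around handling just above: when an account's consensus
// group occupies a contiguous arc of the (circular) transaction group and that arc wraps past
// the end of the array, the nodes sitting in the wrapped tail are recorded with their
// "unwrapped" index so later offset math stays monotonic. Names here are illustrative.
function markWrappedMembers(
  groupStart: number,          // start index of the consensus group within the tx group
  groupEnd: number,            // end index; groupEnd < groupStart means the arc wraps
  txGroupIds: string[]         // transaction group node ids, in order
): Record<string, number> {
  const wrapped: Record<string, number> = {}
  if (groupEnd >= groupStart) return wrapped       // no wrap: nothing to record
  const unwrappedEnd = groupEnd + txGroupIds.length
  for (let i = groupStart; i < unwrappedEnd; i++) {
    if (i >= txGroupIds.length) {
      const realIndex = i - txGroupIds.length       // position after wrapping back to the front
      wrapped[txGroupIds[realIndex]] = i            // remember the unwrapped (monotonic) index
    }
  }
  return wrapped
}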
- this.stateManager.debugNodeGroup(txId, timestamp, `share to neighbors`, transactionGroup) - this.p2p.sendGossipIn( - 'spread_tx_to_group', - acceptedTx, - '', - sender, - transactionGroup, - true, - -1, - acceptedTx.txId - ) - /* prettier-ignore */ if (logFlags.verbose) console.log( 'spread_tx_to_group', txId, txQueueEntry.executionGroup.length, txQueueEntry.conensusGroup.length, txQueueEntry.transactionGroup.length ) - this.addOriginalTxDataToForward(txQueueEntry) - } - // /* prettier-ignore */ if (logFlags.playback ) this.logger.playbackLogNote('tx_homeGossip', `${txId}`, `AcceptedTransaction: ${acceptedTX}`) - } catch (ex) { - this.statemanager_fatal(`txQueueEntry_ex`, 'txQueueEntry: ' + utils.stringifyReduce(txQueueEntry)) - } - } - - if (txQueueEntry.didSync === false) { - // see if our node shard data covers any of the accounts? - if (txQueueEntry.ourNodeInTransactionGroup === false && txQueueEntry.globalModification === false) { - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_notInTxGroup', `${txQueueEntry.logID}`, ``) - return 'out of range' // we are done, not involved!!! - } else { - // If we have syncing neighbors forward this TX to them - if (this.config.debug.forwardTXToSyncingNeighbors && cycleShardData.hasSyncingNeighbors === true) { - let send_spread_tx_to_group_syncing = true - //todo turn this back on if other testing goes ok - if (txQueueEntry.ourNodeInTransactionGroup === false) { - /* prettier-ignore */ nestedCountersInstance.countEvent('transactionQueue', 'spread_tx_to_group_syncing-skipped2') - send_spread_tx_to_group_syncing = false - } else if (txQueueEntry.ourTXGroupIndex > 0) { - const everyN = Math.max(1, Math.floor(txQueueEntry.transactionGroup.length * 0.4)) - const nonce = parseInt('0x' + txQueueEntry.acceptedTx.txId.substring(0, 2)) - const idxPlusNonce = txQueueEntry.ourTXGroupIndex + nonce - const idxModEveryN = idxPlusNonce % everyN - if (idxModEveryN > 0) { - /* prettier-ignore */ nestedCountersInstance.countEvent('transactionQueue', 'spread_tx_to_group_syncing-skipped') - send_spread_tx_to_group_syncing = false - } - } - if (send_spread_tx_to_group_syncing) { - /* prettier-ignore */ nestedCountersInstance.countEvent('transactionQueue', 'spread_tx_to_group_syncing-notSkipped') - - // only send non global modification TXs - if (txQueueEntry.globalModification === false) { - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`routeAndQueueAcceptedTransaction: spread_tx_to_group ${txQueueEntry.logID}`) - /* prettier-ignore */ - if (logFlags.playback) this.logger.playbackLogNote("shrd_sync_tx", `${txQueueEntry.logID}`, `txts: ${timestamp} nodes:${utils.stringifyReduce(cycleShardData.syncingNeighborsTxGroup.map((x) => x.id))}`) - - this.stateManager.debugNodeGroup( - txId, - timestamp, - `share to syncing neighbors`, - cycleShardData.syncingNeighborsTxGroup - ) - //this.p2p.sendGossipAll('spread_tx_to_group', acceptedTx, '', sender, cycleShardData.syncingNeighborsTxGroup) - // if ( - // this.stateManager.config.p2p.useBinarySerializedEndpoints && - // this.stateManager.config.p2p.spreadTxToGroupSyncingBinary - // ) { - if (logFlags.seqdiagram) { - for (const node of cycleShardData.syncingNeighborsTxGroup) { - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455102 ${shardusGetTime()} tx:${acceptedTx.txId} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(node.id)}: ${'spread_tx_to_group_syncing'}`) - } - } - const request = acceptedTx as 
SpreadTxToGroupSyncingReq - this.p2p.tellBinary( - cycleShardData.syncingNeighborsTxGroup, - InternalRouteEnum.binary_spread_tx_to_group_syncing, - request, - serializeSpreadTxToGroupSyncingReq, - {} - ) - // } else { - // this.p2p.tell( - // cycleShardData.syncingNeighborsTxGroup, - // 'spread_tx_to_group_syncing', - // acceptedTx - // ) - // } - } else { - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`routeAndQueueAcceptedTransaction: bugfix detected. avoid forwarding txs where globalModification == true ${txQueueEntry.logID}`) - } - } - } - } - } - } else { - throw new Error('missing shard info') - } - - this.computeTxSieveTime(txQueueEntry) - - if ( - this.config.debug.useShardusMemoryPatterns && - acceptedTx.shardusMemoryPatterns != null && - acceptedTx.shardusMemoryPatterns.ro != null - ) { - txQueueEntry.shardusMemoryPatternSets = { - ro: new Set(acceptedTx.shardusMemoryPatterns.ro), - rw: new Set(acceptedTx.shardusMemoryPatterns.rw), - wo: new Set(acceptedTx.shardusMemoryPatterns.wo), - on: new Set(acceptedTx.shardusMemoryPatterns.on), - ri: new Set(acceptedTx.shardusMemoryPatterns.ri), - } - nestedCountersInstance.countEvent('transactionQueue', 'shardusMemoryPatternSets included') - } else { - nestedCountersInstance.countEvent('transactionQueue', 'shardusMemoryPatternSets not included') - } - - // This call is not awaited. It is expected to be fast and will be done in the background. - this.queueEntryPrePush(txQueueEntry) - - this.pendingTransactionQueue.push(txQueueEntry) - this.pendingTransactionQueueByID.set(txQueueEntry.acceptedTx.txId, txQueueEntry) - - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txQueueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: pendingQ`) - - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_txPreQueued', `${txQueueEntry.logID}`, `${txQueueEntry.logID} gm:${txQueueEntry.globalModification}`) - // start the queue if needed - this.stateManager.tryStartTransactionProcessingQueue() - } catch (error) { - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_addtoqueue_rejected', `${txId}`, `AcceptedTransaction: ${txQueueEntry.logID} ts: ${txQueueEntry.txKeys.timestamp} acc: ${utils.stringifyReduce(txQueueEntry.txKeys.allKeys)}`) - this.statemanager_fatal( - `routeAndQueueAcceptedTransaction_ex`, - 'routeAndQueueAcceptedTransaction failed: ' + errorToStringFull(error) - ) - throw new Error(error) - } - return true - } finally { - this.profiler.profileSectionEnd('enqueue') - } - } - - async queueEntryPrePush(txQueueEntry: QueueEntry): Promise { - this.profiler.profileSectionStart('queueEntryPrePush', true) - this.profiler.scopedProfileSectionStart('queueEntryPrePush', true) - // Pre fetch immutable read account data for this TX - if ( - this.config.features.enableRIAccountsCache && - txQueueEntry.shardusMemoryPatternSets && - txQueueEntry.shardusMemoryPatternSets.ri && - txQueueEntry.shardusMemoryPatternSets.ri.size > 0 - ) { - for (const key of txQueueEntry.shardusMemoryPatternSets.ri) { - /* prettier-ignore */ nestedCountersInstance.countEvent('transactionQueue', 'queueEntryPrePush_ri') - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.info(`queueEntryPrePush: fetching immutable data for tx ${txQueueEntry.acceptedTx.txId} key ${key}`) - const accountData = await this.stateManager.getLocalOrRemoteAccount(key, { - useRICache: true, - }) - if (accountData != null) { - 
this.app.setCachedRIAccountData([accountData]) - this.queueEntryAddData( - txQueueEntry, - { - accountId: accountData.accountId, - stateId: accountData.stateId, - data: accountData.data, - timestamp: accountData.timestamp, - syncData: accountData.syncData, - accountCreated: false, - isPartial: false, - }, - false - ) - /* prettier-ignore */ nestedCountersInstance.countEvent('transactionQueue', 'queueEntryPrePush_ri_added') - } - } - } - this.profiler.scopedProfileSectionEnd('queueEntryPrePush') - this.profiler.profileSectionStart('queueEntryPrePush', true) - } - - /*** - * ####### ### ###### ###### ######## ###### ###### - * ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ###### ###### ###### - * ## ## ## ######### ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## - * ##### ## ## ## ###### ###### ######## ###### ###### - */ - - /** - * getQueueEntry - * get a queue entry from the current queue - * @param txid - */ - getQueueEntry(txid: string): QueueEntry | null { - const queueEntry = this._transactionQueueByID.get(txid) - if (queueEntry === undefined) { - return null - } - return queueEntry - } - - /** - * getQueueEntrySafe - * get a queue entry from the queue or the pending queue (but not archive queue) - * @param txid - */ - getQueueEntrySafe(txid: string): QueueEntry | null { - let queueEntry = this._transactionQueueByID.get(txid) - if (queueEntry === undefined) { - queueEntry = this.pendingTransactionQueueByID.get(txid) - if (queueEntry === undefined) { - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`getQueueEntrySafe failed to find: ${utils.stringifyReduce(txid)}`) - nestedCountersInstance.countEvent('getQueueEntrySafe', 'failed to find returning null') - return null - } - } - return queueEntry - } - - /** - * getQueueEntryArchived - * get a queue entry from the archive queue only - * @param txid - * @param msg - */ - getQueueEntryArchived(txid: string, msg: string): QueueEntry | null { - const queueEntry = this.archivedQueueEntriesByID.get(txid) - if (queueEntry != null) { - return queueEntry - } - nestedCountersInstance.countRareEvent('error', `getQueueEntryArchived no entry: ${msg}`) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`getQueueEntryArchived failed to find: ${utils.stringifyReduce(txid)} ${msg} dbg:${this.stateManager.debugTXHistory[utils.stringifyReduce(txid)]}`) - return null - } - - getArchivedQueueEntryByAccountIdAndHash(accountId: string, hash: string, msg: string): QueueEntry | null { - try { - let foundQueueEntry = false - let foundVote = false - let foundVoteMatchingHash = false - for (const queueEntry of this.archivedQueueEntriesByID.values()) { - if (queueEntry.uniqueKeys.includes(accountId)) { - foundQueueEntry = true - const signedReceipt: SignedReceipt = this.stateManager.getSignedReceipt(queueEntry) - let proposal: Proposal | null = null - if (signedReceipt != null) { - proposal = signedReceipt.proposal - if (signedReceipt.proposal) - nestedCountersInstance.countEvent( - 'getArchivedQueueEntryByAccountIdAndHash', - 'get proposal from signedReceipt' - ) - } - if (proposal == null) { - proposal = queueEntry.ourProposal - if (queueEntry.receivedBestVote) - nestedCountersInstance.countEvent( - 'getArchivedQueueEntryByAccountIdAndHash', - 'get proposal' + ' from' + ' queueEntry.ourProposal' - ) - } - if (proposal == null) { - continue - } - foundVote = true - // this node might not have a vote for this tx - for (let i = 0; i < proposal.accountIDs.length; i++) { - // 
eslint-disable-next-line security/detect-object-injection - if (proposal.accountIDs[i] === accountId) { - // eslint-disable-next-line security/detect-possible-timing-attacks, security/detect-object-injection - if (proposal.afterStateHashes[i] === hash) { - foundVoteMatchingHash = true - return queueEntry - } - } - } - } - } - nestedCountersInstance.countRareEvent('error', `getQueueEntryArchived no entry: ${msg}`) - nestedCountersInstance.countEvent( - 'error', - `getQueueEntryArchived no entry: ${msg}, found queue entry: ${foundQueueEntry}, found vote: ${foundVote}, found vote matching hash: ${foundVoteMatchingHash}` - ) - return null - } catch (e) { - this.statemanager_fatal(`getArchivedQueueEntryByAccountIdAndHash`, `error: ${e.message}`) - return null - } - } - /** - * getQueueEntryArchived - * get a queue entry from the archive queue only - * @param txid - * @param msg - */ - getQueueEntryArchivedByTimestamp(timestamp: number, msg: string): QueueEntry | null { - for (const queueEntry of this.archivedQueueEntriesByID.values()) { - if (queueEntry.acceptedTx.timestamp === timestamp) { - return queueEntry - } - } - nestedCountersInstance.countRareEvent('error', `getQueueEntryArchived no entry: ${msg}`) - nestedCountersInstance.countEvent('error', `getQueueEntryArchived no entry: ${msg}`) - return null - } - - /** - * queueEntryAddData - * add data to a queue entry - * // TODO CODEREVIEW. need to look at the use of local cache. also is the early out ok? - * @param queueEntry - * @param data - */ - queueEntryAddData(queueEntry: QueueEntry, data: Shardus.WrappedResponse, signatureCheck = false): void { - if (queueEntry.uniqueKeys == null) { - nestedCountersInstance.countEvent('queueEntryAddData', 'uniqueKeys == null') - // cant have all data yet if we dont even have unique keys. 
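// A small sketch of the matching step above: given a proposal (parallel arrays of account ids
// and after-state hashes), check whether it attests that a specific account ended the tx with
// a specific hash. The Proposal shape is trimmed down to just the fields used here.
interface ProposalLike {
  accountIDs: string[]
  afterStateHashes: string[]
}

function proposalAttestsHash(proposal: ProposalLike, accountId: string, hash: string): boolean {
  for (let i = 0; i < proposal.accountIDs.length; i++) {
    if (proposal.accountIDs[i] === accountId && proposal.afterStateHashes[i] === hash) {
      return true      // this proposal vouches for that after-state hash
    }
  }
  return false
}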
- throw new Error( - `Attempting to add data and uniqueKeys are not available yet: ${utils.stringifyReduceLimit(queueEntry, 200)}` - ) - } - if (queueEntry.collectedData[data.accountId] != null) { - if (configContext.stateManager.collectedDataFix) { - // compare the timestamps and keep the newest - const existingData = queueEntry.collectedData[data.accountId] - if (data.timestamp > existingData.timestamp) { - queueEntry.collectedData[data.accountId] = data - nestedCountersInstance.countEvent('queueEntryAddData', 'collectedDataFix replace with newer data') - } else { - nestedCountersInstance.countEvent('queueEntryAddData', 'already collected 1') - return - } - } else { - // we have already collected this data - nestedCountersInstance.countEvent('queueEntryAddData', 'already collected 2') - return - } - } - profilerInstance.profileSectionStart('queueEntryAddData', true) - // check the signature of each account data - if (signatureCheck && (data.sign == null || data.sign.owner == null || data.sign.sig == null)) { - this.mainLogger.fatal(`queueEntryAddData: data.sign == null ${utils.stringifyReduce(data)}`) - nestedCountersInstance.countEvent('queueEntryAddData', 'data.sign == null') - return - } - - if (signatureCheck) { - const dataSenderPublicKey = data.sign.owner - const dataSenderNode: Shardus.Node = byPubKey[dataSenderPublicKey] - if (dataSenderNode == null) { - nestedCountersInstance.countEvent('queueEntryAddData', 'dataSenderNode == null') - return - } - const consensusNodesForAccount = queueEntry.homeNodes[data.accountId]?.consensusNodeForOurNodeFull - if ( - consensusNodesForAccount == null || - consensusNodesForAccount.map((n) => n.id).includes(dataSenderNode.id) === false - ) { - nestedCountersInstance.countEvent( - 'queueEntryAddData', - 'data sender node is not in the consensus group of the' + ' account' - ) - return - } - - const singedData = data as SignedObject - - if (this.crypto.verify(singedData) === false) { - nestedCountersInstance.countEvent('queueEntryAddData', 'data signature verification failed') - return - } - } - - queueEntry.collectedData[data.accountId] = data - queueEntry.dataCollected = Object.keys(queueEntry.collectedData).length - - //make a deep copy of the data - queueEntry.originalData[data.accountId] = Utils.safeJsonParse(Utils.safeStringify(data)) - queueEntry.beforeHashes[data.accountId] = data.stateId - - if (queueEntry.dataCollected === queueEntry.uniqueKeys.length) { - // queueEntry.tx Keys.allKeys.length - queueEntry.hasAll = true - // this.gossipCompleteData(queueEntry) - if (queueEntry.executionGroup && queueEntry.executionGroup.length > 1) - this.shareCompleteDataToNeighbours(queueEntry) - if (logFlags.debug || this.stateManager.consensusLog) { - this.mainLogger.debug( - `queueEntryAddData hasAll: true for txId ${queueEntry.logID} ${ - queueEntry.acceptedTx.txId - } at timestamp: ${shardusGetTime()} nodeId: ${Self.id} collected ${ - Object.keys(queueEntry.collectedData).length - } uniqueKeys ${queueEntry.uniqueKeys.length}` - ) - } - } - - if (data.localCache) { - queueEntry.localCachedData[data.accountId] = data.localCache - delete data.localCache - } - - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_addData', `${queueEntry.logID}`, `key ${utils.makeShortHash(data.accountId)} hash: ${utils.makeShortHash(data.stateId)} hasAll:${queueEntry.hasAll} collected:${queueEntry.dataCollected} ${queueEntry.acceptedTx.timestamp}`) - profilerInstance.profileSectionStart('queueEntryAddData', true) - } - - async 
shareCompleteDataToNeighbours(queueEntry: QueueEntry): Promise { - if (configContext.stateManager.shareCompleteData === false) { - return - } - if (queueEntry.hasAll === false || queueEntry.sharedCompleteData) { - return - } - if (queueEntry.isInExecutionHome === false) { - return - } - const dataToShare: WrappedResponses = {} - const stateList: Shardus.WrappedResponse[] = [] - for (const accountId in queueEntry.collectedData) { - const data = queueEntry.collectedData[accountId] - const riCacheResult = await this.app.getCachedRIAccountData([accountId]) - if (riCacheResult != null && riCacheResult.length > 0) { - nestedCountersInstance.countEvent('shareCompleteDataToNeighbours', 'riCacheResult, skipping') - continue - } else { - dataToShare[accountId] = data - stateList.push(data) - } - } - const payload = { txid: queueEntry.acceptedTx.txId, stateList } - const neighboursNodes = utils.selectNeighbors(queueEntry.executionGroup, queueEntry.ourExGroupIndex, 2) - if (stateList.length > 0) { - this.broadcastState(neighboursNodes, payload, 'shareCompleteDataToNeighbours') - - queueEntry.sharedCompleteData = true - nestedCountersInstance.countEvent( - `queueEntryAddData`, - `sharedCompleteData stateList: ${stateList.length} neighbours: ${neighboursNodes.length}` - ) - if (logFlags.debug || this.stateManager.consensusLog) { - this.mainLogger.debug( - `shareCompleteDataToNeighbours: shared complete data for txId ${ - queueEntry.logID - } at timestamp: ${shardusGetTime()} nodeId: ${Self.id} to neighbours: ${Utils.safeStringify( - neighboursNodes.map((node) => node.id) - )}` - ) - } - } - } - - async gossipCompleteData(queueEntry: QueueEntry): Promise { - if (queueEntry.hasAll === false || queueEntry.gossipedCompleteData) { - return - } - if (configContext.stateManager.gossipCompleteData === false) { - return - } - const dataToGossip: WrappedResponses = {} - const stateList: Shardus.WrappedResponse[] = [] - for (const accountId in queueEntry.collectedData) { - const data = queueEntry.collectedData[accountId] - const riCacheResult = await this.app.getCachedRIAccountData([accountId]) - if (riCacheResult != null && riCacheResult.length > 0) { - nestedCountersInstance.countEvent('gossipCompleteData', 'riCacheResult, skipping') - continue - } else { - dataToGossip[accountId] = data - stateList.push(data) - } - } - const payload = { txid: queueEntry.acceptedTx.txId, stateList } - if (stateList.length > 0) { - Comms.sendGossip( - 'broadcast_state_complete_data', // deprecated - payload, - '', - Self.id, - queueEntry.executionGroup, - true, - 6, - queueEntry.acceptedTx.txId - ) - queueEntry.gossipedCompleteData = true - nestedCountersInstance.countEvent('gossipCompleteData', `stateList: ${stateList.length}`) - if (logFlags.debug || this.stateManager.consensusLog) { - this.mainLogger.debug( - `gossipQueueEntryData: gossiped data for txId ${queueEntry.logID} at timestamp: ${shardusGetTime()} nodeId: ${ - Self.id - }` - ) - } - } - } - - /** - * queueEntryHasAllData - * Test if the queueEntry has all the data it needs. - * TODO could be slightly more if it only recalculated when dirty.. 
but that would add more state and complexity, - * so wait for this to show up in the profiler before fixing - * @param queueEntry - */ - queueEntryHasAllData(queueEntry: QueueEntry): boolean { - if (queueEntry.hasAll === true) { - return true - } - if (queueEntry.uniqueKeys == null) { - throw new Error(`queueEntryHasAllData (queueEntry.uniqueKeys == null)`) - } - let dataCollected = 0 - for (const key of queueEntry.uniqueKeys) { - // eslint-disable-next-line security/detect-object-injection - if (queueEntry.collectedData[key] != null) { - dataCollected++ - } - } - if (dataCollected === queueEntry.uniqueKeys.length) { - // queueEntry.tx Keys.allKeys.length uniqueKeys.length - queueEntry.hasAll = true - return true - } - return false - } - - queueEntryListMissingData(queueEntry: QueueEntry): string[] { - if (queueEntry.hasAll === true) { - return [] - } - if (queueEntry.uniqueKeys == null) { - throw new Error(`queueEntryListMissingData (queueEntry.uniqueKeys == null)`) - } - const missingAccounts = [] - for (const key of queueEntry.uniqueKeys) { - // eslint-disable-next-line security/detect-object-injection - if (queueEntry.collectedData[key] == null) { - missingAccounts.push(key) - } - } - - return missingAccounts - } - - /** - * queueEntryRequestMissingData - * ask other nodes for data that is missing for this TX. - * normally other nodes in the network should foward data to us at the correct time. - * This is only for the case that a TX has waited too long and not received the data it needs. - * @param queueEntry - */ - async queueEntryRequestMissingData(queueEntry: QueueEntry): Promise { - if (this.stateManager.currentCycleShardData == null) { - return - } - - if (queueEntry.pendingDataRequest === true) { - return - } - queueEntry.pendingDataRequest = true - - nestedCountersInstance.countEvent('processing', 'queueEntryRequestMissingData-start') - - if (!queueEntry.requests) { - queueEntry.requests = {} - } - if (queueEntry.uniqueKeys == null) { - throw new Error('queueEntryRequestMissingData queueEntry.uniqueKeys == null') - } - - const allKeys = [] - for (const key of queueEntry.uniqueKeys) { - // eslint-disable-next-line security/detect-object-injection - if (queueEntry.collectedData[key] == null) { - allKeys.push(key) - } - } - - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingData_start', `${queueEntry.acceptedTx.txId}`, `qId: ${queueEntry.entryID} AccountsMissing:${utils.stringifyReduce(allKeys)}`) - - // consensus group should have all the data.. may need to correct this later - //let consensusGroup = this.queueEntryGetConsensusGroup(queueEntry) - //let consensusGroup = this.queueEntryGetTransactionGroup(queueEntry) - - for (const key of queueEntry.uniqueKeys) { - // eslint-disable-next-line security/detect-object-injection - if (queueEntry.collectedData[key] == null && queueEntry.requests[key] == null) { - let keepTrying = true - let triesLeft = 5 - // let triesLeft = Math.min(5, consensusGroup.length ) - // let nodeIndex = 0 - while (keepTrying) { - if (triesLeft <= 0) { - keepTrying = false - break - } - triesLeft-- - // eslint-disable-next-line security/detect-object-injection - const homeNodeShardData = queueEntry.homeNodes[key] // mark outstanding request somehow so we dont rerequest - - // let node = consensusGroup[nodeIndex] - // nodeIndex++ - - // find a random node to ask that is not us - let node = null - let randomIndex: number - let foundValidNode = false - let maxTries = 1000 - - // todo make this non random!!!. 
It would be better to build a list and work through each node in order and then be finished - // we have other code that does this fine. - while (foundValidNode == false) { - maxTries-- - randomIndex = this.stateManager.getRandomInt(homeNodeShardData.consensusNodeForOurNodeFull.length - 1) - // eslint-disable-next-line security/detect-object-injection - node = homeNodeShardData.consensusNodeForOurNodeFull[randomIndex] - if (maxTries < 0) { - //FAILED - this.statemanager_fatal( - `queueEntryRequestMissingData`, - `queueEntryRequestMissingData: unable to find node to ask after 1000 tries tx:${ - queueEntry.logID - } key: ${utils.makeShortHash(key)} ${utils.stringifyReduce( - homeNodeShardData.consensusNodeForOurNodeFull.map((x) => (x != null ? x.id : 'null')) - )}` - ) - break - } - if (node == null) { - continue - } - if (node.id === this.stateManager.currentCycleShardData.nodeShardData.node.id) { - continue - } - foundValidNode = true - } - - if (node == null) { - continue - } - if (node.status != 'active' || potentiallyRemoved.has(node.id)) { - continue - } - if (node === this.stateManager.currentCycleShardData.ourNode) { - continue - } - - // Todo: expand this to grab a consensus node from any of the involved consensus nodes. - - for (const key2 of allKeys) { - // eslint-disable-next-line security/detect-object-injection - queueEntry.requests[key2] = node - } - - const relationString = ShardFunctions.getNodeRelation( - homeNodeShardData, - this.stateManager.currentCycleShardData.ourNode.id - ) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingData_ask', `${queueEntry.logID}`, `r:${relationString} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} AccountsMissing:${utils.stringifyReduce(allKeys)}`) - - // Node Precheck! 
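// An isolated version of the node-picking loop above: choose a random member of the account's
// consensus group that is not this node and passes the usability checks, giving up after a
// bounded number of tries. The predicate is passed in because the real checks (status,
// potentiallyRemoved, isNodeValidForInternalMessage) live elsewhere; this is only a sketch.
function pickRequestTarget<T extends { id: string }>(
  candidates: T[],
  selfId: string,
  isUsable: (node: T) => boolean,
  maxTries = 1000
): T | null {
  for (let tries = 0; tries < maxTries; tries++) {
    const node = candidates[Math.floor(Math.random() * candidates.length)]
    if (node == null) continue
    if (node.id === selfId) continue       // never ask ourselves
    if (!isUsable(node)) continue          // status / removal / precheck filters
    return node
  }
  return null                              // mirrors the "unable to find node to ask" fatal above
}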
- if ( - this.stateManager.isNodeValidForInternalMessage(node.id, 'queueEntryRequestMissingData', true, true) === - false - ) { - // if(this.tryNextDataSourceNode('queueEntryRequestMissingData') == false){ - // break - // } - continue - } - - const message = { - keys: allKeys, - txid: queueEntry.acceptedTx.txId, - timestamp: queueEntry.acceptedTx.timestamp, - } - let result = null - try { - // if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.requestStateForTxBinary) { - // GOLD-66 Error handling try/catch happens one layer outside of this function in process transactions - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455101 ${shardusGetTime()} tx:${message.txid} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(node.id)}: ${'request_state_for_tx'}`) - result = (await this.p2p.askBinary( - node, - InternalRouteEnum.binary_request_state_for_tx, - message, - serializeRequestStateForTxReq, - deserializeRequestStateForTxResp, - {} - )) as RequestStateForTxRespSerialized - // } else { - // result = (await this.p2p.ask(node, 'request_state_for_tx', message)) as RequestStateForTxResp - // } - } catch (error) { - /* prettier-ignore */ if (logFlags.error) { - if (error instanceof ResponseError) { - this.mainLogger.error( - `ASK FAIL request_state_for_tx : exception encountered where the error is ${error}` - ) - } - } - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('askBinary request_state_for_tx exception:', error) - - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`askBinary error: ${InternalRouteEnum.binary_request_state_for_tx} asked to ${node.externalIp}:${node.externalPort}:${node.id}`) - } - - if (result == null) { - if (logFlags.verbose) { - if (logFlags.error) this.mainLogger.error('ASK FAIL request_state_for_tx') - } - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingData_askfailretry', `${queueEntry.logID}`, `r:${relationString} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} `) - continue - } - if (result.success !== true) { - if (logFlags.error) this.mainLogger.error('ASK FAIL queueEntryRequestMissingData 9') - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingData_askfailretry2', `${queueEntry.logID}`, `r:${relationString} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} `) - continue - } - - let dataCountReturned = 0 - const accountIdsReturned = [] - for (const data of result.stateList) { - this.queueEntryAddData(queueEntry, data) - dataCountReturned++ - accountIdsReturned.push(utils.makeShortHash(data.accountId)) - } - - if (queueEntry.hasAll === true) { - queueEntry.logstate = 'got all missing data' - } else { - queueEntry.logstate = 'failed to get data:' + queueEntry.hasAll - //This will time out and go to reciept repair mode if it does not get more data sent to it. - } - - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingData_result', `${queueEntry.logID}`, `r:${relationString} result:${queueEntry.logstate} dataCount:${dataCountReturned} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} AccountsMissing:${utils.stringifyReduce(allKeys)} AccountsReturned:${utils.stringifyReduce(accountIdsReturned)}`) - - // queueEntry.homeNodes[key] = null - for (const key2 of allKeys) { - //consider deleteing these instead? 
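// The request/merge pattern above, reduced to its shape: try a handful of nodes, skip null or
// unsuccessful responses, feed any returned account data into a collector, and stop early once
// everything needed has arrived. requestStateForTx is a placeholder for the binary ask, not the
// real route handler.
interface WrappedDataLike { accountId: string }
interface StateForTxResult { success: boolean; stateList: WrappedDataLike[] }

async function requestUntilComplete<T>(
  nodes: T[],
  requestStateForTx: (node: T) => Promise<StateForTxResult | null>,
  addData: (data: WrappedDataLike) => void,
  hasAll: () => boolean,
  maxTries = 5
): Promise<boolean> {
  for (let i = 0; i < Math.min(maxTries, nodes.length); i++) {
    let result: StateForTxResult | null = null
    try {
      result = await requestStateForTx(nodes[i])
    } catch {
      continue                                   // ask failed: try the next node
    }
    if (result == null || result.success !== true) continue
    for (const data of result.stateList) addData(data)
    if (hasAll()) return true                    // every missing account arrived: stop early
  }
  return false                                   // caller falls back to waiting for a receipt / repair
}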
- //TSConversion changed to a delete opertaion should double check this - //queueEntry.requests[key2] = null - // eslint-disable-next-line security/detect-object-injection - delete queueEntry.requests[key2] - } - - if (queueEntry.hasAll === true) { - break - } - - keepTrying = false - } - } - } - - if (queueEntry.hasAll === true) { - nestedCountersInstance.countEvent('processing', 'queueEntryRequestMissingData-success') - } else { - nestedCountersInstance.countEvent('processing', 'queueEntryRequestMissingData-failed') - - //give up and wait for receipt - queueEntry.waitForReceiptOnly = true - - if (this.config.stateManager.txStateMachineChanges) { - this.updateTxState(queueEntry, 'await final data', 'missing data') - } else { - this.updateTxState(queueEntry, 'consensing') - } - - if (logFlags.debug) - this.mainLogger.debug(`queueEntryRequestMissingData failed to get all data for: ${queueEntry.logID}`) - } - } - - /** - * queueEntryRequestMissingReceipt - * Ask other nodes for a receipt to go with this TX - * @param queueEntry - */ - async queueEntryRequestMissingReceipt(queueEntry: QueueEntry): Promise { - if (this.stateManager.currentCycleShardData == null) { - return - } - - if (queueEntry.uniqueKeys == null) { - throw new Error('queueEntryRequestMissingReceipt queueEntry.uniqueKeys == null') - } - - if (queueEntry.requestingReceipt === true) { - return - } - - queueEntry.requestingReceipt = true - queueEntry.receiptEverRequested = true - - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingReceipt_start', `${queueEntry.acceptedTx.txId}`, `qId: ${queueEntry.entryID}`) - - const consensusGroup = this.queueEntryGetConsensusGroup(queueEntry) - - this.stateManager.debugNodeGroup( - queueEntry.acceptedTx.txId, - queueEntry.acceptedTx.timestamp, - `queueEntryRequestMissingReceipt`, - consensusGroup - ) - //let consensusGroup = this.queueEntryGetTransactionGroup(queueEntry) - //the outer loop here could just use the transaction group of nodes instead. but already had this working in a similar function - //TODO change it to loop the transaction group untill we get a good receipt - - //Note: we only need to get one good receipt, the loop on keys is in case we have to try different groups of nodes - let gotReceipt = false - for (const key of queueEntry.uniqueKeys) { - if (gotReceipt === true) { - break - } - - let keepTrying = true - let triesLeft = Math.min(5, consensusGroup.length) - let nodeIndex = 0 - while (keepTrying) { - if (triesLeft <= 0) { - keepTrying = false - break - } - triesLeft-- - // eslint-disable-next-line security/detect-object-injection - const homeNodeShardData = queueEntry.homeNodes[key] // mark outstanding request somehow so we dont rerequest - - // eslint-disable-next-line security/detect-object-injection - const node = consensusGroup[nodeIndex] - nodeIndex++ - - if (node == null) { - continue - } - if (node.status != 'active' || potentiallyRemoved.has(node.id)) { - continue - } - if (node === this.stateManager.currentCycleShardData.ourNode) { - continue - } - - const relationString = ShardFunctions.getNodeRelation( - homeNodeShardData, - this.stateManager.currentCycleShardData.ourNode.id - ) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingReceipt_ask', `${queueEntry.logID}`, `r:${relationString} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} `) - - // Node Precheck! 
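Both request loops in this hunk apply the same per-node filters before asking a peer, followed by the `isNodeValidForInternalMessage` precheck shown next. A compact restatement as one predicate; `PeerNode` is a hypothetical minimal shape and `isValidForInternalMessage` stands in for the state manager's real check:

```typescript
// Illustrative sketch only: the node filters used before asking a peer.
interface PeerNode {
  id: string
  status: string
}

function isAskablePeer(
  node: PeerNode | null,
  ourNodeId: string,
  potentiallyRemoved: Set<string>,
  isValidForInternalMessage: (nodeId: string) => boolean
): boolean {
  if (node == null) return false
  if (node.id === ourNodeId) return false // never ask ourselves
  if (node.status !== 'active') return false // only active nodes can answer
  if (potentiallyRemoved.has(node.id)) return false // node may be on its way out
  return isValidForInternalMessage(node.id)
}
```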
- if ( - this.stateManager.isNodeValidForInternalMessage(node.id, 'queueEntryRequestMissingReceipt', true, true) === - false - ) { - // if(this.tryNextDataSourceNode('queueEntryRequestMissingReceipt') == false){ - // break - // } - continue - } - - const message = { txid: queueEntry.acceptedTx.txId, timestamp: queueEntry.acceptedTx.timestamp } - let result = null - // GOLD-67 to be safe this function needs a try/catch block to prevent a timeout from causing an unhandled exception - // if ( - // this.stateManager.config.p2p.useBinarySerializedEndpoints && - // this.stateManager.config.p2p.requestReceiptForTxBinary - // ) { - try { - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455101 ${shardusGetTime()} tx:${message.txid} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(node.id)}: ${'request_receipt_for_tx'}`) - result = await this.p2p.askBinary( - node, - InternalRouteEnum.binary_request_receipt_for_tx, - message, - serializeRequestReceiptForTxReq, - deserializeRequestReceiptForTxResp, - {} - ) - } catch (e) { - this.statemanager_fatal(`queueEntryRequestMissingReceipt`, `error: ${e.message}`) - /* prettier-ignore */ this.mainLogger.error(`askBinary error: ${InternalRouteEnum.binary_request_receipt_for_tx} asked to ${node.externalIp}:${node.externalPort}:${node.id}`) - } - // } else { - // result = await this.p2p.ask(node, 'request_receipt_for_tx', message) // not sure if we should await this. - // } - - if (result == null) { - if (logFlags.verbose) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`ASK FAIL request_receipt_for_tx ${triesLeft} ${utils.makeShortHash(node.id)}`) - } - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingReceipt_askfailretry', `${queueEntry.logID}`, `r:${relationString} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} `) - continue - } - if (result.success !== true) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`ASK FAIL queueEntryRequestMissingReceipt 9 ${triesLeft} ${utils.makeShortHash(node.id)}:${utils.makeShortHash(node.internalPort)} note:${result.note} txid:${queueEntry.logID}`) - continue - } - - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingReceipt_result', `${queueEntry.logID}`, `r:${relationString} result:${queueEntry.logstate} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} result: ${utils.stringifyReduce(result)}`) - - if (result.success === true && result.receipt != null) { - //TODO implement this!!! - queueEntry.receivedSignedReceipt = result.receipt - keepTrying = false - gotReceipt = true - - this.mainLogger.debug( - `queueEntryRequestMissingReceipt got good receipt for: ${queueEntry.logID} from: ${utils.makeShortHash( - node.id - )}:${utils.makeShortHash(node.internalPort)}` - ) - } - } - - // break the outer loop after we are done trying. todo refactor this. 
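Both `queueEntryRequestMissingData` and `queueEntryRequestMissingReceipt` follow the same bounded pattern: walk candidate nodes, skip unusable ones, stop at the first good response. A generic sketch of that pattern, under the assumption that the caller supplies the ask and filter functions (nothing here is a real shardus API):

```typescript
// Illustrative sketch only: ask nodes until one succeeds, with a retry cap.
async function askUntilSuccess<TNode, TResp>(
  nodes: TNode[],
  askNode: (node: TNode) => Promise<TResp | null>,
  isUsable: (node: TNode) => boolean,
  maxTries: number
): Promise<TResp | null> {
  let tries = Math.min(maxTries, nodes.length)
  for (const node of nodes) {
    if (tries <= 0) break
    if (!isUsable(node)) continue
    tries--
    try {
      const result = await askNode(node)
      if (result != null) return result // first good answer wins
    } catch {
      // log-and-continue in the real code; here we just move to the next node
    }
  }
  return null
}
```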
- if (keepTrying == false) { - break - } - } - queueEntry.requestingReceipt = false - - if (gotReceipt === false) { - queueEntry.requestingReceiptFailed = true - } - } - - // async queueEntryRequestMissingReceipt_old(queueEntry: QueueEntry): Promise { - // if (this.stateManager.currentCycleShardData == null) { - // return - // } - - // if (queueEntry.uniqueKeys == null) { - // throw new Error('queueEntryRequestMissingReceipt queueEntry.uniqueKeys == null') - // } - - // if (queueEntry.requestingReceipt === true) { - // return - // } - - // queueEntry.requestingReceipt = true - // queueEntry.receiptEverRequested = true - - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingReceipt_start', `${queueEntry.acceptedTx.txId}`, `qId: ${queueEntry.entryID}`) - - // const consensusGroup = this.queueEntryGetConsensusGroup(queueEntry) - - // this.stateManager.debugNodeGroup( - // queueEntry.acceptedTx.txId, - // queueEntry.acceptedTx.timestamp, - // `queueEntryRequestMissingReceipt`, - // consensusGroup - // ) - // //let consensusGroup = this.queueEntryGetTransactionGroup(queueEntry) - // //the outer loop here could just use the transaction group of nodes instead. but already had this working in a similar function - // //TODO change it to loop the transaction group untill we get a good receipt - - // //Note: we only need to get one good receipt, the loop on keys is in case we have to try different groups of nodes - // let gotReceipt = false - // for (const key of queueEntry.uniqueKeys) { - // if (gotReceipt === true) { - // break - // } - - // let keepTrying = true - // let triesLeft = Math.min(5, consensusGroup.length) - // let nodeIndex = 0 - // while (keepTrying) { - // if (triesLeft <= 0) { - // keepTrying = false - // break - // } - // triesLeft-- - // // eslint-disable-next-line security/detect-object-injection - // const homeNodeShardData = queueEntry.homeNodes[key] // mark outstanding request somehow so we dont rerequest - - // // eslint-disable-next-line security/detect-object-injection - // const node = consensusGroup[nodeIndex] - // nodeIndex++ - - // if (node == null) { - // continue - // } - // if (node.status != 'active' || potentiallyRemoved.has(node.id)) { - // continue - // } - // if (node === this.stateManager.currentCycleShardData.ourNode) { - // continue - // } - - // const relationString = ShardFunctions.getNodeRelation( - // homeNodeShardData, - // this.stateManager.currentCycleShardData.ourNode.id - // ) - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingReceipt_ask', `${queueEntry.logID}`, `r:${relationString} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} `) - - // // Node Precheck! - // if ( - // this.stateManager.isNodeValidForInternalMessage( - // node.id, - // 'queueEntryRequestMissingReceipt', - // true, - // true - // ) === false - // ) { - // // if(this.tryNextDataSourceNode('queueEntryRequestMissingReceipt') == false){ - // // break - // // } - // continue - // } - - // const message = { txid: queueEntry.acceptedTx.txId, timestamp: queueEntry.acceptedTx.timestamp } - // const result: RequestReceiptForTxResp_old = await this.p2p.ask( - // node, - // 'request_receipt_for_tx_old', - // message - // ) // not sure if we should await this. 
- - // if (result == null) { - // if (logFlags.verbose) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`ASK FAIL request_receipt_for_tx_old ${triesLeft} ${utils.makeShortHash(node.id)}`) - // } - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingReceipt_askfailretry', `${queueEntry.logID}`, `r:${relationString} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} `) - // continue - // } - // if (result.success !== true) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`ASK FAIL queueEntryRequestMissingReceipt 9 ${triesLeft} ${utils.makeShortHash(node.id)}:${utils.makeShortHash(node.internalPort)} note:${result.note} txid:${queueEntry.logID}`) - // continue - // } - - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_queueEntryRequestMissingReceipt_result', `${queueEntry.logID}`, `r:${relationString} result:${queueEntry.logstate} asking: ${utils.makeShortHash(node.id)} qId: ${queueEntry.entryID} result: ${utils.stringifyReduce(result)}`) - - // if (result.success === true && result.receipt != null) { - // //TODO implement this!!! - // queueEntry.recievedAppliedReceipt = result.receipt - // keepTrying = false - // gotReceipt = true - - // this.mainLogger.debug( - // `queueEntryRequestMissingReceipt got good receipt for: ${ - // queueEntry.logID - // } from: ${utils.makeShortHash(node.id)}:${utils.makeShortHash(node.internalPort)}` - // ) - // } - // } - - // // break the outer loop after we are done trying. todo refactor this. - // if (keepTrying == false) { - // break - // } - // } - // queueEntry.requestingReceipt = false - - // if (gotReceipt === false) { - // queueEntry.requestingReceiptFailed = true - // } - // } - - // compute the rand of the node where rank = node_id XOR hash(tx_id + tx_ts) - computeNodeRank(nodeId: string, txId: string, txTimestamp: number): bigint { - if (nodeId == null || txId == null || txTimestamp == null) return BigInt(0) - const hash = this.crypto.hash([txId, txTimestamp]) - return BigInt(XOR(nodeId, hash)) - } - - // sort the nodeList by rank, in descending order - orderNodesByRank(nodeList: Shardus.Node[], queueEntry: QueueEntry): Shardus.NodeWithRank[] { - const nodeListWithRankData: Shardus.NodeWithRank[] = [] - - for (let i = 0; i < nodeList.length; i++) { - const node: Shardus.Node = nodeList[i] - const rank = this.computeNodeRank(node.id, queueEntry.acceptedTx.txId, queueEntry.acceptedTx.timestamp) - const nodeWithRank: Shardus.NodeWithRank = { - rank, - id: node.id, - status: node.status, - publicKey: node.publicKey, - externalIp: node.externalIp, - externalPort: node.externalPort, - internalIp: node.internalIp, - internalPort: node.internalPort, - } - nodeListWithRankData.push(nodeWithRank) - } - return nodeListWithRankData.sort((a: Shardus.NodeWithRank, b: Shardus.NodeWithRank) => { - return b.rank > a.rank ? 
1 : -1 - }) - } - - /** - * queueEntryGetTransactionGroup - * @param {QueueEntry} queueEntry - * @returns {Node[]} - */ - queueEntryGetTransactionGroup(queueEntry: QueueEntry, tryUpdate = false): Shardus.Node[] { - let cycleShardData = this.stateManager.currentCycleShardData - if (Context.config.stateManager.deterministicTXCycleEnabled) { - cycleShardData = this.stateManager.shardValuesByCycle.get(queueEntry.txGroupCycle) - } - if (cycleShardData == null) { - throw new Error('queueEntryGetTransactionGroup: currentCycleShardData == null') - } - if (queueEntry.uniqueKeys == null) { - throw new Error('queueEntryGetTransactionGroup: queueEntry.uniqueKeys == null') - } - if (queueEntry.transactionGroup != null && tryUpdate != true) { - return queueEntry.transactionGroup - } - - const txGroup: Shardus.Node[] = [] - const uniqueNodes: StringNodeObjectMap = {} - - let hasNonGlobalKeys = false - for (const key of queueEntry.uniqueKeys) { - // eslint-disable-next-line security/detect-object-injection - const homeNode = queueEntry.homeNodes[key] - // txGroup = Array.concat(txGroup, homeNode.nodeThatStoreOurParitionFull) - if (homeNode == null) { - if (logFlags.verbose) this.mainLogger.debug('queueEntryGetTransactionGroup homenode:null') - } - if (homeNode.extendedData === false) { - ShardFunctions.computeExtendedNodePartitionData( - cycleShardData.shardGlobals, - cycleShardData.nodeShardDataMap, - cycleShardData.parititionShardDataMap, - homeNode, - cycleShardData.nodes - ) - } - - //may need to go back and sync this logic with how we decide what partition to save a record in. - - // If this is not a global TX then skip tracking of nodes for global accounts used as a reference. - if (queueEntry.globalModification === false) { - if (this.stateManager.accountGlobals.isGlobalAccount(key) === true) { - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetTransactionGroup skipping: ${utils.makeShortHash(key)} tx: ${queueEntry.logID}`) - continue - } else { - hasNonGlobalKeys = true - } - } - - for (const node of homeNode.nodeThatStoreOurParitionFull) { - // not iterable! - uniqueNodes[node.id] = node - if (node.id === Self.id) - if (logFlags.verbose) - /* prettier-ignore */ this.mainLogger.debug(`queueEntryGetTransactionGroup tx ${queueEntry.logID} our node coverage key ${key}`) - } - - const scratch1 = {} - for (const node of homeNode.nodeThatStoreOurParitionFull) { - // not iterable! - scratch1[node.id] = true - } - // make sure the home node is in there in case we hit and edge case - uniqueNodes[homeNode.node.id] = homeNode.node - - // TODO STATESHARDING4 is this next block even needed: - // HOMENODEMATHS need to patch in nodes that would cover this partition! - // TODO PERF make an optimized version of this in ShardFunctions that is smarter about which node range to check and saves off the calculation - // TODO PERF Update. this will scale badly with 100s or 1000s of nodes. need a faster solution that can use the list of accounts to - // build a list of nodes. - // maybe this could go on the partitions. 
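For reference, the rank helpers defined just above order nodes per transaction as rank = node_id XOR hash(tx_id + tx_ts), highest rank first. A standalone sketch of the same idea; `hashHex` and `xorHex` are stand-ins for the project's `crypto.hash` and `XOR` utilities, and hex-encoded ids are assumed:

```typescript
// Illustrative sketch only: per-transaction node ranking and descending sort.
import { createHash } from 'crypto'

function hashHex(input: string): string {
  return createHash('sha256').update(input).digest('hex')
}

function xorHex(a: string, b: string): string {
  // XOR two hex strings nibble by nibble over their common length
  let out = ''
  for (let i = 0; i < Math.min(a.length, b.length); i++) {
    out += (parseInt(a[i], 16) ^ parseInt(b[i], 16)).toString(16)
  }
  return out
}

function computeRank(nodeId: string, txId: string, txTimestamp: number): bigint {
  const xored = xorHex(nodeId, hashHex(txId + txTimestamp))
  if (xored.length === 0) return BigInt(0)
  return BigInt('0x' + xored)
}

function orderByRank(nodeIds: string[], txId: string, txTimestamp: number): string[] {
  return [...nodeIds].sort((a, b) => {
    const ra = computeRank(a, txId, txTimestamp)
    const rb = computeRank(b, txId, txTimestamp)
    return rb > ra ? 1 : rb < ra ? -1 : 0 // descending, stable on ties
  })
}
```

Because the hash mixes in both the tx id and timestamp, the ordering is deterministic for a given transaction but different across transactions, which spreads the highest-ranked role across nodes.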
- const { homePartition } = ShardFunctions.addressToPartition(cycleShardData.shardGlobals, key) - if (homePartition != homeNode.homePartition) { - //loop all nodes for now - for (const nodeID of cycleShardData.nodeShardDataMap.keys()) { - const nodeShardData: StateManagerTypes.shardFunctionTypes.NodeShardData = - cycleShardData.nodeShardDataMap.get(nodeID) - const nodeStoresThisPartition = ShardFunctions.testInRange(homePartition, nodeShardData.storedPartitions) - /* eslint-disable security/detect-object-injection */ - if (nodeStoresThisPartition === true && uniqueNodes[nodeID] == null) { - //setting this will cause it to end up in the transactionGroup - uniqueNodes[nodeID] = nodeShardData.node - queueEntry.patchedOnNodes.set(nodeID, nodeShardData) - } - // build index for patched nodes based on the home node: - if (nodeStoresThisPartition === true) { - if (scratch1[nodeID] == null) { - homeNode.patchedOnNodes.push(nodeShardData.node) - scratch1[nodeID] = true - } - } - /* eslint-enable security/detect-object-injection */ - } - } - - //todo refactor this to where we insert the tx - if (queueEntry.globalModification === false && this.executeInOneShard && key === queueEntry.executionShardKey) { - //queueEntry.executionGroup = homeNode.consensusNodeForOurNodeFull.slice() - const executionKeys = [] - if (logFlags.verbose) { - for (const node of queueEntry.executionGroup) { - executionKeys.push(utils.makeShortHash(node.id) + `:${node.externalPort}`) - } - } - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetTransactionGroup executeInOneShard ${queueEntry.logID} isInExecutionHome:${queueEntry.isInExecutionHome} executionGroup:${Utils.safeStringify(executionKeys)}`) - /* prettier-ignore */ if (logFlags.playback && logFlags.verbose) this.logger.playbackLogNote('queueEntryGetTransactionGroup', `queueEntryGetTransactionGroup executeInOneShard ${queueEntry.logID} isInExecutionHome:${queueEntry.isInExecutionHome} executionGroup:${Utils.safeStringify(executionKeys)}`) - } - - // if(queueEntry.globalModification === false && this.executeInOneShard && key === queueEntry.executionShardKey){ - // let ourNodeShardData: StateManagerTypes.shardFunctionTypes.NodeShardData = this.stateManager.currentCycleShardData.nodeShardData - // let nodeStoresThisPartition = ShardFunctions.testInRange(homePartition, ourNodeShardData.storedPartitions) - // if(nodeStoresThisPartition === false){ - // queueEntry.isInExecutionHome = false - // queueEntry.waitForReceiptOnly = true - // } - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetTransactionGroup ${queueEntry.logID} isInExecutionHome:${queueEntry.isInExecutionHome} waitForReceiptOnly:${queueEntry.waitForReceiptOnly}`) - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('queueEntryGetTransactionGroup', `queueEntryGetTransactionGroup ${queueEntry.logID} isInExecutionHome:${queueEntry.isInExecutionHome} waitForReceiptOnly:${queueEntry.waitForReceiptOnly}`) - // } - } - queueEntry.ourNodeInTransactionGroup = true - if (uniqueNodes[cycleShardData.ourNode.id] == null) { - queueEntry.ourNodeInTransactionGroup = false - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetTransactionGroup not involved: hasNonG:${hasNonGlobalKeys} tx ${queueEntry.logID}`) - } - if (queueEntry.ourNodeInTransactionGroup) - if (logFlags.seqdiagram) - /* prettier-ignore */ this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over 
${NodeList.activeIdToPartition.get(Self.id)}: targetgroup`) - - // make sure our node is included: needed for gossip! - although we may not care about the data! - // This may seem confusing, but to gossip to other nodes, we have to have our node in the list we will gossip to - // Other logic will use queueEntry.ourNodeInTransactionGroup to know what else to do with the queue entry - uniqueNodes[cycleShardData.ourNode.id] = cycleShardData.ourNode - - const values = Object.values(uniqueNodes) - for (const v of values) { - txGroup.push(v) - } - - txGroup.sort(this.stateManager._sortByIdAsc) - if (queueEntry.ourNodeInTransactionGroup) { - const ourID = cycleShardData.ourNode.id - for (let idx = 0; idx < txGroup.length; idx++) { - // eslint-disable-next-line security/detect-object-injection - const node = txGroup[idx] - if (node.id === ourID) { - queueEntry.ourTXGroupIndex = idx - break - } - } - } - if (tryUpdate != true) { - if (Context.config.stateManager.deterministicTXCycleEnabled === false) { - queueEntry.txGroupCycle = this.stateManager.currentCycleShardData.cycleNumber - } - queueEntry.transactionGroup = txGroup - } else { - queueEntry.updatedTxGroupCycle = this.stateManager.currentCycleShardData.cycleNumber - queueEntry.transactionGroup = txGroup - } - - // let uniqueNodes = {} - // for (let n of gossipGroup) { - // uniqueNodes[n.id] = n - // } - // for (let n of updatedGroup) { - // uniqueNodes[n.id] = n - // } - // let values = Object.values(uniqueNodes) - // let finalGossipGroup = - // for (let n of updatedGroup) { - // uniqueNodes[n.id] = n - // } - - return txGroup - } - - /** - * queueEntryGetConsensusGroup - * Gets a merged results of all the consensus nodes for all of the accounts involved in the transaction - * Ignores global accounts if globalModification == false and the account is global - * @param {QueueEntry} queueEntry - * @returns {Node[]} - */ - queueEntryGetConsensusGroup(queueEntry: QueueEntry): Shardus.Node[] { - let cycleShardData = this.stateManager.currentCycleShardData - if (Context.config.stateManager.deterministicTXCycleEnabled) { - cycleShardData = this.stateManager.shardValuesByCycle.get(queueEntry.txGroupCycle) - } - if (cycleShardData == null) { - throw new Error('queueEntryGetConsensusGroup: currentCycleShardData == null') - } - if (queueEntry.uniqueKeys == null) { - throw new Error('queueEntryGetConsensusGroup: queueEntry.uniqueKeys == null') - } - if (queueEntry.conensusGroup != null) { - return queueEntry.conensusGroup - } - const txGroup = [] - const uniqueNodes: StringNodeObjectMap = {} - - let hasNonGlobalKeys = false - for (const key of queueEntry.uniqueKeys) { - // eslint-disable-next-line security/detect-object-injection - const homeNode = queueEntry.homeNodes[key] - if (homeNode == null) { - if (logFlags.verbose) this.mainLogger.debug('queueEntryGetConsensusGroup homenode:null') - } - if (homeNode.extendedData === false) { - ShardFunctions.computeExtendedNodePartitionData( - cycleShardData.shardGlobals, - cycleShardData.nodeShardDataMap, - cycleShardData.parititionShardDataMap, - homeNode, - cycleShardData.nodes - ) - } - - // TODO STATESHARDING4 GLOBALACCOUNTS is this next block of logic needed? - // If this is not a global TX then skip tracking of nodes for global accounts used as a reference. 
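The transaction group above is finalized by deduplicating nodes by id, sorting ascending by id, and recording our own index. A minimal sketch of that step; `GroupNode` is a hypothetical shape and a plain lexicographic id sort is assumed in place of `_sortByIdAsc`:

```typescript
// Illustrative sketch only: dedupe by id, sort ascending, record our index.
interface GroupNode {
  id: string
}

function finalizeGroup(
  uniqueNodes: Record<string, GroupNode>,
  ourId: string
): { group: GroupNode[]; ourIndex: number } {
  const group = Object.values(uniqueNodes)
  group.sort((a, b) => (a.id === b.id ? 0 : a.id < b.id ? -1 : 1))
  const ourIndex = group.findIndex((n) => n.id === ourId)
  return { group, ourIndex } // ourIndex is -1 if we are not in the group
}
```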
- if (queueEntry.globalModification === false) { - if (this.stateManager.accountGlobals.isGlobalAccount(key) === true) { - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetConsensusGroup skipping: ${utils.makeShortHash(key)} tx: ${queueEntry.logID}`) - continue - } else { - hasNonGlobalKeys = true - } - } - - for (const node of homeNode.consensusNodeForOurNodeFull) { - uniqueNodes[node.id] = node - } - - // make sure the home node is in there in case we hit and edge case - uniqueNodes[homeNode.node.id] = homeNode.node - } - queueEntry.ourNodeInConsensusGroup = true - if (uniqueNodes[cycleShardData.ourNode.id] == null) { - queueEntry.ourNodeInConsensusGroup = false - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetConsensusGroup not involved: hasNonG:${hasNonGlobalKeys} tx ${queueEntry.logID}`) - } - - // make sure our node is included: needed for gossip! - although we may not care about the data! - uniqueNodes[cycleShardData.ourNode.id] = cycleShardData.ourNode - - const values = Object.values(uniqueNodes) - for (const v of values) { - txGroup.push(v) - } - queueEntry.conensusGroup = txGroup - return txGroup - } - - /** - * queueEntryGetConsensusGroupForAccount - * Gets a merged results of all the consensus nodes for a specific account involved in the transaction - * Ignores global accounts if globalModification == false and the account is global - * @param {QueueEntry} queueEntry - * @returns {Node[]} - */ - queueEntryGetConsensusGroupForAccount(queueEntry: QueueEntry, accountId: string): Shardus.Node[] { - let cycleShardData = this.stateManager.currentCycleShardData - if (Context.config.stateManager.deterministicTXCycleEnabled) { - cycleShardData = this.stateManager.shardValuesByCycle.get(queueEntry.txGroupCycle) - } - if (cycleShardData == null) { - throw new Error('queueEntryGetConsensusGroup: currentCycleShardData == null') - } - if (queueEntry.uniqueKeys == null) { - throw new Error('queueEntryGetConsensusGroup: queueEntry.uniqueKeys == null') - } - if (queueEntry.conensusGroup != null) { - return queueEntry.conensusGroup - } - if (queueEntry.uniqueKeys.includes(accountId) === false) { - throw new Error(`queueEntryGetConsensusGroup: account ${accountId} is not in the queueEntry.uniqueKeys`) - } - const txGroup = [] - const uniqueNodes: StringNodeObjectMap = {} - - let hasNonGlobalKeys = false - const key = accountId - // eslint-disable-next-line security/detect-object-injection - const homeNode = queueEntry.homeNodes[key] - if (homeNode == null) { - if (logFlags.verbose) this.mainLogger.debug('queueEntryGetConsensusGroup homenode:null') - } - if (homeNode.extendedData === false) { - ShardFunctions.computeExtendedNodePartitionData( - cycleShardData.shardGlobals, - cycleShardData.nodeShardDataMap, - cycleShardData.parititionShardDataMap, - homeNode, - cycleShardData.nodes - ) - } - - // TODO STATESHARDING4 GLOBALACCOUNTS is this next block of logic needed? - // If this is not a global TX then skip tracking of nodes for global accounts used as a reference. 
- if (queueEntry.globalModification === false) { - if (this.stateManager.accountGlobals.isGlobalAccount(key) === true) { - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetConsensusGroup skipping: ${utils.makeShortHash(key)} tx: ${queueEntry.logID}`) - } else { - hasNonGlobalKeys = true - } - } - - for (const node of homeNode.consensusNodeForOurNodeFull) { - uniqueNodes[node.id] = node - } - - // make sure the home node is in there in case we hit and edge case - uniqueNodes[homeNode.node.id] = homeNode.node - queueEntry.ourNodeInConsensusGroup = true - if (uniqueNodes[cycleShardData.ourNode.id] == null) { - queueEntry.ourNodeInConsensusGroup = false - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`queueEntryGetConsensusGroup not involved: hasNonG:${hasNonGlobalKeys} tx ${queueEntry.logID}`) - } - - // make sure our node is included: needed for gossip! - although we may not care about the data! - uniqueNodes[cycleShardData.ourNode.id] = cycleShardData.ourNode - - const values = Object.values(uniqueNodes) - for (const v of values) { - txGroup.push(v) - } - return txGroup - } - /** - * tellCorrespondingNodes - * @param queueEntry - * -sends account data to the correct involved nodees - * -loads locally available data into the queue entry - */ - // async tellCorrespondingNodesOld(queueEntry: QueueEntry): Promise { - // if (this.stateManager.currentCycleShardData == null) { - // throw new Error('tellCorrespondingNodes: currentCycleShardData == null') - // } - // if (queueEntry.uniqueKeys == null) { - // throw new Error('tellCorrespondingNodes: queueEntry.uniqueKeys == null') - // } - // // Report data to corresponding nodes - // const ourNodeData = this.stateManager.currentCycleShardData.nodeShardData - // // let correspondingEdgeNodes = [] - // let correspondingAccNodes: Shardus.Node[] = [] - // const dataKeysWeHave = [] - // const dataValuesWeHave = [] - // const datas: { [accountID: string]: Shardus.WrappedResponse } = {} - // const remoteShardsByKey: { [accountID: string]: StateManagerTypes.shardFunctionTypes.NodeShardData } = {} // shard homenodes that we do not have the data for. - // let loggedPartition = false - // for (const key of queueEntry.uniqueKeys) { - // /// test here - // // let hasKey = ShardFunctions.testAddressInRange(key, ourNodeData.storedPartitions) - // // todo : if this works maybe a nicer or faster version could be used - // let hasKey = false - // // eslint-disable-next-line security/detect-object-injection - // const homeNode = queueEntry.homeNodes[key] - // if (homeNode.node.id === ourNodeData.node.id) { - // hasKey = true - // } else { - // //perf todo: this seems like a slow calculation, coult improve this - // for (const node of homeNode.nodeThatStoreOurParitionFull) { - // if (node.id === ourNodeData.node.id) { - // hasKey = true - // break - // } - // } - // } - // - // // HOMENODEMATHS tellCorrespondingNodes patch the value of hasKey - // // did we get patched in - // if (queueEntry.patchedOnNodes.has(ourNodeData.node.id)) { - // hasKey = true - // } - // - // // for(let patchedNodeID of queueEntry.patchedOnNodes.values()){ - // // } - // - // let isGlobalKey = false - // //intercept that we have this data rather than requesting it. 
- // if (this.stateManager.accountGlobals.isGlobalAccount(key)) { - // hasKey = true - // isGlobalKey = true - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('globalAccountMap', queueEntry.logID, `tellCorrespondingNodes - has`) - // } - // - // if (hasKey === false) { - // if (loggedPartition === false) { - // loggedPartition = true - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tellCorrespondingNodes hasKey=false: ${utils.stringifyReduce(homeNode.nodeThatStoreOurParitionFull.map((v) => v.id))}`) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tellCorrespondingNodes hasKey=false: full: ${utils.stringifyReduce(homeNode.nodeThatStoreOurParitionFull)}`) - // } - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tellCorrespondingNodes hasKey=false key: ${utils.stringifyReduce(key)}`) - // } - // - // if (hasKey) { - // // TODO PERF is it possible that this query could be used to update our in memory cache? (this would save us from some slow look ups) later on - // // when checking timestamps.. alternatively maybe there is a away we can note the timestamp with what is returned here in the queueEntry data - // // and not have to deal with the cache. - // // todo old: Detect if our node covers this paritition.. need our partition data - // - // this.profiler.profileSectionStart('process_dapp.getRelevantData') - // this.profiler.scopedProfileSectionStart('process_dapp.getRelevantData') - // /* prettier-ignore */ this.setDebugLastAwaitedCallInner('this.stateManager.transactionQueue.app.getRelevantData old') - // let data = await this.app.getRelevantData( - // key, - // queueEntry.acceptedTx.data, - // queueEntry.acceptedTx.appData - // ) - // /* prettier-ignore */ this.setDebugLastAwaitedCallInner('this.stateManager.transactionQueue.app.getRelevantData old', DebugComplete.Completed) - // this.profiler.scopedProfileSectionEnd('process_dapp.getRelevantData') - // this.profiler.profileSectionEnd('process_dapp.getRelevantData') - // - // //only queue this up to share if it is not a global account. global accounts dont need to be shared. - // - // // not sure if it is correct to update timestamp like this. - // // if(data.timestamp === 0){ - // // data.timestamp = queueEntry.acceptedTx.timestamp - // // } - // - // //if this is not freshly created data then we need to make a backup copy of it!! - // //This prevents us from changing data before the commiting phase - // if (data.accountCreated == false) { - // data = utils.deepCopy(data) - // } - // - // if (isGlobalKey === false) { - // // eslint-disable-next-line security/detect-object-injection - // datas[key] = data - // dataKeysWeHave.push(key) - // dataValuesWeHave.push(data) - // } - // - // // eslint-disable-next-line security/detect-object-injection - // queueEntry.localKeys[key] = true - // // add this data to our own queue entry!! 
- // this.queueEntryAddData(queueEntry, data) - // } else { - // // eslint-disable-next-line security/detect-object-injection - // remoteShardsByKey[key] = queueEntry.homeNodes[key] - // } - // } - // if (queueEntry.globalModification === true) { - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('tellCorrespondingNodes', queueEntry.logID, `tellCorrespondingNodes - globalModification = true, not telling other nodes`) - // return - // } - // - // // if we are in the execution shard no need to forward data - // // This is because other nodes will not expect pre-apply data anymore (but they will send us their pre apply data) - // if ( - // queueEntry.globalModification === false && - // this.executeInOneShard && - // queueEntry.isInExecutionHome === true - // ) { - // //will this break things.. - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('tellCorrespondingNodes', queueEntry.logID, `tellCorrespondingNodes - isInExecutionHome = true, not telling other nodes`) - // return - // } - // - // let message: { stateList: Shardus.WrappedResponse[]; txid: string } - // let edgeNodeIds = [] - // let consensusNodeIds = [] - // - // const nodesToSendTo: StringNodeObjectMap = {} - // const doOnceNodeAccPair = new Set() //can skip node+acc if it happens more than once. - // - // for (const key of queueEntry.uniqueKeys) { - // // eslint-disable-next-line security/detect-object-injection - // if (datas[key] != null) { - // for (const key2 of queueEntry.uniqueKeys) { - // if (key !== key2) { - // // eslint-disable-next-line security/detect-object-injection - // const localHomeNode = queueEntry.homeNodes[key] - // // eslint-disable-next-line security/detect-object-injection - // const remoteHomeNode = queueEntry.homeNodes[key2] - // - // // //can ignore nodes not in the execution group since they will not be running apply - // // if(this.executeInOneShard && (queueEntry.executionIdSet.has(remoteHomeNode.node.id) === false)){ - // // continue - // // } - // - // const ourLocalConsensusIndex = localHomeNode.consensusNodeForOurNodeFull.findIndex( - // (a) => a.id === ourNodeData.node.id - // ) - // if (ourLocalConsensusIndex === -1) { - // continue - // } - // - // edgeNodeIds = [] - // consensusNodeIds = [] - // correspondingAccNodes = [] - // - // // must add one to each lookup index! - // const indicies = ShardFunctions.debugFastStableCorrespondingIndicies( - // localHomeNode.consensusNodeForOurNodeFull.length, - // remoteHomeNode.consensusNodeForOurNodeFull.length, - // ourLocalConsensusIndex + 1 - // ) - // const edgeIndicies = ShardFunctions.debugFastStableCorrespondingIndicies( - // localHomeNode.consensusNodeForOurNodeFull.length, - // remoteHomeNode.edgeNodes.length, - // ourLocalConsensusIndex + 1 - // ) - // - // let patchIndicies = [] - // if (remoteHomeNode.patchedOnNodes.length > 0) { - // patchIndicies = ShardFunctions.debugFastStableCorrespondingIndicies( - // localHomeNode.consensusNodeForOurNodeFull.length, - // remoteHomeNode.patchedOnNodes.length, - // ourLocalConsensusIndex + 1 - // ) - // } - // - // // HOMENODEMATHS need to work out sending data to our patched range. 
- // // let edgeIndicies = ShardFunctions.debugFastStableCorrespondingIndicies(localHomeNode.consensusNodeForOurNodeFull.length, remoteHomeNode.edgeNodes.length, ourLocalConsensusIndex + 1) - // - // // for each remote node lets save it's id - // for (const index of indicies) { - // const node = remoteHomeNode.consensusNodeForOurNodeFull[index - 1] // fastStableCorrespondingIndicies is one based so adjust for 0 based array - // if (node != null && node.id !== ourNodeData.node.id) { - // nodesToSendTo[node.id] = node - // consensusNodeIds.push(node.id) - // } - // } - // for (const index of edgeIndicies) { - // const node = remoteHomeNode.edgeNodes[index - 1] // fastStableCorrespondingIndicies is one based so adjust for 0 based array - // if (node != null && node.id !== ourNodeData.node.id) { - // nodesToSendTo[node.id] = node - // edgeNodeIds.push(node.id) - // } - // } - // - // for (const index of patchIndicies) { - // const node = remoteHomeNode.edgeNodes[index - 1] // fastStableCorrespondingIndicies is one based so adjust for 0 based array - // if (node != null && node.id !== ourNodeData.node.id) { - // nodesToSendTo[node.id] = node - // //edgeNodeIds.push(node.id) - // } - // } - // - // const dataToSend: Shardus.WrappedResponse[] = [] - // // eslint-disable-next-line security/detect-object-injection - // dataToSend.push(datas[key]) // only sending just this one key at a time - // - // // sign each account data - // for (let data of dataToSend) { - // data = this.crypto.sign(data) - // } - // - // message = { stateList: dataToSend, txid: queueEntry.acceptedTx.txId } - // - // //correspondingAccNodes = Object.values(nodesToSendTo) - // - // //build correspondingAccNodes, but filter out nodeid, account key pairs we have seen before - // for (const [accountID, node] of Object.entries(nodesToSendTo)) { - // const keyPair = accountID + key - // if (node != null && doOnceNodeAccPair.has(keyPair) === false) { - // doOnceNodeAccPair.add(keyPair) - // - // // consider this optimization later (should make it so we only send to execution set nodes) - // // if(queueEntry.executionIdSet.has(remoteHomeNode.node.id) === true){ - // // correspondingAccNodes.push(node) - // // } - // correspondingAccNodes.push(node) - // } - // } - // - // if (correspondingAccNodes.length > 0) { - // const remoteRelation = ShardFunctions.getNodeRelation( - // remoteHomeNode, - // this.stateManager.currentCycleShardData.ourNode.id - // ) - // const localRelation = ShardFunctions.getNodeRelation( - // localHomeNode, - // this.stateManager.currentCycleShardData.ourNode.id - // ) - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_tellCorrespondingNodes', `${queueEntry.acceptedTx.txId}`, `remoteRel: ${remoteRelation} localrel: ${localRelation} qId: ${queueEntry.entryID} AccountBeingShared: ${utils.makeShortHash(key)} EdgeNodes:${utils.stringifyReduce(edgeNodeIds)} ConsesusNodes${utils.stringifyReduce(consensusNodeIds)}`) - // - // // Filter nodes before we send tell() - // const filteredNodes = this.stateManager.filterValidNodesForInternalMessage( - // correspondingAccNodes, - // 'tellCorrespondingNodes', - // true, - // true - // ) - // if (filteredNodes.length === 0) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('tellCorrespondingNodes: filterValidNodesForInternalMessage no valid nodes left to try') - // return null - // } - // const filterdCorrespondingAccNodes = filteredNodes - // - // this.broadcastState(filterdCorrespondingAccNodes, message) - // } - // } - // 
} - // } - // } - // } - - async broadcastState( - nodes: Shardus.Node[], - message: { stateList: Shardus.WrappedResponse[]; txid: string }, - context: string - ): Promise { - // if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.broadcastStateBinary) { - // convert legacy message to binary supported type - const request = message as BroadcastStateReq - if (logFlags.seqdiagram) { - for (const node of nodes) { - if (context == 'tellCorrespondingNodes') { - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455102 ${shardusGetTime()} tx:${message.txid} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(node.id)}: ${'broadcast_state_nodes'}`) - } else { - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455102 ${shardusGetTime()} tx:${message.txid} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(node.id)}: ${'broadcast_state_neighbour'}`) - } - } - } - this.p2p.tellBinary( - nodes, - InternalRouteEnum.binary_broadcast_state, - request, - serializeBroadcastStateReq, - { - verification_data: verificationDataCombiner( - message.txid, - message.stateList.length.toString(), - request.stateList[0].accountId - ), - } - ) - // return - // } - // this.p2p.tell(nodes, 'broadcast_state', message) - } - - /** - * tellCorrespondingNodes - * @param queueEntry - * -sends account data to the correct involved nodees - * -loads locally available data into the queue entry - */ - async tellCorrespondingNodes(queueEntry: QueueEntry): Promise { - if (this.stateManager.currentCycleShardData == null) { - throw new Error('tellCorrespondingNodes: currentCycleShardData == null') - } - if (queueEntry.uniqueKeys == null) { - throw new Error('tellCorrespondingNodes: queueEntry.uniqueKeys == null') - } - // Report data to corresponding nodes - const ourNodeData = this.stateManager.currentCycleShardData.nodeShardData - // let correspondingEdgeNodes = [] - let correspondingAccNodes: Shardus.Node[] = [] - const dataKeysWeHave = [] - const dataValuesWeHave = [] - const datas: { [accountID: string]: Shardus.WrappedResponse } = {} - const remoteShardsByKey: { [accountID: string]: StateManagerTypes.shardFunctionTypes.NodeShardData } = {} // shard homenodes that we do not have the data for. - let loggedPartition = false - for (const key of queueEntry.uniqueKeys) { - /// test here - // let hasKey = ShardFunctions.testAddressInRange(key, ourNodeData.storedPartitions) - // todo : if this works maybe a nicer or faster version could be used - let hasKey = false - // eslint-disable-next-line security/detect-object-injection - const homeNode = queueEntry.homeNodes[key] - if (homeNode.node.id === ourNodeData.node.id) { - hasKey = true - } else { - //perf todo: this seems like a slow calculation, coult improve this - for (const node of homeNode.nodeThatStoreOurParitionFull) { - if (node.id === ourNodeData.node.id) { - hasKey = true - break - } - } - } - - // HOMENODEMATHS tellCorrespondingNodes patch the value of hasKey - // did we get patched in - if (queueEntry.patchedOnNodes.has(ourNodeData.node.id)) { - hasKey = true - } - - // for(let patchedNodeID of queueEntry.patchedOnNodes.values()){ - // } - - let isGlobalKey = false - //intercept that we have this data rather than requesting it. 
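`broadcastState` above reuses the legacy message as a `BroadcastStateReq` and tags it with verification data built from the txid, the state-list length, and the first account id. A rough sketch of that payload shape and tagging; `WrappedStateLike` and `combineVerificationData` are purely hypothetical stand-ins, and the real `verificationDataCombiner` format is not shown here:

```typescript
// Illustrative sketch only: broadcast_state payload shape and verification tag.
interface WrappedStateLike {
  accountId: string
  data: unknown
  timestamp: number
}

interface BroadcastStatePayload {
  txid: string
  stateList: WrappedStateLike[]
}

function combineVerificationData(...parts: string[]): string {
  // assumption: the tag is simply the parts joined in a fixed order
  return parts.join(':')
}

function buildBroadcastPayload(
  txid: string,
  stateList: WrappedStateLike[]
): { payload: BroadcastStatePayload; verificationData: string } {
  // callers in this file only broadcast when stateList is non-empty
  const firstAccountId = stateList.length > 0 ? stateList[0].accountId : ''
  return {
    payload: { txid, stateList },
    verificationData: combineVerificationData(txid, stateList.length.toString(), firstAccountId),
  }
}
```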
- if (this.stateManager.accountGlobals.isGlobalAccount(key)) { - hasKey = true - isGlobalKey = true - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('globalAccountMap', queueEntry.logID, `tellCorrespondingNodes - has`) - } - - if (hasKey === false) { - if (loggedPartition === false) { - loggedPartition = true - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tellCorrespondingNodes hasKey=false: ${utils.stringifyReduce(homeNode.nodeThatStoreOurParitionFull.map((v) => v.id))}`) - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tellCorrespondingNodes hasKey=false: full: ${utils.stringifyReduce(homeNode.nodeThatStoreOurParitionFull)}`) - } - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tellCorrespondingNodes hasKey=false key: ${utils.stringifyReduce(key)}`) - } - - if (hasKey) { - // TODO PERF is it possible that this query could be used to update our in memory cache? (this would save us from some slow look ups) later on - // when checking timestamps.. alternatively maybe there is a away we can note the timestamp with what is returned here in the queueEntry data - // and not have to deal with the cache. - // todo old: Detect if our node covers this paritition.. need our partition data - - this.profiler.profileSectionStart('process_dapp.getRelevantData') - this.profiler.scopedProfileSectionStart('process_dapp.getRelevantData') - /* prettier-ignore */ this.setDebugLastAwaitedCallInner('this.stateManager.transactionQueue.app.getRelevantData') - let data = await this.app.getRelevantData(key, queueEntry.acceptedTx.data, queueEntry.acceptedTx.appData) - /* prettier-ignore */ this.setDebugLastAwaitedCallInner('this.stateManager.transactionQueue.app.getRelevantData', DebugComplete.Completed) - this.profiler.scopedProfileSectionEnd('process_dapp.getRelevantData') - this.profiler.profileSectionEnd('process_dapp.getRelevantData') - - //only queue this up to share if it is not a global account. global accounts dont need to be shared. - - // not sure if it is correct to update timestamp like this. - // if(data.timestamp === 0){ - // data.timestamp = queueEntry.acceptedTx.timestamp - // } - - //if this is not freshly created data then we need to make a backup copy of it!! - //This prevents us from changing data before the commiting phase - if (data.accountCreated == false) { - data = utils.deepCopy(data) - } - - if (isGlobalKey === false) { - // eslint-disable-next-line security/detect-object-injection - datas[key] = data - dataKeysWeHave.push(key) - dataValuesWeHave.push(data) - } - - // eslint-disable-next-line security/detect-object-injection - queueEntry.localKeys[key] = true - // add this data to our own queue entry!! - this.queueEntryAddData(queueEntry, data, false) - } else { - // eslint-disable-next-line security/detect-object-injection - remoteShardsByKey[key] = queueEntry.homeNodes[key] - } - } - if (queueEntry.globalModification === true) { - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('tellCorrespondingNodes', queueEntry.logID, `tellCorrespondingNodes - globalModification = true, not telling other nodes`) - return - } - - let message: { stateList: Shardus.WrappedResponse[]; txid: string } - let edgeNodeIds = [] - let consensusNodeIds = [] - - const nodesToSendTo: StringNodeObjectMap = {} - const doOnceNodeAccPair = new Set() //can skip node+acc if it happens more than once. 
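The loop below fans data out using `ShardFunctions.debugFastStableCorrespondingIndicies`, which maps our 1-based position in the local consensus group to 1-based positions in a remote group. The real algorithm is not reproduced here; the sketch below shows one simple stable mapping with the same intent (every receiver index is owned by exactly one sender), purely as an illustration:

```typescript
// Illustrative sketch only: NOT the real debugFastStableCorrespondingIndicies.
// Maps a sender's 1-based index to the 1-based receiver indices it covers.
function correspondingIndices(senderIndex1Based: number, senderCount: number, receiverCount: number): number[] {
  const out: number[] = []
  if (senderCount <= 0 || receiverCount <= 0) return out
  for (let r = 1; r <= receiverCount; r++) {
    // each receiver r is owned by the sender whose index matches r modulo senderCount
    if (((r - 1) % senderCount) + 1 === senderIndex1Based) out.push(r)
  }
  return out
}

// e.g. with 3 senders and 7 receivers, sender 2 covers receivers 2 and 5
// (in this simplified scheme, senders indexed above receiverCount send nothing)
```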
- - for (const key of queueEntry.uniqueKeys) { - // eslint-disable-next-line security/detect-object-injection - if (datas[key] != null) { - for (const key2 of queueEntry.uniqueKeys) { - if (key !== key2) { - // eslint-disable-next-line security/detect-object-injection - const localHomeNode = queueEntry.homeNodes[key] - // eslint-disable-next-line security/detect-object-injection - const remoteHomeNode = queueEntry.homeNodes[key2] - - const ourLocalConsensusIndex = localHomeNode.consensusNodeForOurNodeFull.findIndex( - (a) => a.id === ourNodeData.node.id - ) - if (ourLocalConsensusIndex === -1) { - continue - } - - edgeNodeIds = [] - consensusNodeIds = [] - correspondingAccNodes = [] - - const ourSendingGroupSize = localHomeNode.consensusNodeForOurNodeFull.length - - const targetConsensusGroupSize = remoteHomeNode.consensusNodeForOurNodeFull.length - const targetEdgeGroupSize = remoteHomeNode.edgeNodes.length - const pachedListSize = remoteHomeNode.patchedOnNodes.length - - // must add one to each lookup index! - const indicies = ShardFunctions.debugFastStableCorrespondingIndicies( - ourSendingGroupSize, - targetConsensusGroupSize, - ourLocalConsensusIndex + 1 - ) - const edgeIndicies = ShardFunctions.debugFastStableCorrespondingIndicies( - ourSendingGroupSize, - targetEdgeGroupSize, - ourLocalConsensusIndex + 1 - ) - - let patchIndicies = [] - if (remoteHomeNode.patchedOnNodes.length > 0) { - patchIndicies = ShardFunctions.debugFastStableCorrespondingIndicies( - ourSendingGroupSize, - remoteHomeNode.patchedOnNodes.length, - ourLocalConsensusIndex + 1 - ) - } - - // for each remote node lets save it's id - for (const index of indicies) { - const targetNode = remoteHomeNode.consensusNodeForOurNodeFull[index - 1] // fastStableCorrespondingIndicies is one based so adjust for 0 based array - //only send data to the execution group - if (queueEntry.executionGroupMap.has(targetNode.id) === false) { - continue - } - - if (targetNode != null && targetNode.id !== ourNodeData.node.id) { - nodesToSendTo[targetNode.id] = targetNode - consensusNodeIds.push(targetNode.id) - } - } - for (const index of edgeIndicies) { - const targetNode = remoteHomeNode.edgeNodes[index - 1] // fastStableCorrespondingIndicies is one based so adjust for 0 based array - if (targetNode != null && targetNode.id !== ourNodeData.node.id) { - //only send data to the execution group - if (queueEntry.executionGroupMap.has(targetNode.id) === false) { - continue - } - nodesToSendTo[targetNode.id] = targetNode - edgeNodeIds.push(targetNode.id) - } - } - - for (const index of patchIndicies) { - const targetNode = remoteHomeNode.edgeNodes[index - 1] // fastStableCorrespondingIndicies is one based so adjust for 0 based array - //only send data to the execution group - if (queueEntry.executionGroupMap.has(targetNode.id) === false) { - continue - } - if (targetNode != null && targetNode.id !== ourNodeData.node.id) { - nodesToSendTo[targetNode.id] = targetNode - //edgeNodeIds.push(targetNode.id) - } - } - - const dataToSend = [] - // eslint-disable-next-line security/detect-object-injection - dataToSend.push(datas[key]) // only sending just this one key at a time - - // sign each account data - for (let data of dataToSend) { - data = this.crypto.sign(data) - } - - message = { stateList: dataToSend, txid: queueEntry.acceptedTx.txId } - - //build correspondingAccNodes, but filter out nodeid, account key pairs we have seen before - for (const [accountID, node] of Object.entries(nodesToSendTo)) { - const keyPair = accountID + key - if (node != 
null && doOnceNodeAccPair.has(keyPair) === false) { - doOnceNodeAccPair.add(keyPair) - correspondingAccNodes.push(node) - } - } - - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('tellCorrespondingNodes', queueEntry.logID, `tellCorrespondingNodes nodesToSendTo:${Object.keys(nodesToSendTo).length} doOnceNodeAccPair:${doOnceNodeAccPair.size} indicies:${Utils.safeStringify(indicies)} edgeIndicies:${Utils.safeStringify(edgeIndicies)} patchIndicies:${Utils.safeStringify(patchIndicies)} doOnceNodeAccPair: ${Utils.safeStringify([...doOnceNodeAccPair.keys()])} ourLocalConsensusIndex:${ourLocalConsensusIndex} ourSendingGroupSize:${ourSendingGroupSize} targetEdgeGroupSize:${targetEdgeGroupSize} targetEdgeGroupSize:${targetEdgeGroupSize} pachedListSize:${pachedListSize}`) - - if (correspondingAccNodes.length > 0) { - const remoteRelation = ShardFunctions.getNodeRelation( - remoteHomeNode, - this.stateManager.currentCycleShardData.ourNode.id - ) - const localRelation = ShardFunctions.getNodeRelation( - localHomeNode, - this.stateManager.currentCycleShardData.ourNode.id - ) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_tellCorrespondingNodes', `${queueEntry.acceptedTx.txId}`, `remoteRel: ${remoteRelation} localrel: ${localRelation} qId: ${queueEntry.entryID} AccountBeingShared: ${utils.makeShortHash(key)} EdgeNodes:${utils.stringifyReduce(edgeNodeIds)} ConsesusNodes${utils.stringifyReduce(consensusNodeIds)}`) - - // Filter nodes before we send tell() - const filteredNodes = this.stateManager.filterValidNodesForInternalMessage( - correspondingAccNodes, - 'tellCorrespondingNodes', - true, - true - ) - if (filteredNodes.length === 0) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('tellCorrespondingNodes: filterValidNodesForInternalMessage no valid nodes left to try') - return null - } - const filterdCorrespondingAccNodes = filteredNodes - - this.broadcastState(filterdCorrespondingAccNodes, message, 'tellCorrespondingNodes') - } - } - } - } - } - } - - async factTellCorrespondingNodes(queueEntry: QueueEntry): Promise { - try { - let cycleShardData = this.stateManager.currentCycleShardData - if (Context.config.stateManager.deterministicTXCycleEnabled) { - cycleShardData = this.stateManager.shardValuesByCycle.get(queueEntry.txGroupCycle) - } - if (cycleShardData == null) { - throw new Error('factTellCorrespondingNodes: cycleShardData == null') - } - if (queueEntry.uniqueKeys == null) { - throw new Error('factTellCorrespondingNodes: queueEntry.uniqueKeys == null') - } - const ourNodeData = cycleShardData.nodeShardData - const dataKeysWeHave = [] - const dataValuesWeHave = [] - const datas: { [accountID: string]: Shardus.WrappedResponse } = {} - const remoteShardsByKey: { [accountID: string]: StateManagerTypes.shardFunctionTypes.NodeShardData } = {} // shard homenodes that we do not have the data for. - let loggedPartition = false - for (const key of queueEntry.uniqueKeys) { - let hasKey = ShardFunctions.testAddressInRange(key, ourNodeData.storedPartitions) - - // HOMENODEMATHS factTellCorrespondingNodes patch the value of hasKey - // did we get patched in - if (queueEntry.patchedOnNodes.has(ourNodeData.node.id)) { - hasKey = true - } - - let isGlobalKey = false - //intercept that we have this data rather than requesting it. 
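The FACT variant above decides whether this node already holds an account by testing the address against our stored partitions, then overriding for patched-on coverage and for global accounts. A compact restatement as a standalone predicate; the range and global checks are injected because they live on `ShardFunctions` and `accountGlobals` in the real code:

```typescript
// Illustrative sketch only: "do we already cover this account" decision.
function weAlreadyHaveKey(
  key: string,
  storesAddress: (key: string) => boolean, // e.g. an address-in-stored-partitions test
  isGlobalAccount: (key: string) => boolean,
  patchedOnToThisTx: boolean
): { hasKey: boolean; isGlobalKey: boolean } {
  let hasKey = storesAddress(key)
  if (patchedOnToThisTx) hasKey = true // we were patched into coverage for this tx
  const isGlobalKey = isGlobalAccount(key)
  if (isGlobalKey) hasKey = true // global accounts are held everywhere, no need to request
  return { hasKey, isGlobalKey }
}
```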
- if (this.stateManager.accountGlobals.isGlobalAccount(key)) { - hasKey = true - isGlobalKey = true - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('globalAccountMap', queueEntry.logID, `factTellCorrespondingNodes - has`) - } - - if (hasKey === false) { - if (loggedPartition === false) { - loggedPartition = true - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factTellCorrespondingNodes hasKey=false`) - } - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factTellCorrespondingNodes hasKey=false key: ${utils.stringifyReduce(key)}`) - } - - if (hasKey) { - // TODO PERF is it possible that this query could be used to update our in memory cache? (this would save us from some slow look ups) later on - // when checking timestamps.. alternatively maybe there is a away we can note the timestamp with what is returned here in the queueEntry data - // and not have to deal with the cache. - // todo old: Detect if our node covers this paritition.. need our partition data - - this.profiler.profileSectionStart('process_dapp.getRelevantData') - this.profiler.scopedProfileSectionStart('process_dapp.getRelevantData') - /* prettier-ignore */ this.setDebugLastAwaitedCallInner('this.stateManager.transactionQueue.app.getRelevantData') - let data = await this.app.getRelevantData(key, queueEntry.acceptedTx.data, queueEntry.acceptedTx.appData) - /* prettier-ignore */ this.setDebugLastAwaitedCallInner('this.stateManager.transactionQueue.app.getRelevantData', DebugComplete.Completed) - this.profiler.scopedProfileSectionEnd('process_dapp.getRelevantData') - this.profiler.profileSectionEnd('process_dapp.getRelevantData') - - //if this is not freshly created data then we need to make a backup copy of it!! - //This prevents us from changing data before the commiting phase - if (data.accountCreated == false) { - data = utils.deepCopy(data) - } - - //only queue this up to share if it is not a global account. global accounts dont need to be shared. - if (isGlobalKey === false) { - // eslint-disable-next-line security/detect-object-injection - datas[key] = data - dataKeysWeHave.push(key) - dataValuesWeHave.push(data) - } - - // eslint-disable-next-line security/detect-object-injection - queueEntry.localKeys[key] = true - // add this data to our own queue entry!! 
- this.queueEntryAddData(queueEntry, data, false) - } else { - // eslint-disable-next-line security/detect-object-injection - remoteShardsByKey[key] = queueEntry.homeNodes[key] - } - } - if (queueEntry.globalModification === true) { - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('factTellCorrespondingNodes', queueEntry.logID, `factTellCorrespondingNodes - globalModification = true, not telling other nodes`) - return - } - - const payload: { stateList: Shardus.WrappedResponse[]; txid: string } = { - stateList: [], - txid: queueEntry.acceptedTx.txId, - } - for (const key of queueEntry.uniqueKeys) { - // eslint-disable-next-line security/detect-object-injection - if (datas[key] != null) { - // eslint-disable-next-line security/detect-object-injection - payload.stateList.push(datas[key]) // only sending just this one key at a time - } - } - // sign each account data - const signedPayload = this.crypto.sign(payload) - - // prepare inputs to get corresponding indices - const ourIndexInTxGroup = queueEntry.ourTXGroupIndex - const targetGroup = queueEntry.executionNodeIdSorted - const targetGroupSize = targetGroup.length - const senderGroupSize = targetGroupSize - - // calculate target start and end indices in txGroup - const targetIndices = this.getStartAndEndIndexOfTargetGroup(targetGroup, queueEntry.transactionGroup) - const unwrappedIndex = queueEntry.isSenderWrappedTxGroup[Self.id] - - // temp logs - if (logFlags.verbose) { - this.mainLogger.debug(`factTellCorrespondingNodes: target group size`, targetGroup.length, targetGroup) - this.mainLogger.debug( - `factTellCorrespondingNodes: tx group size`, - queueEntry.transactionGroup.length, - queueEntry.transactionGroup.map((n) => n.id) - ) - this.mainLogger.debug( - `factTellCorrespondingNodes: getting corresponding indices for tx: ${queueEntry.logID}`, - ourIndexInTxGroup, - targetIndices.startIndex, - targetIndices.endIndex, - queueEntry.correspondingGlobalOffset, - targetGroupSize, - senderGroupSize, - queueEntry.transactionGroup.length - ) - this.mainLogger.debug(`factTellCorrespondingNodes: target group indices`, targetIndices) - } - - let correspondingIndices = getCorrespondingNodes( - ourIndexInTxGroup, - targetIndices.startIndex, - targetIndices.endIndex, - queueEntry.correspondingGlobalOffset, - targetGroupSize, - senderGroupSize, - queueEntry.transactionGroup.length - ) - let oldCorrespondingIndices: number[] = undefined - if (this.config.stateManager.correspondingTellUseUnwrapped) { - // can just find if any home nodes for the accounts we cover would say that our node is wrapped - // precalc shouldUnwrapSender check if any account we own shows that we are on the left side of a wrapped range - // can use partitions to check this - if (unwrappedIndex != null) { - const extraCorrespondingIndices = getCorrespondingNodes( - unwrappedIndex, - targetIndices.startIndex, - targetIndices.endIndex, - queueEntry.correspondingGlobalOffset, - targetGroupSize, - senderGroupSize, - queueEntry.transactionGroup.length, - queueEntry.logID - ) - if (Context.config.stateManager.concatCorrespondingTellUseUnwrapped) { - //add them - correspondingIndices = correspondingIndices.concat(extraCorrespondingIndices) - } else { - // replace them - oldCorrespondingIndices = correspondingIndices - correspondingIndices = extraCorrespondingIndices - } - //replace them - // possible optimization where we pick one or the other path based on our account index - //correspondingIndices = extraCorrespondingIndices - } - } - // check if we should 
avoid our index in the corresponding nodes - if (Context.config.stateManager.avoidOurIndexInFactTell && correspondingIndices.includes(ourIndexInTxGroup)) { - if (logFlags.debug) - this.mainLogger.debug( - `factTellCorrespondingNodes: avoiding our index in tx group`, - ourIndexInTxGroup, - correspondingIndices - ) - queueEntry.correspondingGlobalOffset += 1 - nestedCountersInstance.countEvent('stateManager', 'factTellCorrespondingNodes: avoiding our index in tx group') - correspondingIndices = getCorrespondingNodes( - ourIndexInTxGroup, - targetIndices.startIndex, - targetIndices.endIndex, - queueEntry.correspondingGlobalOffset, - targetGroupSize, - senderGroupSize, - queueEntry.transactionGroup.length - ) - let oldCorrespondingIndices: number[] = undefined - if (this.config.stateManager.correspondingTellUseUnwrapped) { - // can just find if any home nodes for the accounts we cover would say that our node is wrapped - // precalc shouldUnwrapSender check if any account we own shows that we are on the left side of a wrapped range - // can use partitions to check this - if (unwrappedIndex != null) { - const extraCorrespondingIndices = getCorrespondingNodes( - unwrappedIndex, - targetIndices.startIndex, - targetIndices.endIndex, - queueEntry.correspondingGlobalOffset, - targetGroupSize, - senderGroupSize, - queueEntry.transactionGroup.length, - queueEntry.logID - ) - if (Context.config.stateManager.concatCorrespondingTellUseUnwrapped) { - //add them - correspondingIndices = correspondingIndices.concat(extraCorrespondingIndices) - } else { - // replace them - oldCorrespondingIndices = correspondingIndices - correspondingIndices = extraCorrespondingIndices - } - //replace them - // possible optimization where we pick one or the other path based on our account index - //correspondingIndices = extraCorrespondingIndices - } - } - if (logFlags.debug) - this.mainLogger.debug( - `factTellCorrespondingNodes: new corresponding indices after avoiding our index in tx group`, - ourIndexInTxGroup, - correspondingIndices - ) - } - - const validCorrespondingIndices = [] - for (const targetIndex of correspondingIndices) { - validCorrespondingIndices.push(targetIndex) - - // if (logFlags.debug) { - // // debug verification code - // const isValid = verifyCorrespondingSender(targetIndex, ourIndexInTxGroup, queueEntry.correspondingGlobalOffset, targetGroupSize, senderGroupSize, targetIndices.startIndex, targetIndices.endIndex, queueEntry.transactionGroup.length) - // if (logFlags.debug) this.mainLogger.debug(`factTellCorrespondingNodes: debug verifyCorrespondingSender`, ourIndexInTxGroup, '->', targetIndex, isValid); - // } - } - - const correspondingNodes = [] - for (const index of validCorrespondingIndices) { - if (index === ourIndexInTxGroup) { - continue - } - const targetNode = queueEntry.transactionGroup[index] - let targetHasOurData = false - - if (this.config.stateManager.filterReceivingNodesForTXData) { - targetHasOurData = true - for (const wrappedResponse of signedPayload.stateList) { - const accountId = wrappedResponse.accountId - const targetNodeShardData = cycleShardData.nodeShardDataMap.get(targetNode.id) - if (targetNodeShardData == null) { - targetHasOurData = false - break - } - const targetHasKey = ShardFunctions.testAddressInRange(accountId, targetNodeShardData.storedPartitions) - if (targetHasKey === false) { - targetHasOurData = false - break - } - } - } - - // send only if target needs our data - if (targetHasOurData === false) { - correspondingNodes.push(targetNode) - } - } - - const 
callParams = { - oi: unwrappedIndex ?? ourIndexInTxGroup, - st: targetIndices.startIndex, - et: targetIndices.endIndex, - gl: queueEntry.correspondingGlobalOffset, - tg: targetGroupSize, - sg: senderGroupSize, - tn: queueEntry.transactionGroup.length, - } - - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factTellCorrespondingNodes: correspondingIndices and nodes ${queueEntry.logID}`, ourIndexInTxGroup, correspondingIndices, correspondingNodes.map(n => n.id), callParams) - queueEntry.txDebug.correspondingDebugInfo = { - ourIndex: ourIndexInTxGroup, - ourUnwrappedIndex: unwrappedIndex, - callParams, - localKeys: queueEntry.localKeys, - oldCorrespondingIndices, - correspondingIndices: correspondingIndices, - correspondingNodeIds: correspondingNodes.map((n) => n.id), - } - if (correspondingNodes.length === 0) { - nestedCountersInstance.countEvent( - 'stateManager', - 'factTellCorrespondingNodes: no corresponding nodes needed to send' - ) - return - } - // Filter nodes before we send tell() - const filteredNodes = this.stateManager.filterValidNodesForInternalMessage( - correspondingNodes, - 'factTellCorrespondingNodes', - true, - true - ) - if (filteredNodes.length === 0) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error("factTellCorrespondingNodes: filterValidNodesForInternalMessage no valid nodes left to try"); - nestedCountersInstance.countEvent( - 'stateManager', - 'factTellCorrespondingNodes: no corresponding nodes needed to send' - ) - return null - } - if (payload.stateList.length === 0) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error("factTellCorrespondingNodes: filterValidNodesForInternalMessage payload.stateList.length === 0"); - nestedCountersInstance.countEvent('stateManager', 'factTellCorrespondingNodes: payload.stateList.length === 0') - return null - } - // send payload to each node in correspondingNodes - this.broadcastState(filteredNodes, payload, 'factTellCorrespondingNodes') - } catch (error) { - /* prettier-ignore */ this.statemanager_fatal( `factTellCorrespondingNodes_ex`, 'factTellCorrespondingNodes' + utils.formatErrorMessage(error) ) - } - } - - validateCorrespondingTellSender(queueEntry: QueueEntry, dataKey: string, senderNodeId: string): boolean { - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`validateCorrespondingTellSender: data key: ${dataKey} sender node id: ${senderNodeId}`) - const receiverNode = this.stateManager.currentCycleShardData.nodeShardData - if (receiverNode == null) return false - - const receiverIsInExecutionGroup = queueEntry.executionGroupMap.has(receiverNode.node.id) - - const senderNode = this.stateManager.currentCycleShardData.nodeShardDataMap.get(senderNodeId) - if (senderNode === null) return false - - const senderHasAddress = ShardFunctions.testAddressInRange(dataKey, senderNode.storedPartitions) - - if (configContext.stateManager.shareCompleteData) { - const senderIsInExecutionGroup = queueEntry.executionGroupMap.has(senderNodeId) - - // check if sender is an execution neighouring node - const neighbourNodes = utils.selectNeighbors( - queueEntry.executionGroup, - queueEntry.ourExGroupIndex, - 2 - ) as Shardus.Node[] - const neighbourNodeIds = neighbourNodes.map((node) => node.id) - if (senderIsInExecutionGroup && neighbourNodeIds.includes(senderNodeId) === false) { - this.mainLogger.error(`validateCorrespondingTellSender: sender is an execution node but not a neighbour node`) - return false - } - if (senderIsInExecutionGroup) - nestedCountersInstance.countEvent( 
- 'stateManager', - 'validateCorrespondingTellSender: sender is an execution node' - ) - else - nestedCountersInstance.countEvent( - 'stateManager', - 'validateCorrespondingTellSender: sender is not an execution node' - ) - - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`validateCorrespondingTellSender: data key: ${dataKey} sender node id: ${senderNodeId} senderHasAddress: ${senderHasAddress} receiverIsInExecutionGroup: ${receiverIsInExecutionGroup} senderIsInExecutionGroup: ${senderIsInExecutionGroup}`) - if (receiverIsInExecutionGroup === true || senderHasAddress === true || senderIsInExecutionGroup === true) { - return true - } - } else { - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`validateCorrespondingTellSender: data key: ${dataKey} sender node id: ${senderNodeId} senderHasAddress: ${senderHasAddress} receiverIsInExecutionGroup: ${receiverIsInExecutionGroup}`) - if (receiverIsInExecutionGroup === true || senderHasAddress === true) { - return true - } - } - - return false - } - - factValidateCorrespondingTellSender(queueEntry: QueueEntry, dataKey: string, senderNodeId: string): boolean { - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factValidateCorrespondingTellSender: txId: ${queueEntry.acceptedTx.txId} sender node id: ${senderNodeId}, receiver id: ${Self.id}`) - let cycleShardData = this.stateManager.currentCycleShardData - if (Context.config.stateManager.deterministicTXCycleEnabled) { - cycleShardData = this.stateManager.shardValuesByCycle.get(queueEntry.txGroupCycle) - } - const receiverNodeShardData = cycleShardData.nodeShardData - if (receiverNodeShardData == null) { - this.mainLogger.error( - `factValidateCorrespondingTellSender: logID: ${queueEntry.logID} receiverNodeShardData == null, txGroupCycle: ${queueEntry.txGroupCycle}}` - ) - nestedCountersInstance.countEvent( - 'stateManager', - 'factValidateCorrespondingTellSender: receiverNodeShardData == null' - ) - return false - } - - const senderNodeShardData = cycleShardData.nodeShardDataMap.get(senderNodeId) - if (senderNodeShardData === null) { - this.mainLogger.error( - `factValidateCorrespondingTellSender: logID: ${queueEntry.logID} senderNodeShardData == null, txGroupCycle: ${queueEntry.txGroupCycle}}` - ) - nestedCountersInstance.countEvent( - 'stateManager', - 'factValidateCorrespondingTellSender: senderNodeShardData == null' - ) - return false - } - const senderHasAddress = ShardFunctions.testAddressInRange(dataKey, senderNodeShardData.storedPartitions) - - // check if it is a FACT sender - const receivingNodeIndex = queueEntry.ourTXGroupIndex // we are the receiver - const senderNodeIndex = queueEntry.transactionGroup.findIndex((node) => node.id === senderNodeId) - let wrappedSenderNodeIndex = null - if (queueEntry.isSenderWrappedTxGroup[senderNodeId] != null) { - wrappedSenderNodeIndex = queueEntry.isSenderWrappedTxGroup[senderNodeId] - } - const receiverGroupSize = queueEntry.executionNodeIdSorted.length - const senderGroupSize = receiverGroupSize - - const targetGroup = queueEntry.executionNodeIdSorted - const targetIndices = this.getStartAndEndIndexOfTargetGroup(targetGroup, queueEntry.transactionGroup) - - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factValidateCorrespondingTellSender: txId: ${queueEntry.acceptedTx.txId} sender node id: ${senderNodeId}, receiver id: ${Self.id} senderHasAddress: ${senderHasAddress} receivingNodeIndex: ${receivingNodeIndex} senderNodeIndex: ${senderNodeIndex} receiverGroupSize: 
${receiverGroupSize} senderGroupSize: ${senderGroupSize} targetIndices: ${utils.stringifyReduce(targetIndices)}`) - - let isValidFactSender = verifyCorrespondingSender( - receivingNodeIndex, - senderNodeIndex, - queueEntry.correspondingGlobalOffset, - receiverGroupSize, - senderGroupSize, - targetIndices.startIndex, - targetIndices.endIndex, - queueEntry.transactionGroup.length, - false, - queueEntry.logID - ) - if (isValidFactSender === false && wrappedSenderNodeIndex != null && wrappedSenderNodeIndex >= 0) { - // try again with wrapped sender index - isValidFactSender = verifyCorrespondingSender( - receivingNodeIndex, - wrappedSenderNodeIndex, - queueEntry.correspondingGlobalOffset, - receiverGroupSize, - senderGroupSize, - targetIndices.startIndex, - targetIndices.endIndex, - queueEntry.transactionGroup.length, - false, - queueEntry.logID - ) - } - // it maybe a FACT sender but sender does not cover the account - if (senderHasAddress === false) { - this.mainLogger.error( - `factValidateCorrespondingTellSender: logId: ${queueEntry.logID} sender does not have the address and is not a exe neighbour` - ) - nestedCountersInstance.countEvent( - 'stateManager', - 'factValidateCorrespondingTellSender: sender does not have the address and is not a exe; neighbour' - ) - return false - } - - // it is neither a FACT corresponding node nor an exe neighbour node - if (isValidFactSender === false) { - this.mainLogger.error( - `factValidateCorrespondingTellSender: logId: ${queueEntry.logID} sender is neither a valid sender nor a neighbour node isValidSender: ${isValidFactSender}` - ) - nestedCountersInstance.countEvent( - 'stateManager', - 'factValidateCorrespondingTellSender: sender is not a valid sender or a neighbour node' - ) - return false - } - return true - } - - getStartAndEndIndexOfTargetGroup( - targetGroup: string[], - transactionGroup: (Shardus.NodeWithRank | P2PTypes.NodeListTypes.Node)[] - ): { startIndex: number; endIndex: number } { - const targetIndexes: number[] = [] - for (let i = 0; i < transactionGroup.length; i++) { - const nodeId = transactionGroup[i].id - if (targetGroup.indexOf(nodeId) >= 0) { - targetIndexes.push(i) - } - } - if (logFlags.verbose) this.mainLogger.debug(`getStartAndEndIndexOfTargetGroup: all target indexes`, targetIndexes) - const n = targetIndexes.length - let startIndex = targetIndexes[0] - // Find the pivot where the circular array starts - for (let i = 1; i < n; i++) { - if (targetIndexes[i] > targetIndexes[i - 1] + 1) { - startIndex = targetIndexes[i] - break - } - } - let endIndex = startIndex + n - if (endIndex > transactionGroup.length) { - endIndex = endIndex - transactionGroup.length - } - return { startIndex, endIndex } - } - - /** - * After a reciept is formed, use this to send updated account data to shards that did not execute a change - * I am keeping the async tag because this function does kick off async tasks it just does not await them - * I think this tag makes it more clear that this function is not a simple synchronous function - * @param queueEntry - * @returns - */ - // async tellCorrespondingNodesFinalData(queueEntry: QueueEntry): Promise { - // profilerInstance.profileSectionStart('tellCorrespondingNodesFinalData', true) - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('tellCorrespondingNodesFinalData', queueEntry.logID, `tellCorrespondingNodesFinalData - start: ${queueEntry.logID}`) - - // if (this.stateManager.currentCycleShardData == null) { - // throw new Error('tellCorrespondingNodesFinalData: 
currentCycleShardData == null') - // } - // if (queueEntry.uniqueKeys == null) { - // throw new Error('tellCorrespondingNodesFinalData: queueEntry.uniqueKeys == null') - // } - // if (queueEntry.globalModification === true) { - // throw new Error('tellCorrespondingNodesFinalData globalModification === true') - // } - - // if (this.executeInOneShard && queueEntry.isInExecutionHome === false) { - // throw new Error('tellCorrespondingNodesFinalData isInExecutionHome === false') - // } - // if (queueEntry.executionShardKey == null || queueEntry.executionShardKey == '') { - // throw new Error('tellCorrespondingNodesFinalData executionShardKey == null or empty') - // } - // if (queueEntry.preApplyTXResult == null) { - // throw new Error('tellCorrespondingNodesFinalData preApplyTXResult == null') - // } - - // // Report data to corresponding nodes - // const ourNodeData = this.stateManager.currentCycleShardData.nodeShardData - // let correspondingAccNodes: Shardus.Node[] = [] - // const datas: { [accountID: string]: Shardus.WrappedResponse } = {} - - // const applyResponse = queueEntry.preApplyTXResult.applyResponse - // let wrappedStates = this.stateManager.useAccountWritesOnly ? {} : queueEntry.collectedData - // const writtenAccountsMap: WrappedResponses = {} - // if (applyResponse.accountWrites != null && applyResponse.accountWrites.length > 0) { - // for (const writtenAccount of applyResponse.accountWrites) { - // writtenAccountsMap[writtenAccount.accountId] = writtenAccount.data - // writtenAccountsMap[writtenAccount.accountId].prevStateId = wrappedStates[writtenAccount.accountId] - // ? wrappedStates[writtenAccount.accountId].stateId - // : '' - // writtenAccountsMap[writtenAccount.accountId].prevDataCopy = wrappedStates[writtenAccount.accountId] - // ? utils.deepCopy(writtenAccount.data) - // : {} - - // datas[writtenAccount.accountId] = writtenAccount.data - // } - // //override wrapped states with writtenAccountsMap which should be more complete if it included - // wrappedStates = writtenAccountsMap - // } - // const keysToShare = Object.keys(wrappedStates) - - // let message: { stateList: Shardus.WrappedResponse[]; txid: string } - // let edgeNodeIds = [] - // let consensusNodeIds = [] - - // const localHomeNode = queueEntry.homeNodes[queueEntry.executionShardKey] - - // let nodesToSendTo: StringNodeObjectMap = {} - // let doOnceNodeAccPair = new Set() //can skip node+acc if it happens more than once. - - // //let uniqueAccountsShared = 0 - // let totalShares = 0 - // for (const key of keysToShare) { - // nodesToSendTo = {} - // doOnceNodeAccPair = new Set() - - // // eslint-disable-next-line security/detect-object-injection - // if (wrappedStates[key] != null) { - // // eslint-disable-next-line security/detect-object-injection - // let accountHomeNode = queueEntry.homeNodes[key] - - // if (accountHomeNode == null) { - // accountHomeNode = ShardFunctions.findHomeNode( - // this.stateManager.currentCycleShardData.shardGlobals, - // key, - // this.stateManager.currentCycleShardData.parititionShardDataMap - // ) - // nestedCountersInstance.countEvent('stateManager', 'fetch missing home info') - // } - // if (accountHomeNode == null) { - // throw new Error('tellCorrespondingNodesFinalData: should never get here. accountHomeNode == null') - // } - - // edgeNodeIds = [] - // consensusNodeIds = [] - // correspondingAccNodes = [] - - // if (queueEntry.ourExGroupIndex === -1) { - // throw new Error( - // 'tellCorrespondingNodesFinalData: should never get here. 
our sending node must be in the execution group' - // ) - // } - - // const ourLocalExecutionSetIndex = queueEntry.ourExGroupIndex - // const ourSendingGroupSize = queueEntry.executionGroupMap.size - - // const consensusListSize = accountHomeNode.consensusNodeForOurNodeFull.length - // const edgeListSize = accountHomeNode.edgeNodes.length - // const pachedListSize = accountHomeNode.patchedOnNodes.length - - // // must add one to each lookup index! - // const indicies = ShardFunctions.debugFastStableCorrespondingIndicies( - // ourSendingGroupSize, - // consensusListSize, - // ourLocalExecutionSetIndex + 1 - // ) - // const edgeIndicies = ShardFunctions.debugFastStableCorrespondingIndicies( - // ourSendingGroupSize, - // edgeListSize, - // ourLocalExecutionSetIndex + 1 - // ) - - // let patchIndicies = [] - // if (accountHomeNode.patchedOnNodes.length > 0) { - // patchIndicies = ShardFunctions.debugFastStableCorrespondingIndicies( - // ourSendingGroupSize, - // pachedListSize, - // ourLocalExecutionSetIndex + 1 - // ) - // } - - // // for each remote node lets save it's id - // for (const index of indicies) { - // const node = accountHomeNode.consensusNodeForOurNodeFull[index - 1] // fastStableCorrespondingIndicies is one based so adjust for 0 based array - // if (node != null && node.id !== ourNodeData.node.id) { - // nodesToSendTo[node.id] = node - // consensusNodeIds.push(node.id) - // } - // } - // for (const index of edgeIndicies) { - // const node = accountHomeNode.edgeNodes[index - 1] // fastStableCorrespondingIndicies is one based so adjust for 0 based array - // if (node != null && node.id !== ourNodeData.node.id) { - // nodesToSendTo[node.id] = node - // edgeNodeIds.push(node.id) - // } - // } - - // for (const index of patchIndicies) { - // const node = accountHomeNode.edgeNodes[index - 1] // fastStableCorrespondingIndicies is one based so adjust for 0 based array - // if (node != null && node.id !== ourNodeData.node.id) { - // nodesToSendTo[node.id] = node - // //edgeNodeIds.push(node.id) - // } - // } - - // for (const [accountID, node] of Object.entries(nodesToSendTo)) { - // const keyPair = accountID + key - // if (node != null && doOnceNodeAccPair.has(keyPair) === false) { - // doOnceNodeAccPair.add(keyPair) - // correspondingAccNodes.push(node) - // } - // } - - // //how can we be making so many calls?? 
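// Note on the lookup convention in this commented-out legacy path: debugFastStableCorrespondingIndicies
// is queried with ourLocalExecutionSetIndex + 1 because it returns 1-based indices; the loops that follow
// convert back with [index - 1] before dereferencing the consensus, edge, and patched node lists, and
// skip our own node id so we never queue a send to ourselves.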
- // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('tellCorrespondingNodesFinalData', queueEntry.logID, `tellCorrespondingNodesFinalData nodesToSendTo:${Object.keys(nodesToSendTo).length} doOnceNodeAccPair:${doOnceNodeAccPair.size} indicies:${Utils.safeStringify(indicies)} edgeIndicies:${Utils.safeStringify(edgeIndicies)} patchIndicies:${Utils.safeStringify(patchIndicies)} doOnceNodeAccPair: ${Utils.safeStringify([...doOnceNodeAccPair.keys()])} ourLocalExecutionSetIndex:${ourLocalExecutionSetIndex} ourSendingGroupSize:${ourSendingGroupSize} consensusListSize:${consensusListSize} edgeListSize:${edgeListSize} pachedListSize:${pachedListSize}`) - - // const dataToSend: Shardus.WrappedResponse[] = [] - // // eslint-disable-next-line security/detect-object-injection - // dataToSend.push(datas[key]) // only sending just this one key at a time - // message = { stateList: dataToSend, txid: queueEntry.acceptedTx.txId } - // if (correspondingAccNodes.length > 0) { - // const remoteRelation = ShardFunctions.getNodeRelation( - // accountHomeNode, - // this.stateManager.currentCycleShardData.ourNode.id - // ) - // const localRelation = ShardFunctions.getNodeRelation( - // localHomeNode, - // this.stateManager.currentCycleShardData.ourNode.id - // ) - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('tellCorrespondingNodesFinalData', queueEntry.logID, `remoteRel: ${remoteRelation} localrel: ${localRelation} qId: ${queueEntry.entryID} AccountBeingShared: ${utils.makeShortHash(key)} EdgeNodes:${utils.stringifyReduce(edgeNodeIds)} ConsesusNodes${utils.stringifyReduce(consensusNodeIds)}`) - - // // Filter nodes before we send tell() - // const filteredNodes = this.stateManager.filterValidNodesForInternalMessage( - // correspondingAccNodes, - // 'tellCorrespondingNodesFinalData', - // true, - // true - // ) - // if (filteredNodes.length === 0) { - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('tellCorrespondingNodesFinalData: filterValidNodesForInternalMessage no valid nodes left to try') - // //return null - // continue - // } - // const filterdCorrespondingAccNodes = filteredNodes - // const filterNodesIpPort = filterdCorrespondingAccNodes.map( - // (node) => node.externalIp + ':' + node.externalPort - // ) - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.debug('tellcorrernodingnodesfinaldata', queueEntry.logID, ` : filterValidNodesForInternalMessage ${filterNodesIpPort} for accounts: ${utils.stringifyReduce(message.stateList)}`) - // // if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.broadcastFinalStateBinary) { - // // convert legacy message to binary supported type - // const request = message as BroadcastFinalStateReq - // if (logFlags.seqdiagram) { - // for (const node of filterdCorrespondingAccNodes) { - // /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455102 ${shardusGetTime()} tx:${message.txid} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(node.id)}: ${'broadcast_finalstate'}`) - // } - // } - - // this.p2p.tellBinary( - // filterdCorrespondingAccNodes, - // InternalRouteEnum.binary_broadcast_finalstate, - // request, - // serializeBroadcastFinalStateReq, - // { - // verification_data: verificationDataCombiner( - // message.txid, - // message.stateList.length.toString() - // ), - // } - // ) - // // } else { - // // this.p2p.tell(filterdCorrespondingAccNodes, 'broadcast_finalstate', message) - // // } - // 
totalShares++ - // } - // } - // } - - // nestedCountersInstance.countEvent('tellCorrespondingNodesFinalData', 'totalShares', totalShares) - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`tellCorrespondingNodesFinalData - end: ${queueEntry.logID} totalShares:${totalShares}`) - // profilerInstance.profileSectionEnd('tellCorrespondingNodesFinalData', true) - // } - - factTellCorrespondingNodesFinalData(queueEntry: QueueEntry): void { - profilerInstance.profileSectionStart('factTellCorrespondingNodesFinalData', true) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('factTellCorrespondingNodesFinalData', queueEntry.logID, `factTellCorrespondingNodesFinalData - start: ${queueEntry.logID}`) - - if (this.stateManager.currentCycleShardData == null) { - throw new Error('factTellCorrespondingNodesFinalData: currentCycleShardData == null') - } - if (queueEntry.uniqueKeys == null) { - throw new Error('factTellCorrespondingNodesFinalData: queueEntry.uniqueKeys == null') - } - if (queueEntry.globalModification === true) { - throw new Error('factTellCorrespondingNodesFinalData globalModification === true') - } - - if (this.executeInOneShard && queueEntry.isInExecutionHome === false) { - throw new Error('factTellCorrespondingNodesFinalData isInExecutionHome === false') - } - if (queueEntry.executionShardKey == null || queueEntry.executionShardKey == '') { - throw new Error('factTellCorrespondingNodesFinalData executionShardKey == null or empty') - } - if (queueEntry.preApplyTXResult == null) { - throw new Error('factTellCorrespondingNodesFinalData preApplyTXResult == null') - } - - const datas: { [accountID: string]: Shardus.WrappedResponse } = {} - - const applyResponse = queueEntry.preApplyTXResult.applyResponse - let wrappedStates = this.stateManager.useAccountWritesOnly ? {} : queueEntry.collectedData - const writtenAccountsMap: WrappedResponses = {} - if (applyResponse.accountWrites != null && applyResponse.accountWrites.length > 0) { - for (const writtenAccount of applyResponse.accountWrites) { - writtenAccountsMap[writtenAccount.accountId] = writtenAccount.data - writtenAccountsMap[writtenAccount.accountId].prevStateId = wrappedStates[writtenAccount.accountId] - ? wrappedStates[writtenAccount.accountId].stateId - : '' - writtenAccountsMap[writtenAccount.accountId].prevDataCopy = wrappedStates[writtenAccount.accountId] - ? 
utils.deepCopy(writtenAccount.data) - : {} - - datas[writtenAccount.accountId] = writtenAccount.data - } - //override wrapped states with writtenAccountsMap which should be more complete if it included - wrappedStates = writtenAccountsMap - } - const keysToShare = Object.keys(wrappedStates) - - let message: { stateList: Shardus.WrappedResponse[]; txid: string } - - let totalShares = 0 - const targetStartIndex = 0 - const targetEndIndex = queueEntry.transactionGroup.length - const targetGroupSize = queueEntry.transactionGroup.length - - const senderIndexInTxGroup = queueEntry.ourTXGroupIndex - const senderGroupSize = queueEntry.executionGroup.length - const unwrappedIndex = queueEntry.isSenderWrappedTxGroup[Self.id] - - let correspondingIndices = getCorrespondingNodes( - senderIndexInTxGroup, - targetStartIndex, - targetEndIndex, - queueEntry.correspondingGlobalOffset, - targetGroupSize, - senderGroupSize, - queueEntry.transactionGroup.length, - queueEntry.logID - ) - - if (this.config.stateManager.correspondingTellUseUnwrapped) { - if (unwrappedIndex != null) { - const extraCorrespondingIndices = getCorrespondingNodes( - unwrappedIndex, - targetStartIndex, - targetEndIndex, - queueEntry.correspondingGlobalOffset, - targetGroupSize, - senderGroupSize, - queueEntry.transactionGroup.length, - queueEntry.logID - ) - if (Context.config.stateManager.concatCorrespondingTellUseUnwrapped) { - correspondingIndices.concat(extraCorrespondingIndices) - } else { - correspondingIndices = extraCorrespondingIndices - } - } - } - - for (const key of keysToShare) { - // eslint-disable-next-line security/detect-object-injection - if (wrappedStates[key] != null) { - if (queueEntry.ourExGroupIndex === -1) { - throw new Error( - 'factTellCorrespondingNodesFinalData: should never get here. our sending node must be in the execution group' - ) - } - const storageNodesForAccount = this.getStorageGroupForAccount(key) - const storageNodesAccountIds = new Set(storageNodesForAccount.map((node) => node.id)) - - const correspondingNodes: P2PTypes.NodeListTypes.Node[] = [] - for (const index of correspondingIndices) { - const node = queueEntry.transactionGroup[index] - if (storageNodesAccountIds.has(node.id)) { - correspondingNodes.push(node) - } - } - - //how can we be making so many calls?? 
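// Send/receive symmetry: the corresponding indices computed above (getCorrespondingNodes over the full
// transaction group, window [0, transactionGroup.length), with the execution group as senders) are the
// same pairing a receiver later re-derives in factValidateCorrespondingTellFinalDataSender via
// verifyCorrespondingSender. Per account key the candidates are then narrowed to storageNodesAccountIds,
// so final state is only broadcast to nodes that actually store the account.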
- /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) { - this.logger.playbackLogNote('factTellCorrespondingNodesFinalData', queueEntry.logID, `factTellCorrespondingNodesFinalData ourIndex: ${senderIndexInTxGroup} correspondingIndices:${JSON.stringify(correspondingIndices)} correspondingNodes:${JSON.stringify(correspondingNodes.map(node => node.id))} for accounts: ${key}`) - } - - const dataToSend: Shardus.WrappedResponse[] = [] - // eslint-disable-next-line security/detect-object-injection - dataToSend.push(datas[key]) // only sending just this one key at a time - message = { stateList: dataToSend, txid: queueEntry.acceptedTx.txId } - if (correspondingNodes.length > 0) { - // Filter nodes before we send tell() - const filteredNodes = this.stateManager.filterValidNodesForInternalMessage( - correspondingNodes, - 'factTellCorrespondingNodesFinalData', - true, - true - ) - if (filteredNodes.length === 0) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('factTellCorrespondingNodesFinalData: filterValidNodesForInternalMessage no valid nodes left to try') - //return null - continue - } - const filterdCorrespondingAccNodes = filteredNodes - const filterNodesIpPort = filterdCorrespondingAccNodes.map( - (node) => node.externalIp + ':' + node.externalPort - ) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.debug('tellcorrernodingnodesfinaldata', queueEntry.logID, ` : filterValidNodesForInternalMessage ${filterNodesIpPort} for accounts: ${utils.stringifyReduce(message.stateList)}`) - // convert legacy message to binary supported type - const request = message as BroadcastFinalStateReq - if (logFlags.seqdiagram) { - for (const node of filterdCorrespondingAccNodes) { - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455102 ${shardusGetTime()} tx:${message.txid} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(node.id)}: ${'broadcast_finalstate'}`) - } - } - - // if (this.usePOQo) { - // && this.config.p2p.useBinarySerializedEndpoints && Context.config.p2p.poqoDataAndReceiptBinary) { - this.p2p.tellBinary( - filterdCorrespondingAccNodes, - InternalRouteEnum.binary_poqo_data_and_receipt, - { - finalState: message, - receipt: queueEntry.signedReceipt, - txGroupCycle: queueEntry.txGroupCycle, - }, - serializePoqoDataAndReceiptReq, - {} - ) - // } else if (this.usePOQo) { - // this.p2p.tell( - // filterdCorrespondingAccNodes, - // 'poqo-data-and-receipt', - // { - // finalState: message, - // receipt: queueEntry.appliedReceipt2 - // } - // ) - // } else //if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.broadcastFinalStateBinary) { - // this.p2p.tellBinary( - // filterdCorrespondingAccNodes, - // InternalRouteEnum.binary_broadcast_finalstate, - // request, - // serializeBroadcastFinalStateReq, - // { - // verification_data: verificationDataCombiner( - // message.txid, - // message.stateList.length.toString() - // ), - // } - // ) - // } else { - // this.p2p.tell(filterdCorrespondingAccNodes, 'broadcast_finalstate', message) - // } - totalShares++ - } - } - } - - nestedCountersInstance.countEvent('factTellCorrespondingNodesFinalData', 'totalShares', totalShares) - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factTellCorrespondingNodesFinalData - end: ${queueEntry.logID} totalShares:${totalShares}`) - profilerInstance.profileSectionEnd('factTellCorrespondingNodesFinalData', true) - } - - factValidateCorrespondingTellFinalDataSender(queueEntry: QueueEntry, 
senderNodeId: string): boolean { - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`factValidateCorrespondingTellFinalDataSender: txId: ${queueEntry.acceptedTx.txId} sender node id: ${senderNodeId}, receiver id: ${Self.id}`) - const senderNode = NodeList.nodes.get(senderNodeId) - if (senderNode === null) { - /* prettier-ignore */ if(logFlags.error) this.mainLogger.error(`factValidateCorrespondingTellFinalDataSender: logId: ${queueEntry.logID} sender node is null`) - nestedCountersInstance.countEvent( - 'stateManager', - 'factValidateCorrespondingTellFinalDataSender: sender node is null' - ) - return false - } - const senderIsInExecutionGroup = queueEntry.executionGroupMap.has(senderNodeId) - - if (senderIsInExecutionGroup === false) { - /* prettier-ignore */ if(logFlags.error) this.mainLogger.error(`factValidateCorrespondingTellFinalDataSender: logId: ${queueEntry.logID} sender is not in the execution group sender:${senderNodeId}`) - nestedCountersInstance.countEvent( - 'stateManager', - 'factValidateCorrespondingTellFinalDataSender: sender is not in the execution group' - ) - return false - } - - let senderNodeIndex = queueEntry.transactionGroup.findIndex((node) => node.id === senderNodeId) - if (queueEntry.isSenderWrappedTxGroup[senderNodeId] != null) { - senderNodeIndex = queueEntry.isSenderWrappedTxGroup[senderNodeId] - } - const senderGroupSize = queueEntry.executionGroup.length - - const targetNodeIndex = queueEntry.ourTXGroupIndex // we are the receiver - const targetGroupSize = queueEntry.transactionGroup.length - const targetStartIndex = 0 // start of tx group - const targetEndIndex = queueEntry.transactionGroup.length // end of tx group - - // check if it is a FACT sender - const isValidFactSender = verifyCorrespondingSender( - targetNodeIndex, - senderNodeIndex, - queueEntry.correspondingGlobalOffset, - targetGroupSize, - senderGroupSize, - targetStartIndex, - targetEndIndex, - queueEntry.transactionGroup.length - ) - - // it is not a FACT corresponding node - if (isValidFactSender === false) { - /* prettier-ignore */ if(logFlags.error) this.mainLogger.error(`factValidateCorrespondingTellFinalDataSender: logId: ${queueEntry.logID} sender is not a valid sender isValidSender: ${isValidFactSender}`); - nestedCountersInstance.countEvent( - 'stateManager', - 'factValidateCorrespondingTellFinalDataSender: sender is not a valid sender or a neighbour node' - ) - return false - } - return true - } - - dumpTxDebugToStatList(queueEntry: QueueEntry): void { - this.txDebugStatList.set(queueEntry.acceptedTx.txId, { ...queueEntry.txDebug }) - } - - clearTxDebugStatList(): void { - this.txDebugStatList.clear() - } - - printTxDebugByTxId(txId: string): string { - // get the txStat from the txDebugStatList - const txStat = this.txDebugStatList.get(txId) - if (txStat == null) { - return 'No txStat found' - } - let resultStr = '' - for (const key in txStat.duration) { - resultStr += `${key}: start:${txStat.startTimestamp[key]} end:${txStat.endTimestamp[key]} ${txStat.duration[key]} ms\n` - } - return resultStr - } - - printTxDebug(): string { - const collector = {} - const totalTxCount = this.txDebugStatList.size() - - const indexes = [ - 'aging', - 'processing', - 'awaiting data', - 'preApplyTransaction', - 'consensing', - 'commiting', - 'await final data', - 'expired', - 'total_queue_time', - 'pass', - 'fail', - ] - - /* eslint-disable security/detect-object-injection */ - for (const [txId, txStat] of this.txDebugStatList.entries()) { - for (const key in txStat.duration) { - if 
(!collector[key]) { - collector[key] = {} - for (const bucket of txStatBucketSize.default) { - collector[key][bucket] = [] - } - } - const duration = txStat.duration[key] - for (const bucket of txStatBucketSize.default) { - if (duration < bucket) { - collector[key][bucket].push(duration) - break - } - } - } - } - const sortedCollector = {} - for (const key of indexes) { - sortedCollector[key] = { ...collector[key] } - } - /* eslint-enable security/detect-object-injection */ - const lines = [] - lines.push(`=> Total Transactions: ${totalTxCount}`) - for (const [key, collectorForThisKey] of Object.entries(sortedCollector)) { - lines.push(`\n => Tx ${key}: \n`) - for (let i = 0; i < Object.keys(collectorForThisKey).length; i++) { - // eslint-disable-next-line security/detect-object-injection - const time = Object.keys(collectorForThisKey)[i] - // eslint-disable-next-line security/detect-object-injection - const arr = collectorForThisKey[time] - if (!arr) continue - const percentage = (arr.length / totalTxCount) * 100 - const blockCount = Math.round(percentage / 2) - const blockStr = '|'.repeat(blockCount) - const lowerLimit = i === 0 ? 0 : Object.keys(collectorForThisKey)[i - 1] - const upperLimit = time - const bucketDescription = `${lowerLimit} ms - ${upperLimit} ms:`.padEnd(19, ' ') - lines.push(`${bucketDescription} ${arr.length} ${percentage.toFixed(1).padEnd(5, ' ')}% ${blockStr} `) - } - } - - const strToPrint = lines.join('\n') - return strToPrint - } - - /** - * removeFromQueue remove an item from the queue and place it in the archivedQueueEntries list for awhile in case we have to access it again - * @param {QueueEntry} queueEntry - * @param {number} currentIndex - */ - removeFromQueue(queueEntry: QueueEntry, currentIndex: number, archive = true): void { - // end all the pending txDebug timers - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455104 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: removed`) - for (const key in queueEntry.txDebug.startTime) { - if (queueEntry.txDebug.startTime[key] != null) { - this.txDebugMarkEndTime(queueEntry, key) - } - } - // this.txDebugMarkEndTime(queueEntry, 'total_queue_time') - this.stateManager.eventEmitter.emit('txPopped', queueEntry.acceptedTx.txId) - if (queueEntry.txDebug) this.dumpTxDebugToStatList(queueEntry) - this._transactionQueue.splice(currentIndex, 1) - this._transactionQueueByID.delete(queueEntry.acceptedTx.txId) - - if (archive === false) { - if (logFlags.debug) this.mainLogger.debug(`removeFromQueue: ${queueEntry.logID} done. No archive`) - return - } - - queueEntry.archived = true - //compact the queue entry before we push it! - queueEntry.ourVote = null - queueEntry.collectedVotes = null - - // coalesce the receipts into applied receipt. maybe not as descriptive, but save memory. - queueEntry.appliedReceipt = - queueEntry.appliedReceipt ?? - queueEntry.recievedAppliedReceipt ?? - queueEntry.appliedReceiptForRepair ?? - queueEntry.appliedReceiptFinal - queueEntry.recievedAppliedReceipt = null - queueEntry.appliedReceiptForRepair = null - queueEntry.appliedReceiptFinal = queueEntry.appliedReceipt - - delete queueEntry.recievedAppliedReceipt - delete queueEntry.appliedReceiptForRepair - - // coalesce the receipt2s into applied receipt. maybe not as descriptive, but save memory. 
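// The same coalescing is applied below to the legacy *2 receipt fields and to the signed receipts:
// keep one surviving copy (signedReceipt ?? receivedSignedReceipt ?? signedReceiptForRepair ?? signedReceiptFinal),
// null out and delete the redundant references, then park the slimmed entry in archivedQueueEntries,
// which is trimmed from the front once it grows past archivedQueueEntryMaxCount.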
- queueEntry.recievedAppliedReceipt2 = null - queueEntry.appliedReceiptForRepair2 = null - - delete queueEntry.recievedAppliedReceipt2 - delete queueEntry.appliedReceiptForRepair2 - - queueEntry.signedReceipt = - queueEntry.signedReceipt ?? - queueEntry.receivedSignedReceipt ?? - queueEntry.signedReceiptForRepair ?? - queueEntry.signedReceiptFinal - queueEntry.receivedSignedReceipt = null - queueEntry.signedReceiptForRepair = null - queueEntry.signedReceiptFinal = queueEntry.signedReceipt - - delete queueEntry.receivedSignedReceipt - delete queueEntry.signedReceiptForRepair - - //delete queueEntry.appliedReceiptFinal - - //delete queueEntry.preApplyTXResult //turn this off for now, until we can do some refactor of queueEntry.preApplyTXResult.applyResponse - - this.archivedQueueEntries.push(queueEntry) - - this.archivedQueueEntriesByID.set(queueEntry.acceptedTx.txId, queueEntry) - // period cleanup will usually get rid of these sooner if the list fills up - if (this.archivedQueueEntries.length > this.archivedQueueEntryMaxCount) { - this.archivedQueueEntriesByID.delete(this.archivedQueueEntries[0].acceptedTx.txId) - this.archivedQueueEntries.shift() - } - if (logFlags.debug) this.mainLogger.debug(`removeFromQueue: ${queueEntry.logID} and added to archive done`) - } - - /*** - * ######## ######## ####### ###### ######## ###### ###### - * ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## - * ######## ######## ## ## ## ###### ###### ###### - * ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ####### ###### ######## ###### ###### - */ - /** - * Run our main processing queue untill there is nothing that we can do - * old name: processAcceptedTxQueue - * @param firstTime - * @returns - */ - async processTransactions(firstTime = false): Promise { - const seenAccounts: SeenAccounts = {} - let pushedProfilerTag = null - const startTime = shardusGetTime() - - const processStats: ProcessQueueStats = { - totalTime: 0, - inserted: 0, - sameState: 0, - stateChanged: 0, - //expired:0, - sameStateStats: {}, - stateChangedStats: {}, - awaitStats: {}, - } - - //this may help in the case where the queue has halted - this.lastProcessStats['current'] = processStats - - this.queueReads = new Set() - this.queueWrites = new Set() - this.queueReadWritesOld = new Set() - - try { - nestedCountersInstance.countEvent('processing', 'processing-enter') - - if (this.pendingTransactionQueue.length > 5000) { - /* prettier-ignore */ nestedCountersInstance.countEvent( 'stateManager', `newAcceptedTxQueueTempInjest>5000 leftRunning:${this.transactionProcessingQueueRunning} noShardCalcs:${ this.stateManager.currentCycleShardData == null } ` ) - - //report rare counter once - if (this.largePendingQueueReported === false) { - this.largePendingQueueReported = true - /* prettier-ignore */ nestedCountersInstance.countRareEvent( 'stateManager', `newAcceptedTxQueueTempInjest>5000 leftRunning:${this.transactionProcessingQueueRunning} noShardCalcs:${ this.stateManager.currentCycleShardData == null } ` ) - } - } - - if (this.transactionProcessingQueueRunning === true) { - /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', 'newAcceptedTxQueueRunning === true') - return - } - this.transactionProcessingQueueRunning = true - this.isStuckProcessing = false - this.debugLastProcessingQueueStartTime = shardusGetTime() - - // ensure there is some rest between processing loops - const timeSinceLastRun = startTime - this.processingLastRunTime - if (timeSinceLastRun < 
this.processingMinRunBreak) { - const sleepTime = Math.max(5, this.processingMinRunBreak - timeSinceLastRun) - await utils.sleep(sleepTime) - nestedCountersInstance.countEvent('processing', 'resting') - } - - if (this.transactionQueueHasRemainingWork && timeSinceLastRun > 500) { - /* prettier-ignore */ if (logFlags.verbose) this.statemanager_fatal(`processAcceptedTxQueue left busy and waited too long to restart`, `processAcceptedTxQueue left busy and waited too long to restart ${timeSinceLastRun / 1000} `) - } - - this.profiler.profileSectionStart('processQ') - - if (logFlags.seqdiagram) - this.mainLogger.info( - `0x10052024 ${ipInfo.externalIp} ${shardusGetTime()} 0x0000 processTransactions _transactionQueue.length ${ - this._transactionQueue.length - }` - ) - - if (this.stateManager.currentCycleShardData == null) { - nestedCountersInstance.countEvent('stateManager', 'currentCycleShardData == null early exit') - return - } - - if (this._transactionQueue.length === 0 && this.pendingTransactionQueue.length === 0) { - return - } - - if (this.queueRestartCounter == null) { - this.queueRestartCounter = 0 - } - this.queueRestartCounter++ - - const localRestartCounter = this.queueRestartCounter - - const timeM = this.stateManager.queueSitTime - // const timeM2 = timeM * 2 // 12s - // const timeM2_5 = timeM * 2.25 // 13.5s - // const timeM3 = timeM * 2.5 // 15s - const timeM2 = timeM * 2 - const timeM2_5 = timeM * 2.5 - const timeM3 = timeM * 3 - const timeM5 = timeM * 5 - let currentTime = shardusGetTime() - - const app = this.app - - // process any new queue entries that were added to the temporary list - if (this.pendingTransactionQueue.length > 0) { - for (const txQueueEntry of this.pendingTransactionQueue) { - nestedCountersInstance.countEvent('stateManager', 'processAcceptedTxQueue injest: kept TX') - - const timestamp = txQueueEntry.txKeys.timestamp - const acceptedTx = txQueueEntry.acceptedTx - const txId = acceptedTx.txId - // Find the time sorted spot in our queue to insert this TX into - // reverse loop because the news (largest timestamp) values are at the end of the array - // todo faster version (binary search? 
to find where we need to insert) - let index = this._transactionQueue.length - 1 - // eslint-disable-next-line security/detect-object-injection - let lastTx = this._transactionQueue[index] - while ( - index >= 0 && - (timestamp > lastTx.txKeys.timestamp || - (timestamp === lastTx.txKeys.timestamp && txId < lastTx.acceptedTx.txId)) - ) { - index-- - // eslint-disable-next-line security/detect-object-injection - lastTx = this._transactionQueue[index] - } - - const age = shardusGetTime() - timestamp - if (age > timeM * 0.9) { - // IT turns out the correct thing to check is didSync flag only report errors if we did not wait on this TX while syncing - if (txQueueEntry.didSync == false) { - /* prettier-ignore */ if (logFlags.verbose) this.statemanager_fatal(`processAcceptedTxQueue_oldTX.9 fromClient:${txQueueEntry.fromClient}`, `processAcceptedTxQueue cannot accept tx older than 0.9M ${timestamp} age: ${age} fromClient:${txQueueEntry.fromClient}`) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_processAcceptedTxQueueTooOld1', `${utils.makeShortHash(txQueueEntry.acceptedTx.txId)}`, 'processAcceptedTxQueue working on older tx ' + timestamp + ' age: ' + age) - //txQueueEntry.waitForReceiptOnly = true - } - } - if (age > timeM) { - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_processAcceptedTxQueueTooOld2', `${utils.makeShortHash(txQueueEntry.acceptedTx.txId)}`, 'processAcceptedTxQueue working on older tx ' + timestamp + ' age: ' + age) - nestedCountersInstance.countEvent('processing', 'txExpired1 > M. waitForReceiptOnly') - txQueueEntry.waitForReceiptOnly = true - if (this.config.stateManager.txStateMachineChanges) { - this.updateTxState(txQueueEntry, 'await final data', 'processTx1') - } else { - this.updateTxState(txQueueEntry, 'consensing') - } - } - - // do not injest tranactions that are long expired. there could be 10k+ of them if we are restarting the processing queue - if (age > timeM3 * 5 && this.stateManager.config.stateManager.discardVeryOldPendingTX === true) { - nestedCountersInstance.countEvent('txExpired', 'txExpired3 > M3 * 5. pendingTransactionQueue') - - // let hasApplyReceipt = txQueueEntry.appliedReceipt != null - // let hasReceivedApplyReceipt = txQueueEntry.recievedAppliedReceipt != null - - // const shortID = txQueueEntry.logID //`${utils.makeShortHash(queueEntry.acceptedTx.id)}` - // //const hasReceipt = receipt2 != null - - // hasApplyReceipt = txQueueEntry.appliedReceipt2 != null - // hasReceivedApplyReceipt = txQueueEntry.recievedAppliedReceipt2 != null - - // this.statemanager_fatal( - // `txExpired3 > M3. 
pendingTransactionQueue`, - // `txExpired txAge > timeM3 pendingTransactionQueue ` + - // `txid: ${shortID} state: ${txQueueEntry.state} hasAll:${txQueueEntry.hasAll} applyReceipt:${hasApplyReceipt} recievedAppliedReceipt:${hasReceivedApplyReceipt} age:${age}` - // ) - - // //probably some alternative TX queue cleanup that should happen similar to setTXExpired - // //this.setTXExpired(queueEntry, currentIndex, 'm3 general') - - continue - } - - txQueueEntry.approximateCycleAge = this.stateManager.currentCycleShardData.cycleNumber - //insert this tx into the main queue - this._transactionQueue.splice(index + 1, 0, txQueueEntry) - this._transactionQueueByID.set(txQueueEntry.acceptedTx.txId, txQueueEntry) - - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455105 ${shardusGetTime()} tx:${txQueueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: aging`) - - processStats.inserted++ - - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_addToQueue', `${txId}`, `AcceptedTransaction: ${txQueueEntry.logID} ts: ${txQueueEntry.txKeys.timestamp} acc: ${utils.stringifyReduce(txQueueEntry.txKeys.allKeys)} indexInserted: ${index + 1}`) - this.stateManager.eventEmitter.emit('txQueued', acceptedTx.txId) - } - this.pendingTransactionQueue = [] - this.pendingTransactionQueueByID.clear() - } - - let currentIndex = this._transactionQueue.length - 1 - - let lastLog = 0 - currentIndex++ //increment once so we can handle the decrement at the top of the loop and be safe about continue statements - - let lastRest = shardusGetTime() - while (this._transactionQueue.length > 0) { - // update current time with each pass through the loop - currentTime = shardusGetTime() - - if (currentTime - lastRest > 1000) { - //add a brief sleep if we have been in this loop for a long time - nestedCountersInstance.countEvent('processing', 'forcedSleep') - await utils.sleep(5) //5ms sleep - lastRest = currentTime - - if ( - currentTime - this.stateManager.currentCycleShardData.calculationTime > - this.config.p2p.cycleDuration * 1000 + 5000 - ) { - nestedCountersInstance.countEvent('processing', 'old cycle data >5s past due') - } - if ( - currentTime - this.stateManager.currentCycleShardData.calculationTime > - this.config.p2p.cycleDuration * 1000 + 11000 - ) { - nestedCountersInstance.countEvent('processing', 'very old cycle data >11s past due') - return //loop will restart. - } - } - - //Handle an odd case where the finally did not catch exiting scope. - if (pushedProfilerTag != null) { - this.profiler.profileSectionEnd(`process-${pushedProfilerTag}`) - this.profiler.profileSectionEnd(`process-patched1-${pushedProfilerTag}`) - pushedProfilerTag = null - } - - currentIndex-- - if (currentIndex < 0) { - break - } - - this.clearDebugAwaitStrings() - - // eslint-disable-next-line security/detect-object-injection - const queueEntry: QueueEntry | undefined = this._transactionQueue[currentIndex] - if (queueEntry == null) { - this.statemanager_fatal(`queueEntry is null`, `currentIndex:${currentIndex}`) - nestedCountersInstance.countEvent('processing', 'error: null queue entry. 
skipping to next TX') - continue - } - if (logFlags.seqdiagram) - this.mainLogger.info( - `0x10052024 ${ipInfo.externalIp} ${shardusGetTime()} 0x0001 currentIndex:${currentIndex} txId:${ - queueEntry.acceptedTx.txId - } state:${queueEntry.state}` - ) - const txTime = queueEntry.txKeys.timestamp - const txAge = currentTime - txTime - - this.debugRecentQueueEntry = queueEntry - - // current queue entry is younger than timeM, so nothing to do yet. - if (txAge < timeM) { - break - } - - if (localRestartCounter < this.queueRestartCounter && lastLog !== this.queueRestartCounter) { - lastLog = this.queueRestartCounter - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('queueRestart_error', `${queueEntry.acceptedTx.txId}`, `qId: ${queueEntry.entryID} qRst:${localRestartCounter} qrstGlobal:${this.queueRestartCounter}}`) - } - - this.stateManager.debugTXHistory[queueEntry.logID] = queueEntry.state - const hasApplyReceipt = queueEntry.signedReceipt != null - const hasReceivedApplyReceipt = queueEntry.receivedSignedReceipt != null - const hasReceivedApplyReceiptForRepair = queueEntry.signedReceiptForRepair != null - const shortID = queueEntry.logID //`${utils.makeShortHash(queueEntry.acceptedTx.id)}` - - // on the off chance we are here with a pass of fail state remove this from the queue. - // log fatal because we do not want to get to this situation. - if (queueEntry.state === 'pass' || queueEntry.state === 'fail') { - this.statemanager_fatal( - `pass or fail entry should not be in queue`, - `txid: ${shortID} state: ${queueEntry.state} receiptEverRequested:${queueEntry.receiptEverRequested} age:${txAge}` - ) - this.removeFromQueue(queueEntry, currentIndex) - continue - } - - //turn off all this logic to futher simplify things - if (this.queueTimingFixes === false) { - // TIME OUT / EXPIRATION CHECKS - // Check if transactions have expired and failed, or if they have timed out and ne need to request receipts. - if (this.stateManager.accountSync.dataSyncMainPhaseComplete === true) { - // Everything in here is after we finish our initial sync - - // didSync: refers to the syncing process. True is for TXs that we were notified of - // but had to delay action on because the initial or a runtime thread was busy syncing on. - - // For normal didSync===false TXs we are expiring them after M3*2 - // This gives a bit of room to attempt a repair. - // if a repair or reciept process fails there are cases below to expire the the - // tx as early as time > M3 - if (txAge > timeM3 * 2 && queueEntry.didSync == false) { - //this.statistics.incrementCounter('txExpired') - //let seenInQueue = this.processQueue_accountSeen(seenAccounts, queueEntry) - - this.statemanager_fatal( - `txExpired1 > M3 * 2. NormalTX Timed out.`, - `txExpired txAge > timeM3*2 && queueEntry.didSync == false. ` + - `txid: ${shortID} state: ${ - queueEntry.state - } applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} hasReceivedApplyReceiptForRepair:${hasReceivedApplyReceiptForRepair} receiptEverRequested:${ - queueEntry.receiptEverRequested - } age:${txAge} ${utils.stringifyReduce(queueEntry.uniqueWritableKeys)}` - ) - if (queueEntry.receiptEverRequested && queueEntry.globalModification === false) { - this.statemanager_fatal( - `txExpired1 > M3 * 2 -!receiptEverRequested`, - `txExpired txAge > timeM3*2 && queueEntry.didSync == false. 
!receiptEverRequested ` + - `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} hasReceivedApplyReceiptForRepair:${hasReceivedApplyReceiptForRepair} receiptEverRequested:${queueEntry.receiptEverRequested} age:${txAge}` - ) - } - if (queueEntry.globalModification) { - this.statemanager_fatal( - `txExpired1 > M3 * 2 -GlobalModification!!`, - `txExpired txAge > timeM3*2 && queueEntry.didSync == false. !receiptEverRequested ` + - `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} hasReceivedApplyReceiptForRepair:${hasReceivedApplyReceiptForRepair} receiptEverRequested:${queueEntry.receiptEverRequested} age:${txAge}` - ) - } - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} txExpired ${utils.stringifyReduce(queueEntry.acceptedTx)}`) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} queueEntry.receivedSignedReceipt: ${utils.stringifyReduce(queueEntry.receivedSignedReceipt)}`) - - /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> M3 * 2. NormalTX Timed out. didSync == false. state:${queueEntry.state} globalMod:${queueEntry.globalModification}`) - - if (configContext.stateManager.disableTxExpiration === false) { - this.setTXExpired(queueEntry, currentIndex, 'old, timeM3 * 2') - continue - } - } - - //This case should not happen now. but we may add it back in later. - //TXs that synced get much longer to have a chance to repair - // if (txAge > timeM3 * 50 && queueEntry.didSync == true) { - // //this.statistics.incrementCounter('txExpired') - - // this.statemanager_fatal(`txExpired2 > M3 * 50. SyncedTX Timed out.`, `txExpired txAge > timeM3 * 50 && queueEntry.didSync == true. ` + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} recievedAppliedReceipt:${hasReceivedApplyReceipt} hasReceivedApplyReceiptForRepair:${hasReceivedApplyReceiptForRepair} receiptEverRequested:${queueEntry.receiptEverRequested} age:${txAge} syncCounter${queueEntry.syncCounter} ${utils.stringifyReduce(queueEntry.uniqueWritableKeys)}`) - // if (queueEntry.globalModification) { - // this.statemanager_fatal(`txExpired2 > M3 * 50. SyncedTX -GlobalModification!!`, `txExpired txAge > timeM3*2 && queueEntry.didSync == false. !receiptEverRequested ` + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} recievedAppliedReceipt:${hasReceivedApplyReceipt} hasReceivedApplyReceiptForRepair:${hasReceivedApplyReceiptForRepair} receiptEverRequested:${queueEntry.receiptEverRequested} age:${txAge}`) - // } - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} txExpired 2 ${utils.stringifyReduce(queueEntry.acceptedTx)} ${queueEntry.didWakeup}`) - // /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} queueEntry.recievedAppliedReceipt 2: ${utils.stringifyReduce(queueEntry.recievedAppliedReceipt)}`) - - // /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> M3 * 50. SyncedTX Timed out. didSync == true. 
state:${queueEntry.state} globalMod:${queueEntry.globalModification}`) - // this.updateTxState(queueEntry, 'expired') - // this.removeFromQueue(queueEntry, currentIndex) - // continue - // } - - // lots of logic about when we can repair or not repair/when to wait etc. - if (this.queueTimingFixes === false) { - // This is the expiry case where requestingReceiptFailed - if (txAge > timeM3 && queueEntry.requestingReceiptFailed) { - //this.statistics.incrementCounter('txExpired') - - this.statemanager_fatal( - `txExpired3 > M3. receiptRequestFail after Timed Out`, - `txExpired txAge > timeM3 && queueEntry.requestingReceiptFailed ` + - `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}` - ) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} txExpired 3 requestingReceiptFailed ${utils.stringifyReduce(queueEntry.acceptedTx)} ${queueEntry.didWakeup}`) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} queueEntry.receivedSignedReceipt 3 requestingReceiptFailed: ${utils.stringifyReduce(queueEntry.receivedSignedReceipt)}`) - - /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> M3. receiptRequestFail after Timed Out. state:${queueEntry.state} globalMod:${queueEntry.globalModification}`) - - if (configContext.stateManager.disableTxExpiration === false) { - this.setTXExpired(queueEntry, currentIndex, 'old, timeM3, requestingReceiptFailed') - continue - } - } - - // This is the expiry case where repairFailed - // TODO. I think as soon as a repair as marked as failed we can expire and remove it from the queue - // But I am leaving this optimizaiton out for now since we really don't want to plan on repairs failing - if (txAge > timeM3 && queueEntry.repairFailed) { - //this.statistics.incrementCounter('txExpired') - - this.statemanager_fatal( - `txExpired3 > M3. repairFailed after Timed Out`, - `txExpired txAge > timeM3 && queueEntry.repairFailed ` + - `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}` - ) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} txExpired 3 repairFailed ${utils.stringifyReduce(queueEntry.acceptedTx)} ${queueEntry.didWakeup}`) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} queueEntry.receivedSignedReceipt 3 repairFailed: ${utils.stringifyReduce(queueEntry.receivedSignedReceipt)}`) - - /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> M3. repairFailed after Timed Out. state:${queueEntry.state} globalMod:${queueEntry.globalModification}`) - - if (configContext.stateManager.disableTxExpiration === false) { - this.setTXExpired(queueEntry, currentIndex, 'old, timeM3, repairFailed') - continue - } - } - - // a few cases to wait for a receipt or request a receipt - if (queueEntry.state != 'await repair' && queueEntry.state != 'commiting') { - //Not yet expired case: getting close to expire so just move to consensing and wait. 
- //Just wait for receipt only if we are awaiting data and it is getting late - if ( - txAge > timeM2_5 && - queueEntry.m2TimeoutReached === false && - queueEntry.globalModification === false && - queueEntry.requestingReceipt === false - ) { - if (queueEntry.state == 'awaiting data') { - // no receipt yet, and state not committing - if (queueEntry.receivedSignedReceipt == null && queueEntry.signedReceipt == null) { - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`Wait for reciept only: txAge > timeM2_5 txid:${shortID} `) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txMissingReceipt3', `${shortID}`, `processAcceptedTxQueue ` + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}`) - - /* prettier-ignore */ nestedCountersInstance.countEvent('txMissingReceipt', `Wait for reciept only: txAge > timeM2.5. state:${queueEntry.state} globalMod:${queueEntry.globalModification}`) - queueEntry.waitForReceiptOnly = true - queueEntry.m2TimeoutReached = true - - if (this.config.stateManager.txStateMachineChanges) { - this.updateTxState(queueEntry, 'await final data', 'processTx2') - } else { - this.updateTxState(queueEntry, 'consensing') - } - continue - } - } - } - - //receipt requesting is not going to work with current timeouts. - if (queueEntry.requestingReceipt === true) { - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - continue - } - - // The TX technically expired past M3, but we will now request reciept in hope that we can repair the tx - if ( - txAge > timeM3 && - queueEntry.requestingReceiptFailed === false && - queueEntry.globalModification === false - ) { - if (this.stateManager.hasReceipt(queueEntry) === false && queueEntry.requestingReceipt === false) { - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.error) this.mainLogger.error(`txAge > timeM3 => ask for receipt now ` + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}`) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txMissingReceipt1', `txAge > timeM3 ${shortID}`, `syncNeedsReceipt ${shortID}`) - - const seen = this.processQueue_accountSeen(seenAccounts, queueEntry) - - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - this.queueEntryRequestMissingReceipt(queueEntry) - - /* prettier-ignore */ nestedCountersInstance.countEvent('txMissingReceipt', `txAge > timeM3 => ask for receipt now. state:${queueEntry.state} globalMod:${queueEntry.globalModification} seen:${seen}`) - queueEntry.waitForReceiptOnly = true - queueEntry.m2TimeoutReached = true - - if (this.config.stateManager.txStateMachineChanges) { - this.updateTxState(queueEntry, 'await final data', 'processTx3') - } else { - this.updateTxState(queueEntry, 'consensing') - } - continue - } - } - } - } - } else { - //check for TX older than 30x M3 and expire them - if (txAge > timeM3 * 50) { - //this.statistics.incrementCounter('txExpired') - - this.statemanager_fatal( - `txExpired4`, - `Still on inital syncing. txExpired txAge > timeM3 * 50. 
` + - `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}` - ) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} txExpired 4 ${utils.stringifyReduce(queueEntry.acceptedTx)}`) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} queueEntry.receivedSignedReceipt 4: ${utils.stringifyReduce(queueEntry.receivedSignedReceipt)}`) - - /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `txExpired txAge > timeM3 * 50. still syncing. state:${queueEntry.state} globalMod:${queueEntry.globalModification}`) - - this.setTXExpired(queueEntry, currentIndex, 'old, timeM3 * 50!!') - continue - } - } - } - - if (this.queueTimingFixes === true) { - //if we are still waiting on an upstream TX at this stage in the pipeline, - //then kill the TX because there is not much hope for it - //This will help make way for other TXs with a better chance - if (queueEntry.state === 'processing' || queueEntry.state === 'awaiting data') { - if (this.processQueue_accountSeen(seenAccounts, queueEntry) === true) { - //adding txSieve time! - if (txAge > timeM2 + queueEntry.txSieveTime) { - if (configContext.stateManager.disableTxExpiration === false) { - /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> M2 canceled due to upstream TXs. state:${queueEntry.state} hasAll:${queueEntry.hasAll} globalMod:${queueEntry.globalModification}`) - //todo only keep on for temporarliy - /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> M2 canceled due to upstream TXs. sieveT:${queueEntry.txSieveTime}`) - this.setTXExpired(queueEntry, currentIndex, 'm2, processing or awaiting') - if (configContext.stateManager.stuckTxQueueFix) continue // we need to skip this TX and move to the next one - } - if (configContext.stateManager.stuckTxQueueFix === false) continue - } - } - } - // check if we seen a vote or has a vote - const hasSeenVote = queueEntry.receivedBestVote != null || queueEntry.ourVote != null - const hasSeenConfirmation = queueEntry.receivedBestConfirmation != null - - // remove TXs that are stuck in the processing queue for 2 min - if ( - configContext.stateManager.removeStuckTxsFromQueue === true && - txAge > configContext.stateManager.stuckTxRemoveTime - ) { - nestedCountersInstance.countEvent( - 'txSafelyRemoved', - `stuck_in_consensus_1 ${configContext.stateManager.stuckTxRemoveTime / 1000}` - ) - this.statemanager_fatal( - `txSafelyRemoved_1`, - `stuck_in_consensus_3 txid: ${shortID} state: ${queueEntry.state} age:${txAge}` - ) - if (logFlags.txCancel) - this.statemanager_fatal(`txSafelyRemoved_1_dump`, `${this.getDebugQueueInfo(queueEntry)}`) - this.removeFromQueue(queueEntry, currentIndex) - continue - } - - if (configContext.stateManager.removeStuckTxsFromQueue2 === true) { - const timeSinceLastVoteMessage = - queueEntry.lastVoteReceivedTimestamp > 0 ? currentTime - queueEntry.lastVoteReceivedTimestamp : 0 - // see if we have been consensing for more than a long time. - // follow up code needs to handle this in a better way - // if there is a broken TX at the end of a chain. this will peel it off. - // any freshly exposed TXs will have a fair amount of time to be in consensus so - // this should minimize the risk of OOS. 
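// Hedged sketch, not part of the diff above: a simplified, standalone view of the
// stuck-transaction removal tiers applied in this stretch of the loop. The config
// field names (stuckTxRemoveTime, stuckTxRemoveTime2) mirror the values read from
// configContext.stateManager in the surrounding code; the helper itself and its
// return labels are hypothetical and only meant to make the ordering of the checks
// easier to follow.
type StuckCheck = 'keep' | 'removeStuck' | 'removeStaleVotes'

function classifyStuckTx(
  txAge: number,
  lastVoteReceivedTimestamp: number,
  now: number,
  cfg: { stuckTxRemoveTime: number; stuckTxRemoveTime2: number }
): StuckCheck {
  // Hard cap: anything sitting in the queue longer than stuckTxRemoveTime is removed.
  if (txAge > cfg.stuckTxRemoveTime) return 'removeStuck'
  // Vote staleness: if we have seen at least one vote but nothing for a long time,
  // peel the entry off so it cannot block the transactions queued behind it.
  const sinceLastVote = lastVoteReceivedTimestamp > 0 ? now - lastVoteReceivedTimestamp : 0
  if (sinceLastVote > cfg.stuckTxRemoveTime2) return 'removeStaleVotes'
  return 'keep'
}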
- if (timeSinceLastVoteMessage > configContext.stateManager.stuckTxRemoveTime2) { - nestedCountersInstance.countEvent( - 'txSafelyRemoved', - `stuck_in_consensus_2 tx waiting for votes more than ${ - configContext.stateManager.stuckTxRemoveTime2 / 1000 - } seconds. state: ${queueEntry.state}` - ) - this.statemanager_fatal( - `txSafelyRemoved_2`, - `stuck_in_consensus_2. waiting for votes. txid: ${shortID} state: ${ - queueEntry.state - } age:${txAge} tx first vote seen ${timeSinceLastVoteMessage / 1000} seconds ago` - ) - if (logFlags.txCancel) - this.statemanager_fatal(`txSafelyRemoved_2_dump`, `${this.getDebugQueueInfo(queueEntry)}`) - this.removeFromQueue(queueEntry, currentIndex) - continue - } - } - - if (configContext.stateManager.removeStuckTxsFromQueue3 === true) { - if (queueEntry.state === 'consensing' && txAge > configContext.stateManager.stuckTxRemoveTime3) { - const anyVotes = queueEntry.lastVoteReceivedTimestamp > 0 - nestedCountersInstance.countEvent( - 'txSafelyRemoved', - `stuck_in_consensus_3 tx in consensus more than ${ - configContext.stateManager.stuckTxRemoveTime3 / 1000 - } seconds. state: ${queueEntry.state} has seen vote: ${anyVotes}` - ) - this.statemanager_fatal( - `txSafelyRemoved_3`, - `stuck_in_consensus_3. txid: ${shortID} state: ${queueEntry.state} age:${txAge}` - ) - if (logFlags.txCancel) - this.statemanager_fatal(`txSafelyRemoved_3_dump`, `${this.getDebugQueueInfo(queueEntry)}`) - this.removeFromQueue(queueEntry, currentIndex) - continue - } - } - - if (txAge > timeM3 + configContext.stateManager.noVoteSeenExpirationTime && !hasSeenVote) { - // seen no vote but past timeM3 + noVoteSeenExpirationTime - // nestedCountersInstance.countEvent('txExpired', `> timeM3 + noVoteSeenExpirationTime`) - // this.mainLogger.error(`${queueEntry.logID} txAge > timeM3 + noVoteSeenExpirationTime general case. no vote seen`) - if (configContext.stateManager.disableTxExpiration === false) { - this.setTXExpired( - queueEntry, - currentIndex, - 'txAge > timeM3 + noVoteSeenExpirationTime general case. 
no vote seen' - ) - continue - } - } - if ( - txAge > timeM3 + configContext.stateManager.voteSeenExpirationTime && - hasSeenVote && - !hasSeenConfirmation - ) { - if (configContext.stateManager.disableTxExpiration === false) { - nestedCountersInstance.countEvent('txExpired', `> timeM3 + voteSeenExpirationTime`) - this.mainLogger.error( - `${queueEntry.logID} txAge > timeM3 + voteSeenExpirationTime general case has vote but fail to generate receipt` - ) - this.setTXExpired( - queueEntry, - currentIndex, - 'txAge > timeM3 + voteSeenExpirationTime general case has vote but fail' + ' to' + ' commit the tx' - ) - continue - } - } - if (txAge > timeM3 + configContext.stateManager.confirmationSeenExpirationTime) { - let shouldExpire = true - if (queueEntry.hasRobustConfirmation && queueEntry.isInExecutionHome) { - nestedCountersInstance.countEvent( - 'txExpired', - `> timeM3 + confirmSeenExpirationTime but hasRobustConfirmation = true, not expiring` - ) - shouldExpire = false - } - if (shouldExpire && configContext.stateManager.disableTxExpiration === false) { - nestedCountersInstance.countEvent( - 'txExpired', - `> timeM3 + confirmSeenExpirationTime hasRobustConfirmation: ${queueEntry.hasRobustConfirmation}` - ) - this.setTXExpired( - queueEntry, - currentIndex, - 'txAge > timeM3 + confirmSeenExpirationTime general case has' + - ' vote and robust confirmation but fail' + - ' to' + - ' commit the tx' - ) - continue - } - } - if (txAge > timeM3 + configContext.stateManager.confirmationSeenExpirationTime + 10000) { - // nestedCountersInstance.countEvent('txExpired', `txAge > timeM3 + confirmSeenExpirationTime + 10s`) - // maybe we missed the spread_appliedReceipt2 gossip, go to await final data if we have a confirmation - // we will request the final data (and probably receipt2) - if ( - configContext.stateManager.disableTxExpiration && - hasSeenVote && - queueEntry.firstVoteReceivedTimestamp > 0 - ) { - // nestedCountersInstance.countEvent('txExpired', `> timeM3 + confirmSeenExpirationTime state: ${queueEntry.state} hasSeenVote: ${hasSeenVote} hasSeenConfirmation: ${hasSeenConfirmation} waitForReceiptOnly: ${queueEntry.waitForReceiptOnly}`) - if (this.config.stateManager.txStateMachineChanges) { - if (configContext.stateManager.stuckTxQueueFix) { - if (configContext.stateManager.singleAccountStuckFix) { - const timeSinceVoteSeen = shardusGetTime() - queueEntry.firstVoteReceivedTimestamp - // if we has seenVote but still stuck in consensing state, we should go to await final data and ask receipt+data - - //note: this block below may not be what we want in POQo, but is behind a long time setting for now (in dapp) - //need to consider some clean up here - if ( - queueEntry.state === 'consensing' && - timeSinceVoteSeen > configContext.stateManager.stuckTxMoveTime - ) { - if (logFlags.debug) - this.mainLogger.debug( - `txId ${queueEntry.logID} move stuck consensing tx to await final data. 
timeSinceVoteSeen: ${timeSinceVoteSeen} ms` - ) - nestedCountersInstance.countEvent('consensus', `move stuck consensing tx to await final data.`) - this.updateTxState(queueEntry, 'await final data') - } - } else { - // make sure we are not resetting the state and causing state start timestamp to be updated repeatedly - if (queueEntry.state !== 'await final data' && queueEntry.state !== 'await repair') - this.updateTxState(queueEntry, 'await final data') - } - } else { - this.updateTxState(queueEntry, 'await final data', 'processTx4') - } - } else { - this.updateTxState(queueEntry, 'consensing') - } - if (configContext.stateManager.stuckTxQueueFix === false) continue // we should not skip this TX - } - if (configContext.stateManager.disableTxExpiration === false) { - this.setTXExpired(queueEntry, currentIndex, 'txAge > timeM3 + confirmSeenExpirationTime + 10s') - continue - } - } - - //If we are past time M2 there are few cases where we should give up on a TX right away - //Handle that here - if (txAge > timeM2) { - let expireTx = false - let reason = '' - //not sure this path can even happen. but we addding it for completeness in case it comes back (abilty to requets receipt) - if (queueEntry.requestingReceiptFailed) { - expireTx = true - reason = 'requestingReceiptFailed' - } - if (queueEntry.repairFailed) { - expireTx = true - reason = 'repairFailed' - } - if (expireTx) { - this.statemanager_fatal( - `txExpired3 > M2. fail ${reason}`, - `txExpired txAge > timeM2 fail ${reason} ` + - `txid: ${shortID} state: ${queueEntry.state} hasAll:${queueEntry.hasAll} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}` - ) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} txExpired >m2 fail ${reason} ${utils.stringifyReduce(queueEntry.acceptedTx)} ${queueEntry.didWakeup}`) - //if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} queueEntry.recievedAppliedReceipt 3 requestingReceiptFailed: ${utils.stringifyReduce(queueEntry.recievedAppliedReceipt)}`) - - /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> timeM2 fail ${reason} state:${queueEntry.state} hasAll:${queueEntry.hasAll} globalMod:${queueEntry.globalModification} `) - - if (configContext.stateManager.disableTxExpiration === false) { - this.setTXExpired(queueEntry, currentIndex, 'm2 ' + reason) - } - } - } - - //if(extendedTimeoutLogic === true){ - - //} - const isConsensing = queueEntry.state === 'consensing' - //let isCommiting = queueEntry.state === 'commiting' - const isAwaitingFinalData = queueEntry.state === 'await final data' - const isInExecutionHome = queueEntry.isInExecutionHome - //note this wont work with old receipts but we can depricate old receipts soon - const signedReceipt = this.stateManager.getSignedReceipt(queueEntry) - const hasReceipt = signedReceipt != null - const hasCastVote = queueEntry.ourVote != null - - let extraTime = 0 - //let cantExpire = false - let matchingReceipt = false - - if (isInExecutionHome && isConsensing && hasReceipt === false) { - //give a bit of extra time to wait for votes to come in - extraTime = timeM * 0.5 - } - - //this should cover isCommiting - if (isInExecutionHome && hasReceipt) { - matchingReceipt = this.stateManager.transactionConsensus.hasAppliedReceiptMatchingPreApply(queueEntry, null) - //give even more time - extraTime = timeM - } - - // if we have not added extra time yet then add time for a 
vote. - if (extraTime < timeM && hasCastVote === true) { - //this would be a way to just statically add to the time - //extraTime = timeM - const ageDiff = queueEntry.voteCastAge + timeM - timeM3 - if (ageDiff > 0) { - extraTime = ageDiff - } - } - - if (isAwaitingFinalData) { - if (hasReceipt) { - extraTime = timeM2 * 1.5 - } else { - extraTime = timeM - } - } - - //round extraTime to up to nearest 500ms (needed for counter aggregation) - if (extraTime > 0) { - extraTime = Math.ceil(extraTime / 500) * 500 - if (extraTime > timeM) { - extraTime = timeM - } - } - - // Have a hard cap where we ALMOST expire but NOT remove TXs from queue after time > M3 - if ( - txAge > timeM3 + extraTime && - queueEntry.isInExecutionHome && - queueEntry.almostExpired == null && - configContext.stateManager.disableTxExpiration === false - ) { - const hasVoted = queueEntry.ourVote != null - const receivedVote = queueEntry.receivedBestVote != null - if (!receivedVote && !hasVoted && queueEntry.almostExpired == null) { - this.statemanager_fatal( - `setTxAlmostExpired > M3. general case`, - `setTxAlmostExpired txAge > timeM3 general case ` + - `txid: ${shortID} state: ${queueEntry.state} hasAll:${ - queueEntry.hasAll - } applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge} hasReceipt:${hasReceipt} matchingReceipt:${matchingReceipt} isInExecutionHome:${isInExecutionHome} hasVote: ${ - queueEntry.receivedBestVote != null - }` - ) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `setTxAlmostExpired ${queueEntry.txGroupDebug} txExpired 3 requestingReceiptFailed ${utils.stringifyReduce(queueEntry.acceptedTx)} ${queueEntry.didWakeup}`) - //if (logFlags.playback) this.logger.playbackLogNote('txExpired', `${shortID}`, `${queueEntry.txGroupDebug} queueEntry.recievedAppliedReceipt 3 requestingReceiptFailed: ${utils.stringifyReduce(queueEntry.recievedAppliedReceipt)}`) - - /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `setTxAlmostExpired > M3. general case state:${queueEntry.state} hasAll:${queueEntry.hasAll} globalMod:${queueEntry.globalModification} hasReceipt:${hasReceipt} matchingReceipt:${matchingReceipt} isInExecutionHome:${isInExecutionHome} hasVote: ${queueEntry.receivedBestVote != null}`) - /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `setTxAlmostExpired > M3. general case sieveT:${queueEntry.txSieveTime} extraTime:${extraTime}`) - - nestedCountersInstance.countEvent( - 'txExpired', - 'set to almostExpired because we have not voted' + ' or received' + ' a' + ' vote' - ) - this.setTxAlmostExpired(queueEntry, currentIndex, 'm3 general: almostExpired not voted or received vote') - } - // continue - } - - //TODO? could we remove a TX from the queu as soon as a receit was requested? - //TODO?2 should we allow a TX to use a repair op shortly after being expired? (it would have to be carefull, and maybe use some locking) - } - - const txStartTime = shardusGetTime() - - // HANDLE TX logic based on state. 
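// Hedged sketch, not part of the diff above: the forward path of a queue entry
// through the per-state handlers that follow, using the same state strings the
// surrounding code passes to updateTxState. Side paths such as 'await repair',
// 'await final data', 'expired', and the terminal 'pass'/'fail' states are handled
// elsewhere in the loop and are omitted here for brevity.
const forwardTransitions: Record<string, string> = {
  aging: 'processing', // entry is old enough to start real work
  processing: 'awaiting data', // account data has been shared with corresponding nodes
  'awaiting data': 'consensing', // all data present, pre-apply done, vote cast
  consensing: 'commiting', // a receipt was produced or received and matches our pre-apply
}
// 'syncing' entries are held (their accounts stay marked as seen) until the sync
// operation that blocked them completes, then they re-enter this pipeline.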
- try { - this.profiler.profileSectionStart(`process-${queueEntry.state}`) - if (logFlags.profiling_verbose) - profilerInstance.scopedProfileSectionStart(`scoped-process-${queueEntry.state}`, false) - pushedProfilerTag = queueEntry.state - - if (queueEntry.state === 'syncing') { - ///////////////////////////////////////////////--syncing--//////////////////////////////////////////////////////////// - // a queueEntry will be put in syncing state if it is queue up while we are doing initial syncing or if - // we are syncing a range of new edge partition data. - // we hold it in limbo until the syncing operation is complete. When complete all of these TXs are popped - // and put back into the queue. If it has been too long they will go into a repair to receipt mode. - // IMPORTANT thing is that we mark the accounts as seen, because we cant use this account data - // in TXs that happen after until this is resolved. - - //the syncing process is not fully reliable when popping synced TX. this is a backup check to see if we can get out of syncing state - if (queueEntry.syncCounter <= 0) { - nestedCountersInstance.countEvent('sync', 'syncing state needs bump') - - queueEntry.waitForReceiptOnly = true - - // old logic changed state here (seen commented out in the new mode) - if (this.config.stateManager.txStateMachineChanges) { - // this.updateTxState(queueEntry, 'await final data') - } else { - this.updateTxState(queueEntry, 'await final data', 'processTx5') - } - } - - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - } else if (queueEntry.state === 'aging') { - queueEntry.executionDebug = { a: 'go' } - ///////////////////////////////////////////--aging--//////////////////////////////////////////////////////////////// - // We wait in the aging phase, and mark accounts as seen to prevent a TX that is after this from using or changing data - // on the accounts in this TX - // note that code much earlier in the loop rejects any queueEntries younger than time M - this.updateTxState(queueEntry, 'processing') - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - } - if (queueEntry.state === 'processing') { - ////////////////////////////////////////--processing--/////////////////////////////////////////////////////////////////// - if (this.processQueue_accountSeen(seenAccounts, queueEntry) === false) { - // Processing is when we start doing real work. the task is to read and share the correct account data to the correct - // corresponding nodes and then move into awaiting data phase - - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - const time = shardusGetTime() - try { - // TODO re-evaluate if it is correct for us to share info for a global modifing TX. 
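// Hedged sketch, not part of the diff above: a minimal illustration of the
// "seen accounts" bookkeeping that the processing states above rely on. The real
// processQueue_accountSeen / processQueue_markAccountsSeen methods work on queue
// entries and shard data; this standalone version only shows the core idea that a
// transaction must wait while any of its accounts is already claimed by an earlier
// entry in the same pass over the queue.
function accountSeenSketch(seen: Set<string>, accountKeys: string[]): boolean {
  return accountKeys.some((key) => seen.has(key))
}

function markAccountsSeenSketch(seen: Set<string>, accountKeys: string[]): void {
  for (const key of accountKeys) seen.add(key)
}
// Older entries mark their accounts first as the loop walks the queue, so a younger
// entry touching the same account sees true here and keeps waiting rather than
// reading or sharing stale account data.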
- //if(queueEntry.globalModification === false) { - const awaitStart = shardusGetTime() - - if (this.executeInOneShard === true) { - /* prettier-ignore */ this.setDebugLastAwaitedCall('this.stateManager.transactionQueue.tellCorrespondingNodes(queueEntry)') - profilerInstance.scopedProfileSectionStart(`scoped-tellCorrespondingNodes`) - if (configContext.p2p.useFactCorrespondingTell) { - await this.factTellCorrespondingNodes(queueEntry) - } else { - await this.tellCorrespondingNodes(queueEntry) - } - profilerInstance.scopedProfileSectionEnd(`scoped-tellCorrespondingNodes`) - /* prettier-ignore */ this.setDebugLastAwaitedCall('this.stateManager.transactionQueue.tellCorrespondingNodes(queueEntry)', DebugComplete.Completed) - } else { - /* prettier-ignore */ this.setDebugLastAwaitedCall('this.stateManager.transactionQueue.tellCorrespondingNodesOld(queueEntry)') - //specific fixes were needed for tellCorrespondingNodes. tellCorrespondingNodesOld is the old version before fixes - if (configContext.p2p.useFactCorrespondingTell) { - await this.factTellCorrespondingNodes(queueEntry) - } else { - await this.tellCorrespondingNodes(queueEntry) - } - /* prettier-ignore */ this.setDebugLastAwaitedCall('this.stateManager.transactionQueue.tellCorrespondingNodesOld(queueEntry)', DebugComplete.Completed) - } - queueEntry.dataSharedTimestamp = shardusGetTime() - if (logFlags.debug) - /* prettier-ignore */ this.mainLogger.debug(`tellCorrespondingNodes: ${queueEntry.logID} dataSharedTimestamp: ${queueEntry.dataSharedTimestamp}`) - - this.updateSimpleStatsObject( - processStats.awaitStats, - 'tellCorrespondingNodes', - shardusGetTime() - awaitStart - ) - - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_processing', `${shortID}`, `qId: ${queueEntry.entryID} qRst:${localRestartCounter} values: ${this.processQueue_debugAccountData(queueEntry, app)}`) - //} - } catch (ex) { - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug('processAcceptedTxQueue2 tellCorrespondingNodes:' + ex.name + ': ' + ex.message + ' at ' + ex.stack) - this.statemanager_fatal( - `processAcceptedTxQueue2_ex`, - 'processAcceptedTxQueue2 tellCorrespondingNodes:' + ex.name + ': ' + ex.message + ' at ' + ex.stack - ) - queueEntry.dataSharedTimestamp = shardusGetTime() - nestedCountersInstance.countEvent(`processing`, `tellCorrespondingNodes fail`) - - queueEntry.executionDebug.process1 = 'tell fail' - } finally { - this.updateTxState(queueEntry, 'awaiting data', 'mainLoop') - - //if we are not going to execute the TX go strait to consensing - if ( - queueEntry.globalModification === false && - this.executeInOneShard && - queueEntry.isInExecutionHome === false - ) { - //is there a way to preemptively forward data without there being tons of repair.. - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 isInExecutionHome === false. 
set state = 'consensing' tx:${queueEntry.logID} ts:${queueEntry.acceptedTx.timestamp}`) - this.updateTxState(queueEntry, 'consensing', 'fromProcessing') - } - } - queueEntry.executionDebug.processElapsed = shardusGetTime() - time - } else { - const upstreamTx = this.processQueue_getUpstreamTx(seenAccounts, queueEntry) - if (upstreamTx == null) { - /* prettier-ignore */ if (logFlags.seqdiagram && queueEntry?.upStreamBlocker !== 'null') { - queueEntry.upStreamBlocker = 'null' // 'dirty' - this.seqLogger.info(`0x53455104 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: upstream:null`) - } - nestedCountersInstance.countEvent('processing', 'busy waiting the upstream tx.' + ' but it is null') - } else { - if (upstreamTx.logID === queueEntry.logID) { - /* prettier-ignore */ if (logFlags.seqdiagram && queueEntry?.upStreamBlocker !== upstreamTx.logID) { - queueEntry.upStreamBlocker = upstreamTx.logID - this.seqLogger.info(`0x53455104 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: upstream:same`) - } - //not 100% confident that upstreamTX check works. - if (upstreamTx === queueEntry) { - //this queue entry could be marked as seen due to aging above - nestedCountersInstance.countEvent( - 'processing', - 'busy waiting but the upstream tx reference matches our queue entry' - ) - } else { - nestedCountersInstance.countEvent('processing', 'busy waiting the upstream tx but it is same txId') - } - } else { - /* prettier-ignore */ if (logFlags.seqdiagram && queueEntry?.upStreamBlocker !== upstreamTx.logID) { - queueEntry.upStreamBlocker = upstreamTx.logID - this.seqLogger.info(`0x53455104 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} Note over ${NodeList.activeIdToPartition.get(Self.id)}: upstream:${upstreamTx.logID}`) - } - nestedCountersInstance.countEvent( - 'processing', - `busy waiting the upstream tx to complete. state ${queueEntry.state}` - ) - } - } - } - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - } - if (queueEntry.state === 'awaiting data') { - queueEntry.executionDebug.log = 'entered awaiting data' - - ///////////////////////////////////////--awaiting data--//////////////////////////////////////////////////////////////////// - - // Wait for all data to be aquired. - // Once we have all the data we need we can move to consensing phase. - // IF this is a global account it will go strait to commiting phase since the data was shared by other means. - - // 20240709 this if/else looks like it can go away - // if (this.queueTimingFixes === true) { - // if (txAge > timeM2_5) { - // // const isBlocked = this.processQueue_accountSeen(seenAccounts, queueEntry) - // // //need to review this in context of sharding - // // /* prettier-ignore */ nestedCountersInstance.countEvent('txExpired', `> M2.5 canceled due to lack of progress. state:${queueEntry.state} hasAll:${queueEntry.hasAll} globalMod:${queueEntry.globalModification} isBlocked:${isBlocked}`) - // // const missingAccounts = this.queueEntryListMissingData(queueEntry) - // // nestedCountersInstance.countEvent('txExpired', `missing accounts: ${missingAccounts.length}`) - // // if (logFlags.playback) { - // // this.logger.playbackLogNote( - // // 'txExpired>M2.5', - // // `${shortID}`, - // // `> M2.5 canceled due to lack of progress. 
state:${queueEntry.state} hasAll:${ - // // queueEntry.hasAll - // // } globalMod:${ - // // queueEntry.globalModification - // // } isBlocked:${isBlocked} missing:${utils.stringifyReduce(missingAccounts)}` - // // ) - // // } - // // //Log as error also.. can comment this out later - // // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`txExpired > M2.5 canceled due to lack of progress. state:${queueEntry.state} hasAll:${queueEntry.hasAll} globalMod:${queueEntry.globalModification} isBlocked:${isBlocked} missing:${utils.stringifyReduce(missingAccounts)}`) - // // this.setTXExpired(queueEntry, currentIndex, 'm2.5 awaiting data') - // // continue - // } - // } else { - // // catch all in case we get waiting for data - // if (txAge > timeM2_5) { - // this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - // /* prettier-ignore */ nestedCountersInstance.countEvent('processing', `awaiting data txAge > m2.5 set to consensing hasAll:${queueEntry.hasAll} hasReceivedApplyReceipt:${hasReceivedApplyReceipt}`) - - // queueEntry.waitForReceiptOnly = true - - // if(this.config.stateManager.txStateMachineChanges){ - // this.updateTxState(queueEntry, 'await final data', 'processTx6') - // } else { - // this.updateTxState(queueEntry, 'consensing') - // } - // continue - // } - // } - - // TODO review this block below in more detail. - // check if we have all accounts - if (queueEntry.hasAll === false && txAge > timeM2) { - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - - if (queueEntry.pendingDataRequest === true) { - //early out after marking seen, because we are already asking for data - //need to review this in context of sharding - nestedCountersInstance.countEvent('processing', 'awaiting data. pendingDataRequest') - continue - } - - if (this.queueEntryHasAllData(queueEntry) === true) { - // I think this can't happen - /* prettier-ignore */ nestedCountersInstance.countEvent('processing', 'data missing at t>M2. but not really. investigate further') - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_hadDataAfterall', `${shortID}`, `This is kind of an error, and should not happen`) - continue - } - - // This code is wrong, so disabling it for now - // if ( - // this.queueTimingFixes === true && - // this.processQueue_accountSeen(seenAccounts, queueEntry) === true - // ) { - // //we are stuck in line so no cause to ask for data yet. - - // //TODO may need a flag that know if a TX was stuck until time m.. then let it not - // //ask for other accoun data right away... - - // //This counter seems wrong. the processQueue_accountSeen is just detecting where we - // // called processQueue_markAccountsSeen before - // nestedCountersInstance.countEvent(`processing`, `awaiting data. stuck in line - but not really`) - // continue - // } - - //TODO check for receipt and move to repair state / await final data - - if (this.config.stateManager.awaitingDataCanBailOnReceipt) { - const signedReceipt = this.stateManager.getSignedReceipt(queueEntry) - if (signedReceipt != null) { - //we saw a receipt so we can move to await final data - nestedCountersInstance.countEvent( - 'processing', - 'awaitingDataCanBailOnReceipt: activated. tx state changed from awaiting data to await final data' - ) - this.updateTxState(queueEntry, 'await final data', 'receipt while waiting for initial data') - continue - } - } - - if (this.config.stateManager.requestAwaitedDataAllowed) { - // Before we turn this back on we must set the correct conditions. 
- // our node may be unaware of how other nodes have upstream blocking TXs that - // prevent them from sharing data. The only safe way to know if we can ask for data - // is to know another node has voted but this has some issues as well - - // 7. Manually request missing state - try { - nestedCountersInstance.countEvent('processing', 'data missing at t>M2. request data') - // Await note: current thinking is that is is best to not await this call. - this.queueEntryRequestMissingData(queueEntry) - } catch (ex) { - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug('processAcceptedTxQueue2 queueEntryRequestMissingData:' + ex.name + ': ' + ex.message + ' at ' + ex.stack) - this.statemanager_fatal( - `processAcceptedTxQueue2_missingData`, - 'processAcceptedTxQueue2 queueEntryRequestMissingData:' + - ex.name + - ': ' + - ex.message + - ' at ' + - ex.stack - ) - } - } - } else if (queueEntry.hasAll) { - queueEntry.executionDebug.log1 = 'has all' - - // we have all the data, but we need to make sure there are no upstream TXs using accounts we need first. - if (this.processQueue_accountSeen(seenAccounts, queueEntry) === false) { - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - - // As soon as we have all the data we preApply it and then send out a vote - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_preApplyTx', `${shortID}`, `qId: ${queueEntry.entryID} qRst:${localRestartCounter} values: ${this.processQueue_debugAccountData(queueEntry, app)} AcceptedTransaction: ${utils.stringifyReduce(queueEntry.acceptedTx)}`) - - // TODO sync related need to reconsider how to set this up again - // if (queueEntry.didSync) { - // /* prettier-ignore */ if (logFlags.playback ) this.logger.playbackLogNote('shrd_sync_consensing', `${queueEntry.acceptedTx.id}`, ` qId: ${queueEntry.entryID}`) - // // if we did sync it is time to JIT query local data. alternatively could have other nodes send us this data, but that could be very high bandwidth. 
- // for (let key of queueEntry.syncKeys) { - // let wrappedState = await this.app.getRelevantData(key, queueEntry.acceptedTx.data) - // /* prettier-ignore */ if (logFlags.playback ) this.logger.playbackLogNote('shrd_sync_getLocalData', `${queueEntry.acceptedTx.id}`, ` qId: ${queueEntry.entryID} key:${utils.makeShortHash(key)} hash:${wrappedState.stateId}`) - // queueEntry.localCachedData[key] = wrappedState.localCache - // } - // } - - try { - //This is a just in time check to make sure our involved accounts - //have not changed after our TX timestamp - const accountsValid = this.checkAccountTimestamps(queueEntry) - if (accountsValid === false) { - this.updateTxState(queueEntry, 'consensing') - queueEntry.preApplyTXResult = { - applied: false, - passed: false, - applyResult: 'failed account TS checks', - reason: 'apply result', - applyResponse: null, - } - continue - } - - if (queueEntry.transactionGroup.length > 1) { - queueEntry.robustAccountDataPromises = {} - } - - queueEntry.executionDebug.log2 = 'call pre apply' - const awaitStart = shardusGetTime() - /* prettier-ignore */ this.setDebugLastAwaitedCall('this.stateManager.transactionQueue.preApplyTransaction(queueEntry)') - let txResult = undefined - if (this.config.stateManager.transactionApplyTimeout > 0) { - //use the withTimeout from util/promises to call preApplyTransaction with a timeout - txResult = await withTimeout( - () => this.preApplyTransaction(queueEntry), - this.config.stateManager.transactionApplyTimeout - ) - if (txResult === 'timeout') { - //if we got a timeout, we need to set the txResult to null - txResult = null - nestedCountersInstance.countEvent('processing', 'timeout-preApply') - this.statemanager_fatal( - 'timeout-preApply', - `preApplyTransaction timed out for txid: ${ - queueEntry.logID - } ${this.getDebugProccessingStatus()}` - ) - //need to clear any stuck fifo locks. Would be better to solve upstream problems. 
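// Hedged sketch, not part of the diff above and not the actual util/promises
// implementation: one way a timeout wrapper with the behavior relied on above
// could look, resolving to the literal string 'timeout' when the wrapped call does
// not settle within the allowed time (the same sentinel value the caller checks
// before clearing fifo locks).
async function withTimeoutSketch<T>(fn: () => Promise<T>, ms: number): Promise<T | 'timeout'> {
  let timer: ReturnType<typeof setTimeout> | undefined
  const timedOut = new Promise<'timeout'>((resolve) => {
    timer = setTimeout(() => resolve('timeout'), ms)
  })
  try {
    // Whichever promise settles first wins the race; the loser is ignored.
    return await Promise.race([fn(), timedOut])
  } finally {
    if (timer !== undefined) clearTimeout(timer)
  }
}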
- this.stateManager.forceUnlockAllFifoLocks('timeout-preApply') - } - } else { - txResult = await this.preApplyTransaction(queueEntry) - } - - /* prettier-ignore */ this.setDebugLastAwaitedCall('this.stateManager.transactionQueue.preApplyTransaction(queueEntry)', DebugComplete.Completed) - this.updateSimpleStatsObject( - processStats.awaitStats, - 'preApplyTransaction', - shardusGetTime() - awaitStart - ) - - queueEntry.executionDebug.log3 = 'called pre apply' - queueEntry.executionDebug.txResult = txResult - - if ( - configContext.stateManager.forceVoteForFailedPreApply || - (txResult && txResult.applied === true) - ) { - this.updateTxState(queueEntry, 'consensing') - - queueEntry.preApplyTXResult = txResult - - // make sure our data wrappers are upt to date with the correct hash and timstamp - for (const key of Object.keys(queueEntry.collectedData)) { - // eslint-disable-next-line security/detect-object-injection - const wrappedAccount = queueEntry.collectedData[key] - const { timestamp, hash } = this.app.getTimestampAndHashFromAccount(wrappedAccount.data) - if (wrappedAccount.timestamp != timestamp) { - wrappedAccount.timestamp = timestamp - nestedCountersInstance.countEvent('transactionQueue', 'correctedTimestamp') - } - // eslint-disable-next-line security/detect-possible-timing-attacks - if (wrappedAccount.stateId != hash) { - wrappedAccount.stateId = hash - nestedCountersInstance.countEvent('transactionQueue', 'correctedHash') - } - } - - //Broadcast our vote - if (queueEntry.noConsensus === true) { - // not sure about how to share or generate an applied receipt though for a no consensus step - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_preApplyTx_noConsensus', `${shortID}`, ``) - - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 noConsensus : ${queueEntry.logID} `) - - this.updateTxState(queueEntry, 'commiting') - - queueEntry.hasValidFinalData = true - // TODO Global receipts? do we want them? - // if(queueEntry.globalModification === false){ - // //Send a special receipt because this is a set command. - // } - } else { - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_preApplyTx_createAndShareVote', `${shortID}`, ``) - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 calling createAndShareVote : ${queueEntry.logID} `) - const awaitStart = shardusGetTime() - - queueEntry.voteCastAge = txAge - /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.createAndShareVote(queueEntry)' ) - await this.stateManager.transactionConsensus.createAndShareVote(queueEntry) - /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.createAndShareVote(queueEntry)', DebugComplete.Completed ) - this.updateSimpleStatsObject( - processStats.awaitStats, - 'createAndShareVote', - shardusGetTime() - awaitStart - ) - } - } else { - //There was some sort of error when we tried to apply the TX - //Go directly into 'consensing' state, because we need to wait for a receipt that is good. - /* prettier-ignore */ nestedCountersInstance.countEvent('processing', `txResult apply error. 
applied: ${txResult?.applied}`) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`processAcceptedTxQueue2 txResult problem txid:${queueEntry.logID} res: ${utils.stringifyReduce(txResult)} `) - queueEntry.waitForReceiptOnly = true - - // if apply failed, we need to go to consensing to get a receipt - this.updateTxState(queueEntry, 'consensing') - //TODO: need to flag this case so that it does not artificially increase the network load - } - } catch (ex) { - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug('processAcceptedTxQueue2 preApplyAcceptedTransaction:' + ex.name + ': ' + ex.message + ' at ' + ex.stack) - this.statemanager_fatal( - `processAcceptedTxQueue2b_ex`, - 'processAcceptedTxQueue2 preApplyAcceptedTransaction:' + - ex.name + - ': ' + - ex.message + - ' at ' + - ex.stack - ) - } finally { - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_preapplyFinish', `${shortID}`, `qId: ${queueEntry.entryID} qRst:${localRestartCounter} values: ${this.processQueue_debugAccountData(queueEntry, app)} AcceptedTransaction: ${utils.stringifyReduce(queueEntry.acceptedTx)}`) - } - } else { - queueEntry.executionDebug.logBusy = 'has all, but busy' - nestedCountersInstance.countEvent('processing', 'has all, but busy') - } - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - } else { - // mark accounts as seen while we are waiting for data - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - } - } else if (queueEntry.state === 'consensing') { - /////////////////////////////////////////--consensing--////////////////////////////////////////////////////////////////// - if (this.processQueue_accountSeen(seenAccounts, queueEntry) === false) { - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - - let didNotMatchReceipt = false - - let finishedConsensing = false - let result: SignedReceipt - - // if (this.usePOQo) { - // Try to produce receipt - // If receipt made, tellx128 it to execution group - // that endpoint should then factTellCorrespondingNodesFinalData - const receipt2 = queueEntry.receivedSignedReceipt ?? 
queueEntry.signedReceipt - if (receipt2 != null) { - if (logFlags.debug) - this.mainLogger.debug( - `processAcceptedTxQueue2 consensing : ${queueEntry.logID} receiptRcv:${hasReceivedApplyReceipt}` - ) - nestedCountersInstance.countEvent(`consensus`, 'tryProduceReceipt receipt2 != null') - //we have a receipt2, so we can make a receipt - result = queueEntry.signedReceipt - } else { - result = await this.stateManager.transactionConsensus.tryProduceReceipt(queueEntry) - } - // } - // else if (this.useNewPOQ) { - // this.stateManager.transactionConsensus.confirmOrChallenge(queueEntry) - - // if (queueEntry.pendingConfirmOrChallenge.size > 0 && queueEntry.robustQueryVoteCompleted === true && queueEntry.acceptVoteMessage === false) { - // this.mainLogger.debug(`processAcceptedTxQueue2 consensing : ${queueEntry.logID} pendingConfirmOrChallenge.size = ${queueEntry.pendingConfirmOrChallenge.size}`) - // for (const [nodeId, confirmOrChallenge] of queueEntry.pendingConfirmOrChallenge) { - // const appendSuccessful = this.stateManager.transactionConsensus.tryAppendMessage(queueEntry, confirmOrChallenge) - // if (appendSuccessful) { - // // we need forward the message to other nodes if append is successful - // const payload = confirmOrChallenge - // const gossipGroup = this.stateManager.transactionQueue.queueEntryGetTransactionGroup(queueEntry) - // Comms.sendGossip('spread_confirmOrChallenge', payload, '', null, gossipGroup, false, 10, queueEntry.acceptedTx.txId) - // queueEntry.gossipedConfirmOrChallenge = true - // } - // } - // queueEntry.pendingConfirmOrChallenge = new Map() - // this.mainLogger.debug(`processAcceptedTxQueue2 consensing : ${queueEntry.logID} reset pendingConfirmOrChallenge.size = ${queueEntry.pendingConfirmOrChallenge.size}`) - // } - - // // try to produce a receipt - // /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 consensing : ${queueEntry.logID} receiptRcv:${hasReceivedApplyReceipt}`) - - // const receipt2 = queueEntry.recievedAppliedReceipt2 ?? queueEntry.appliedReceipt2 - // if (receipt2 != null) { - // nestedCountersInstance.countEvent(`consensus`, 'tryProduceReceipt receipt2 != null') - // //we have a receipt2, so we can make a receipt - // result = { - // result: receipt2.result, - // appliedVotes: [receipt2.appliedVote], // everything is the same but the applied vote is an array - // confirmOrChallenge: [receipt2.confirmOrChallenge], - // txid: receipt2.txid, - // app_data_hash: receipt2.app_data_hash, - // } - // } else { - // result = queueEntry.appliedReceipt - // } - - // if (result == null) { - // this.stateManager.transactionConsensus.tryProduceReceipt(queueEntry) - // } - // } else { - // const receipt2 = queueEntry.recievedAppliedReceipt2 ?? 
queueEntry.appliedReceipt2 - // if (receipt2 != null) { - // if (logFlags.debug) - // this.mainLogger.debug( - // `processAcceptedTxQueue2 consensing : ${queueEntry.logID} receiptRcv:${hasReceivedApplyReceipt}` - // ) - // nestedCountersInstance.countEvent(`consensus`, 'tryProduceReceipt receipt2 != null') - // //we have a receipt2, so we can make a receipt - // result = { - // result: receipt2.result, - // appliedVotes: [receipt2.appliedVote], // everything is the same but the applied vote is an array - // confirmOrChallenge: [receipt2.confirmOrChallenge], - // txid: receipt2.txid, - // app_data_hash: receipt2.app_data_hash, - // } - // } else { - // result = await this.stateManager.transactionConsensus.tryProduceReceipt(queueEntry) - // } - // } - - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 tryProduceReceipt result : ${queueEntry.logID} ${utils.stringifyReduce(result)}`) - - //todo this is false.. and prevents some important stuff. - //need to look at appliedReceipt2 - const signedReceipt = this.stateManager.getSignedReceipt(queueEntry) - if (signedReceipt != null) { - //TODO share receipt with corresponding index - - if (logFlags.debug || this.stateManager.consensusLog) { - this.mainLogger.debug( - `processAcceptedTxQueue2 tryProduceReceipt final result : ${ - queueEntry.logID - } ${utils.stringifyReduce(result)}` - ) - } - - const isReceiptMatchPreApply = this.stateManager.transactionConsensus.hasAppliedReceiptMatchingPreApply( - queueEntry, - result - ) - if (logFlags.debug || this.stateManager.consensusLog) { - this.mainLogger.debug( - `processAcceptedTxQueue2 tryProduceReceipt isReceiptMatchPreApply : ${queueEntry.logID} ${isReceiptMatchPreApply}` - ) - } - - // we should send the receipt if we are in the top 5 nodes - // const isConfirmedReceipt = receipt2.confirmOrChallenge?.message === 'confirm' - // const isChallengedReceipt = receipt2.confirmOrChallenge?.message === 'challenge' - // let shouldSendReceipt = false - // if (queueEntry.isInExecutionHome) { - // if (this.usePOQo) { - // // Already handled above - // shouldSendReceipt = false - // } - // else if (this.useNewPOQ) { - // let numberOfSharingNodes = configContext.stateManager.nodesToGossipAppliedReceipt - // if (numberOfSharingNodes > queueEntry.executionGroup.length) numberOfSharingNodes = queueEntry.executionGroup.length - // const highestRankedNodeIds = queueEntry.executionGroup.slice(0, numberOfSharingNodes).map(n => n.id) - // if (highestRankedNodeIds.includes(Self.id)) { - // if (isChallengedReceipt) shouldSendReceipt = true - // else if (isConfirmedReceipt && isReceiptMatchPreApply) shouldSendReceipt = true - // } - // } else { - // shouldSendReceipt = true - // } - - // if (shouldSendReceipt) { - // // Broadcast the receipt, only if we made one (try produce can early out if we received one) - // const awaitStart = shardusGetTime() - // /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.shareAppliedReceipt()' ) - // this.stateManager.transactionConsensus.shareAppliedReceipt(queueEntry) - // /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.shareAppliedReceipt()', DebugComplete.Completed ) - - // this.updateSimpleStatsObject( - // processStats.awaitStats, - // 'shareAppliedReceipt', - // shardusGetTime() - awaitStart - // ) - // } - // } - - // remove from the queue if receipt2 is a challenged receipt - // if (isChallengedReceipt && this.useNewPOQ) { - // const txId = 
queueEntry.acceptedTx.txId - // const logID = queueEntry.logID - // this.updateTxState(queueEntry, 'fail') - // this.removeFromQueue(queueEntry, currentIndex, true) // we don't want to archive this - // nestedCountersInstance.countEvent('consensus', 'isChallengedReceipt: true removing from queue') - // this.mainLogger.debug(`processAcceptedTxQueue2 tryProduceReceipt isChallengedReceipt : ${logID}. remove from queue`) - // continue - // } - - // not a challenge receipt but check the tx result - if (isReceiptMatchPreApply && queueEntry.isInExecutionHome) { - nestedCountersInstance.countEvent('consensus', 'hasAppliedReceiptMatchingPreApply: true') - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_madeReceipt', `${shortID}`, `qId: ${queueEntry.entryID} `) - - //todo check cant_apply flag to make sure a vote can form with it! - //also check if failed votes will work...? - if ( - this.stateManager.getReceiptProposal(queueEntry).cant_preApply === false && - this.stateManager.getReceiptResult(queueEntry) === true - ) { - this.updateTxState(queueEntry, 'commiting') - queueEntry.hasValidFinalData = true - finishedConsensing = true - } else { - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_finishedFailReceipt1', `${shortID}`, `qId: ${queueEntry.entryID} `) - // we are finished since there is nothing to apply - if (logFlags.debug || this.stateManager.consensusLog) { - /* prettier-ignore */ this.mainLogger.debug(`processAcceptedTxQueue2 tryProduceReceipt failed result: false : ${queueEntry.logID} ${utils.stringifyReduce(result)}`) - /* prettier-ignore */ this.statemanager_fatal(`processAcceptedTxQueue2`, `tryProduceReceipt failed result: false : ${queueEntry.logID} ${utils.stringifyReduce(result)}`) - } - nestedCountersInstance.countEvent( - 'consensus', - 'tryProduceReceipt failed result = false or' + ' challenged' - ) - this.updateTxState(queueEntry, 'fail') - this.removeFromQueue(queueEntry, currentIndex) - continue - } - - if ( - queueEntry.globalModification === false && - finishedConsensing === true && - this.executeInOneShard && - queueEntry.isInExecutionHome - ) { - //forward all finished data to corresponding nodes - const awaitStart = shardusGetTime() - // This is an async function but we do not await it - if (configContext.stateManager.attachDataToReceipt === false) { - if (configContext.p2p.useFactCorrespondingTell) { - this.factTellCorrespondingNodesFinalData(queueEntry) - } - // else { - // this.tellCorrespondingNodesFinalData(queueEntry) - // } - } - this.updateSimpleStatsObject( - processStats.awaitStats, - 'tellCorrespondingNodesFinalData', - shardusGetTime() - awaitStart - ) - } - //continue - } else { - nestedCountersInstance.countEvent( - 'consensus', - `hasAppliedReceiptMatchingPreApply: false, isInExecutionHome: ${queueEntry.isInExecutionHome}` - ) - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_gotReceiptNoMatch1', `${shortID}`, `qId: ${queueEntry.entryID} `) - if (this.stateManager.getReceiptResult(queueEntry) === false) { - // We got a reciept, but the consensus is that this TX was not applied. 
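// Hedged sketch, not part of the diff above: a condensed view of how a produced or
// received receipt is turned into the next queue state in this consensing branch.
// The real code also routes some mismatches to 'await final data' when data is
// attached to the receipt, and handles gossip, logging, and repair bookkeeping;
// the parameter names below are simplified stand-ins for the SignedReceipt
// accessors used in the surrounding code.
type NextState = 'commiting' | 'fail' | 'repair'

function nextStateFromReceiptSketch(opts: {
  receiptSaysApplied: boolean // getReceiptResult(queueEntry) === true
  matchesOurPreApply: boolean // hasAppliedReceiptMatchingPreApply(...)
  cantPreApply: boolean // getReceiptProposal(queueEntry).cant_preApply
}): NextState {
  // Consensus decided the TX failed: nothing to commit, drop it from the queue.
  if (!opts.receiptSaysApplied) return 'fail'
  // Receipt passed but our local pre-apply result differs: repair to match it.
  if (!opts.matchesOurPreApply) return 'repair'
  // A matching receipt that could not pre-apply still ends in failure.
  if (opts.cantPreApply) return 'fail'
  // Receipt agrees with our own result: safe to commit final data.
  return 'commiting'
}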
- /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_finishedFailReceipt2', `${shortID}`, `qId: ${queueEntry.entryID} `) - // we are finished since there is nothing to apply - /* prettier-ignore */ if (logFlags.verbose) this.statemanager_fatal( - `consensing: on a failed receipt`, - `consensing: got a failed receipt for ` + - `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}` - ) - if (logFlags.debug || this.stateManager.consensusLog) { - /* prettier-ignore */ this.mainLogger.debug(`processAcceptedTxQueue2 tryProduceReceipt failed result: false : ${queueEntry.logID} ${utils.stringifyReduce(result)}`) - /* prettier-ignore */ this.statemanager_fatal(`processAcceptedTxQueue2`, `tryProduceReceipt failed result: false : ${queueEntry.logID} ${utils.stringifyReduce(result)}`) - } - nestedCountersInstance.countEvent('consensus', 'consensed on failed result') - this.updateTxState(queueEntry, 'fail') - this.removeFromQueue(queueEntry, currentIndex) - continue - } - didNotMatchReceipt = true - queueEntry.signedReceiptForRepair = result - - // queueEntry.appliedReceiptForRepair2 = this.stateManager.getReceipt2(queueEntry) - if (queueEntry.isInExecutionHome === false && queueEntry.signedReceipt != null) { - if (this.stateManager.consensusLog) - this.mainLogger.debug( - `processTransactions ${queueEntry.logID} we are not execution home, but we have a receipt2, go to await final data` - ) - this.updateTxState(queueEntry, 'await final data', 'processTx7') - } - } - } - if (finishedConsensing === false) { - // if we got a reciept while waiting see if we should use it (if our own vote matches) - if (hasReceivedApplyReceipt && queueEntry.receivedSignedReceipt != null) { - if ( - this.stateManager.transactionConsensus.hasAppliedReceiptMatchingPreApply( - queueEntry, - queueEntry.receivedSignedReceipt - ) - ) { - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_gotReceipt', `${shortID}`, `qId: ${queueEntry.entryID} `) - - //todo check cant_apply flag to make sure a vote can form with it! 
- if ( - this.stateManager.getReceiptProposal(queueEntry).cant_preApply === false && - this.stateManager.getReceiptResult(queueEntry) === true - ) { - this.updateTxState(queueEntry, 'commiting') - queueEntry.hasValidFinalData = true - finishedConsensing = true - } else { - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_finishedFailReceipt2', `${shortID}`, `qId: ${queueEntry.entryID} `) - // we are finished since there is nothing to apply - //this.statemanager_fatal(`consensing: repairToMatchReceipt failed`, `consensing: repairToMatchReceipt failed ` + `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} recievedAppliedReceipt:${hasReceivedApplyReceipt} age:${txAge}`) - this.removeFromQueue(queueEntry, currentIndex) - this.updateTxState(queueEntry, 'fail') - continue - } - - //continue - } else { - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_gotReceiptNoMatch2', `${shortID}`, `qId: ${queueEntry.entryID} `) - didNotMatchReceipt = true - queueEntry.signedReceiptForRepair = queueEntry.receivedSignedReceipt - - // queueEntry.appliedReceiptForRepair2 = this.stateManager.getReceipt2(queueEntry) - queueEntry.signedReceiptForRepair = this.stateManager.getSignedReceipt(queueEntry) - } - } else { - //just keep waiting for a reciept - if (this.config.p2p.stuckNGTInQueueFix && queueEntry.isNGT && txAge > timeM5) { - // entry is an NGT so we want to remove it if consensing fails to prevent from getting stuck - nestedCountersInstance.countEvent(`consensus`, 'removing NGT from queue after failed consensing') - this.updateTxState(queueEntry, 'fail') - this.removeFromQueue(queueEntry, currentIndex) - this.processQueue_clearAccountsSeen(seenAccounts, queueEntry) - continue - } - } - - // we got a receipt but did not match it. - if (didNotMatchReceipt === true && queueEntry.isInExecutionHome) { - nestedCountersInstance.countEvent('stateManager', 'didNotMatchReceipt') - if (queueEntry.debugFail_failNoRepair) { - this.updateTxState(queueEntry, 'fail') - this.removeFromQueue(queueEntry, currentIndex) - nestedCountersInstance.countEvent('stateManager', 'debugFail_failNoRepair') - this.statemanager_fatal( - `processAcceptedTxQueue_debugFail_failNoRepair2`, - `processAcceptedTxQueue_debugFail_failNoRepair2 tx: ${shortID} cycle:${ - queueEntry.cycleToRecordOn - } accountkeys: ${utils.stringifyReduce(queueEntry.uniqueWritableKeys)}` - ) - this.processQueue_clearAccountsSeen(seenAccounts, queueEntry) - continue - } - - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_didNotMatchReceipt', `${shortID}`, `qId: ${queueEntry.entryID} result:${queueEntry.signedReceiptForRepair.proposal.applied} `) - queueEntry.repairFinished = false - if (queueEntry.signedReceiptForRepair.proposal.applied === true) { - // need to start repair process and wait - //await note: it is best to not await this. it should be an async operation. 
- if ( - configContext.stateManager.noRepairIfDataAttached && - configContext.stateManager.attachDataToReceipt - ) { - // we have received the final data, so we can just go to "await final data" and commit the accounts - this.updateTxState(queueEntry, 'await final data') - } else { - this.stateManager.getTxRepair().repairToMatchReceipt(queueEntry) - this.updateTxState(queueEntry, 'await repair') - } - continue - } else { - // We got a reciept, but the consensus is that this TX was not applied. - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_consensingComplete_finishedFailReceipt3', `${shortID}`, `qId: ${queueEntry.entryID} `) - // we are finished since there is nothing to apply - this.statemanager_fatal( - `consensing: repairToMatchReceipt failed`, - `consensing: repairToMatchReceipt failed ` + - `txid: ${shortID} state: ${queueEntry.state} applyReceipt:${hasApplyReceipt} receivedSignedReceipt:${hasReceivedApplyReceipt} age:${txAge}` - ) - this.removeFromQueue(queueEntry, currentIndex) - this.updateTxState(queueEntry, 'fail') - continue - } - } - } - } else { - nestedCountersInstance.countEvent('consensus', 'busy waiting') - } - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - } - if (queueEntry.state === 'await repair') { - ///////////////////////////////////////////--await repair--//////////////////////////////////////////////////////////////// - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - - // Special state that we are put in if we are waiting for a repair to receipt operation to conclude - if (queueEntry.repairFinished === true) { - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_awaitRepair_repairFinished', `${shortID}`, `qId: ${queueEntry.entryID} result:${queueEntry.signedReceiptForRepair.proposal.applied} txAge:${txAge} `) - if (queueEntry.signedReceiptForRepair.proposal.applied === true) { - this.updateTxState(queueEntry, 'pass') - } else { - // technically should never get here, because we dont need to repair to a receipt when the network did not apply the TX - this.updateTxState(queueEntry, 'fail') - } - // most remove from queue at the end because it compacts the queue entry - this.removeFromQueue(queueEntry, currentIndex) - - // console.log('Await Repair Finished', queueEntry.acceptedTx.txId, queueEntry) - - nestedCountersInstance.countEvent('stateManager', 'repairFinished') - continue - } else if (queueEntry.repairFailed === true) { - // if the repair failed, we need to fail the TX. Let the patcher take care of it. - this.updateTxState(queueEntry, 'fail') - this.removeFromQueue(queueEntry, currentIndex) - nestedCountersInstance.countEvent('stateManager', 'repairFailed') - continue - } - } - if (queueEntry.state === 'await final data') { - //wait patiently for data to match receipt - //if we run out of time repair to receipt? - - if (this.processQueue_accountSeen(seenAccounts, queueEntry) === false) { - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - - // //temp hack ... 
hopefully this hack can go away - // if (queueEntry.recievedAppliedReceipt == null || queueEntry.recievedAppliedReceipt2 == null) { - // const result = await this.stateManager.transactionConsensus.tryProduceReceipt(queueEntry) - // if (result != null) { - // queueEntry.recievedAppliedReceipt = result - // /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_awaitFinalData_hackReceipt', `${shortID}`, `qId: ${queueEntry.entryID} result:${utils.stringifyReduce(result)}`) - // } - // } - - // remove from queue if we have commited data for this tx - if (configContext.stateManager.attachDataToReceipt && queueEntry.accountDataSet === true) { - if (logFlags.debug) - this.mainLogger.debug( - `shrd_awaitFinalData_removeFromQueue : ${queueEntry.logID} because accountDataSet is true` - ) - this.removeFromQueue(queueEntry, currentIndex) - //this will possibly skip critical stats or exit steps that invoke a transaction applied event to the dapp - continue - } - - //collectedFinalData - //PURPL-74 todo: get the vote from queueEntry.receivedBestVote or receivedBestConfirmation instead of receipt2 - const signedReceipt = this.stateManager.getSignedReceipt(queueEntry) - const timeSinceAwaitFinalStart = - queueEntry.txDebug.startTimestamp['await final data'] > 0 - ? shardusGetTime() - queueEntry.txDebug.startTimestamp['await final data'] - : 0 - - // if(configContext.stateManager.removeStuckChallengedTXs && this.useNewPOQ) { - // // first check if this is a challenge receipt - // if (receipt2 && receipt2.confirmOrChallenge.message === 'challenge') { - // if (logFlags.debug) this.mainLogger.debug(`shrd_awaitFinalData_challenge : ${queueEntry.logID} challenge from receipt2`) - // this.updateTxState(queueEntry, 'fail') - // this.removeFromQueue(queueEntry, currentIndex) - // continue - // } if (receipt2 == null && queueEntry.receivedBestChallenge) { - // const enoughUniqueChallenges = queueEntry.uniqueChallengesCount >= configContext.stateManager.minRequiredChallenges - // if (enoughUniqueChallenges) { - // if (logFlags.debug) this.mainLogger.debug(`shrd_awaitFinalData_challenge : ${queueEntry.logID} has unique challenges`) - // this.updateTxState(queueEntry, 'fail') - // this.removeFromQueue(queueEntry, currentIndex) - // } else if (timeSinceAwaitFinalStart > 1000 * 30) { - // // if we have a challenge and we have waited for a minute, we can fail the tx - // if (logFlags.debug) this.mainLogger.debug(`shrd_awaitFinalData_challenge : ${queueEntry.logID} not enough but waited long enough`) - // this.updateTxState(queueEntry, 'fail') - // this.removeFromQueue(queueEntry, currentIndex) - // } else { - // if (logFlags.debug) this.mainLogger.debug(`shrd_awaitFinalData_challenge : ${queueEntry.logID} not enough challenges but waited ${timeSinceAwaitFinalStart}ms`) - // } - // continue - // } - // } - - const accountsNotStored = new Set() - //if we got a vote above then build a list of accounts that we store but are missing in our - //collectedFinalData - if (signedReceipt) { - let failed = false - let incomplete = false - let skipped = 0 - const missingAccounts = [] - const nodeShardData: StateManagerTypes.shardFunctionTypes.NodeShardData = - this.stateManager.currentCycleShardData.nodeShardData - - /* eslint-disable security/detect-object-injection */ - for (let i = 0; i < signedReceipt.proposal.accountIDs.length; i++) { - const accountID = signedReceipt.proposal.accountIDs[i] - const accountHash = signedReceipt.proposal.afterStateHashes[i] - - //only check for stored 
keys. - if (ShardFunctions.testAddressInRange(accountID, nodeShardData.storedPartitions) === false) { - skipped++ - accountsNotStored.add(accountID) - continue - } - - const wrappedAccount = queueEntry.collectedFinalData[accountID] - if (wrappedAccount == null) { - incomplete = true - queueEntry.debug.waitingOn = accountID - missingAccounts.push(accountID) - // break - } - if (wrappedAccount && wrappedAccount.stateId != accountHash) { - if (logFlags.debug) - this.mainLogger.debug( - `shrd_awaitFinalData_failed : ${queueEntry.logID} wrappedAccount.stateId != accountHash from the vote` - ) - failed = true - //we should be verifying the tate IDS that are pushed into collectedFinal data so this should not happen. if it does that could cause a stuck TX / local oos - nestedCountersInstance.countEvent( - 'stateManager', - `shrd_awaitFinalData failed state check wrappedAccount.stateId != accountHash` - ) - break - } - } - - // if we have missing accounts, we need to request the data - if (incomplete && missingAccounts.length > 0) { - nestedCountersInstance.countEvent( - 'stateManager', - `shrd_awaitFinalData missing accounts ${missingAccounts.length}` - ) - - // start request process for missing data if we waited long enough - let shouldStartFinalDataRequest = false - if (timeSinceAwaitFinalStart > 5000) { - shouldStartFinalDataRequest = true - if (logFlags.verbose) - /* prettier-ignore */ this.mainLogger.debug(`shrd_awaitFinalData_incomplete : ${queueEntry.logID} starting finalDataRequest timeSinceDataShare: ${timeSinceAwaitFinalStart}`) - } else if (txAge > timeM3) { - // by this time we should have all the data we need - shouldStartFinalDataRequest = true - if (logFlags.verbose) - /* prettier-ignore */ this.mainLogger.debug(`shrd_awaitFinalData_incomplete : ${queueEntry.logID} starting finalDataRequest txAge > timeM3 + confirmationSeenExpirationTime`) - } - - // start request process for missing data - const timeSinceLastFinalDataRequest = shardusGetTime() - queueEntry.lastFinalDataRequestTimestamp - if ( - this.config.stateManager.canRequestFinalData && - shouldStartFinalDataRequest && - timeSinceLastFinalDataRequest > 5000 - ) { - nestedCountersInstance.countEvent('stateManager', 'requestFinalData') - this.requestFinalData(queueEntry, missingAccounts) - queueEntry.lastFinalDataRequestTimestamp = shardusGetTime() - continue - } - } else { - nestedCountersInstance.countEvent('stateManager', 'shrd_awaitFinalData not missing accounts') - } - - /* eslint-enable security/detect-object-injection */ - - if (failed === true) { - nestedCountersInstance.countEvent('stateManager', 'shrd_awaitFinalData failed') - this.stateManager.getTxRepair().repairToMatchReceipt(queueEntry) - this.updateTxState(queueEntry, 'await repair') - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_awaitFinalData_failed', `${shortID}`, `qId: ${queueEntry.entryID} skipped:${skipped}`) - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`shrd_awaitFinalData_failed : ${queueEntry.logID} `) - continue - } - - // This is the case where awaiting final data has succeeded. 
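The verification loop above is easier to see in isolation. Below is a standalone sketch (illustrative names only, not the real class members) of how collected final data is checked against the signed receipt proposal: every proposal account inside our stored partitions must be present and must hash to the voted after-state hash; absent accounts are collected so they can be requested later.

```typescript
// Illustrative sketch of the final-data check above.
interface WrappedData { accountId: string; stateId: string; data: unknown }
interface Proposal { accountIDs: string[]; afterStateHashes: string[] }

function checkFinalData(
  proposal: Proposal,
  collected: Record<string, WrappedData>,
  isStored: (accountId: string) => boolean
): { failed: boolean; missing: string[] } {
  const missing: string[] = []
  for (let i = 0; i < proposal.accountIDs.length; i++) {
    const id = proposal.accountIDs[i]
    if (!isStored(id)) continue          // skip accounts outside our stored partitions
    const account = collected[id]
    if (account == null) {
      missing.push(id)                   // incomplete: request it once we have waited long enough
      continue
    }
    if (account.stateId !== proposal.afterStateHashes[i]) {
      return { failed: true, missing }   // hash mismatch: repair to the receipt
    }
  }
  return { failed: false, missing }
}
```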
Store the final data and remove TX from the queue - if (failed === false && incomplete === false) { - //setting this for completeness, but the TX will be removed from the queue at the end of this section - queueEntry.hasValidFinalData = true - - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_awaitFinalData_passed', `${shortID}`, `qId: ${queueEntry.entryID} skipped:${skipped}`) - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`shrd_awaitFinalData_passed : ${queueEntry.logID} skipped:${skipped}`) - - //TODO vote order should be in apply response order! - //This matters for certain daps only. No longer important to shardeum - const rawAccounts = [] - const accountRecords: Shardus.WrappedData[] = [] - /* eslint-disable security/detect-object-injection */ - for (let i = 0; i < signedReceipt.proposal.accountIDs.length; i++) { - const accountID = signedReceipt.proposal.accountIDs[i] - //skip accounts we don't store - if (accountsNotStored.has(accountID)) { - continue - } - const wrappedAccount = queueEntry.collectedFinalData[accountID] - rawAccounts.push(wrappedAccount.data) - accountRecords.push(wrappedAccount) - } - - nestedCountersInstance.countEvent( - 'stateManager', - `shrd_awaitFinalData got data, time to save it ${accountRecords.length}` - ) - /* eslint-enable security/detect-object-injection */ - //await this.app.setAccountData(rawAccounts) - const awaitStart = shardusGetTime() - /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.checkAndSetAccountData()' ) - await this.stateManager.checkAndSetAccountData( - accountRecords, - `txId: ${queueEntry.logID} awaitFinalData_passed`, - false - ) - - /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.checkAndSetAccountData()', DebugComplete.Completed ) - queueEntry.accountDataSet = true - // endpoint to allow dapp to execute something that depends on a transaction being approved. - this.app.transactionReceiptPass( - queueEntry.acceptedTx.data, - queueEntry.collectedFinalData, - queueEntry?.preApplyTXResult?.applyResponse, - false - ) - /* prettier-ignore */ if (logFlags.verbose) console.log('transactionReceiptPass 1', queueEntry.acceptedTx.txId, queueEntry) - this.updateSimpleStatsObject( - processStats.awaitStats, - 'checkAndSetAccountData', - shardusGetTime() - awaitStart - ) - - //log tx processed if needed - if ( - queueEntry != null && - queueEntry.transactionGroup != null && - this.p2p.getNodeId() === queueEntry.transactionGroup[0].id - ) { - if (queueEntry.globalModification === false) { - //temp way to make global modifying TXs not over count - this.stateManager.eventEmitter.emit('txProcessed') - } - } - - if ( - queueEntry.receivedSignedReceipt?.proposal?.applied === true || - queueEntry.signedReceipt?.proposal?.applied === true - ) { - this.updateTxState(queueEntry, 'pass') - } else { - /* prettier-ignore */ - if (logFlags.debug) this.mainLogger.error(`shrd_awaitFinalData_fail : ${queueEntry.logID} no receivedSignedReceipt. signedReceipt: ${utils.stringifyReduce(queueEntry.signedReceipt)}`); - this.updateTxState(queueEntry, 'fail') - } - this.removeFromQueue(queueEntry, currentIndex) - } - } else { - nestedCountersInstance.countEvent('stateManager', 'shrd_awaitFinalData noVote') - // todo: what to do if we have no vote? 
discuss with Omar - } - } else { - const upstreamTx = this.processQueue_getUpstreamTx(seenAccounts, queueEntry) - if (queueEntry.executionDebug == null) queueEntry.executionDebug = {} - queueEntry.executionDebug.logFinalData = `has all final data, but busy. upstreamTx: ${upstreamTx?.logID}` - if (upstreamTx == null) { - queueEntry.executionDebug.logFinalData = `has all final data, but busy. upstreamTx: null` - nestedCountersInstance.countEvent('stateManager', 'shrd_awaitFinalData busy. upstreamTx: null') - } else { - if (upstreamTx.acceptedTx.txId === queueEntry.acceptedTx.txId) { - nestedCountersInstance.countEvent('stateManager', 'shrd_awaitFinalData busy. upstreamTx same tx') - } else { - nestedCountersInstance.countEvent( - 'stateManager', - `shrd_awaitFinalData busy. upstream tx state: ${upstreamTx?.state}` - ) - } - } - } - } - if (queueEntry.state === 'commiting') { - ///////////////////////////////////////////--commiting--//////////////////////////////////////////////////////////////// - if (this.processQueue_accountSeen(seenAccounts, queueEntry) === false) { - this.processQueue_markAccountsSeen(seenAccounts, queueEntry) - - // TODO STATESHARDING4 Check if we have already commited the data from a receipt we saw earlier - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 commiting : ${queueEntry.logID} `) - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_commitingTx', `${shortID}`, `qId: ${queueEntry.entryID} qRst:${localRestartCounter} values: ${this.processQueue_debugAccountData(queueEntry, app)} AcceptedTransaction: ${utils.stringifyReduce(queueEntry.acceptedTx)}`) - - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug( ` processAcceptedTxQueue2. ${queueEntry.entryID} timestamp: ${queueEntry.txKeys.timestamp}`) - - // TODO STATESHARDING4 SYNC related need to reconsider how to set this up again - // if (queueEntry.didSync) { - // /* prettier-ignore */ if (logFlags.playback ) this.logger.playbackLogNote('shrd_sync_commiting', `${queueEntry.acceptedTx.id}`, ` qId: ${queueEntry.entryID}`) - // // if we did sync it is time to JIT query local data. alternatively could have other nodes send us this data, but that could be very high bandwidth. - // for (let key of queueEntry.syncKeys) { - // let wrappedState = await this.app.getRelevantData(key, queueEntry.acceptedTx.data) - // /* prettier-ignore */ if (logFlags.playback ) this.logger.playbackLogNote('shrd_sync_getLocalData', `${queueEntry.acceptedTx.id}`, ` qId: ${queueEntry.entryID} key:${utils.makeShortHash(key)} hash:${wrappedState.stateId}`) - // queueEntry.localCachedData[key] = wrappedState.localCache - // } - // } - - if (queueEntry.debugFail_failNoRepair) { - this.updateTxState(queueEntry, 'fail') - this.removeFromQueue(queueEntry, currentIndex) - nestedCountersInstance.countEvent('stateManager', 'debugFail_failNoRepair') - this.statemanager_fatal( - `processAcceptedTxQueue_debugFail_failNoRepair`, - `processAcceptedTxQueue_debugFail_failNoRepair tx: ${shortID} cycle:${ - queueEntry.cycleToRecordOn - } accountkeys: ${utils.stringifyReduce(queueEntry.uniqueWritableKeys)}` - ) - this.processQueue_clearAccountsSeen(seenAccounts, queueEntry) - continue - } - - const wrappedStates = queueEntry.collectedData // Object.values(queueEntry.collectedData) - - //TODO apply the data we got!!! 
(override wrapped states) - // if(this.executeInOneShard){ - // for(let key of Object.keys(queueEntry.collectedFinalData)){ - // wrappedStates[key] = queueEntry.collectedFinalData[key] - // } - // } - // make sure the branches below will use this data correctly - - // commit queueEntry.preApplyTXResult.applyResponse.... hmm - // aslo is queueEntry.preApplyTXResult.applyResponse use above in tex data tell? - - // console.log('Commiting TX', queueEntry.acceptedTx.txId, queueEntry) - - try { - let canCommitTX = true - let hasReceiptFail = false - if (queueEntry.noConsensus === true) { - // dont have a receipt for a non consensus TX. not even sure if we want to keep that! - if (queueEntry.preApplyTXResult.passed === false) { - canCommitTX = false - } - } else if (queueEntry.signedReceipt != null) { - // the final state of the queue entry will be pass or fail based on the receipt - if (queueEntry.signedReceipt.proposal.applied === false) { - canCommitTX = false - hasReceiptFail = true - } - } else if (queueEntry.receivedSignedReceipt != null) { - // the final state of the queue entry will be pass or fail based on the receipt - if (queueEntry.receivedSignedReceipt.proposal.applied === false) { - canCommitTX = false - if (configContext.stateManager.receiptRemoveFix) { - hasReceiptFail = true - } else { - hasReceiptFail = false - } - } - } else { - canCommitTX = false - } - - nestedCountersInstance.countEvent( - 'stateManager', - `canCommitTX: ${canCommitTX}, hasReceiptFail: ${hasReceiptFail}` - ) - - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.debug) this.mainLogger.debug('shrd_commitingTx', `${shortID}`, `canCommitTX: ${canCommitTX}, hasReceiptFail: ${hasReceiptFail}`) - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_commitingTx', `${shortID}`, `canCommitTX: ${canCommitTX} `) - if (canCommitTX) { - // this.mainLogger.debug(` processAcceptedTxQueue2. applyAcceptedTransaction ${queueEntry.entryID} timestamp: ${queueEntry.txKeys.timestamp} queuerestarts: ${localRestartCounter} queueLen: ${this.newAcceptedTxQueue.length}`) - - // Need to go back and thing on how this was supposed to work: - //queueEntry.acceptedTx.transactionGroup = queueEntry.transactionGroup // Used to not double count txProcessed - - //try { - this.profiler.profileSectionStart('commit') - - const awaitStart = shardusGetTime() - /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.commitConsensedTransaction()' ) - await this.commitConsensedTransaction(queueEntry) - /* prettier-ignore */ this.setDebugLastAwaitedCall( 'this.stateManager.transactionConsensus.commitConsensedTransaction()', DebugComplete.Completed ) - this.updateSimpleStatsObject( - processStats.awaitStats, - 'commitConsensedTransaction', - shardusGetTime() - awaitStart - ) - - if (queueEntry.repairFinished) { - // saw a TODO comment above and befor I axe it want to confirm what is happening after we repair a receipt. 
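The branching above that decides whether the collected data can be committed is compact but easy to misread. A minimal sketch follows, assuming a flattened view of the queue entry; the field names are stand-ins for `noConsensus`, `preApplyTXResult.passed`, `signedReceipt.proposal.applied`, `receivedSignedReceipt.proposal.applied`, and the `receiptRemoveFix` config flag, and the helper itself is hypothetical.

```typescript
// Hypothetical summary of the canCommitTX / hasReceiptFail decision above.
interface CommitInputs {
  noConsensus: boolean
  preApplyPassed: boolean
  ownReceiptApplied?: boolean      // from our own signed receipt, if we produced one
  receivedReceiptApplied?: boolean // from a received signed receipt, if we got one
  receiptRemoveFix: boolean        // config flag controlling fail-receipt handling
}

function commitDecision(q: CommitInputs): { canCommit: boolean; hasReceiptFail: boolean } {
  if (q.noConsensus) {
    // non-consensus TXs commit purely on the pre-apply result; there is no receipt to fail on
    return { canCommit: q.preApplyPassed, hasReceiptFail: false }
  }
  if (q.ownReceiptApplied !== undefined) {
    return { canCommit: q.ownReceiptApplied, hasReceiptFail: q.ownReceiptApplied === false }
  }
  if (q.receivedReceiptApplied !== undefined) {
    const failed = q.receivedReceiptApplied === false
    return { canCommit: !failed, hasReceiptFail: failed && q.receiptRemoveFix }
  }
  // no receipt at all: nothing we can safely commit
  return { canCommit: false, hasReceiptFail: false }
}
```

The `hasReceiptFail` flag only matters after the commit attempt: it gates the `transactionReceiptFail` callback so the dapp can react to a TX the network consensed as not applied.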
- // shouldn't get here putting this in to catch if we do - this.statemanager_fatal(`processAcceptedTxQueue_commitingRepairedReceipt`, `${shortID} `) - nestedCountersInstance.countEvent('processing', 'commiting a repaired TX...') - } - - nestedCountersInstance.countEvent('stateManager', 'committed tx') - if (queueEntry.hasValidFinalData === false) { - nestedCountersInstance.countEvent('stateManager', 'commit state fix FinalDataFlag') - queueEntry.hasValidFinalData = true - } - - //} finally { - this.profiler.profileSectionEnd('commit') - //} - } - if (logFlags.verbose) - console.log('commit commit', queueEntry.acceptedTx.txId, queueEntry.acceptedTx.timestamp) - if (this.config.p2p.experimentalSnapshot) this.addReceiptToForward(queueEntry, 'commit') - - if (hasReceiptFail) { - // endpoint to allow dapp to execute something that depends on a transaction failing - - const applyReponse = queueEntry.preApplyTXResult.applyResponse // TODO STATESHARDING4 ... if we get here from a non standard path may need to get this data from somewhere else - - this.app.transactionReceiptFail(queueEntry.acceptedTx.data, wrappedStates, applyReponse) - } - } catch (ex) { - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug('processAcceptedTxQueue2 commiting Transaction:' + ex.name + ': ' + ex.message + ' at ' + ex.stack) - this.statemanager_fatal( - `processAcceptedTxQueue2b_ex`, - 'processAcceptedTxQueue2 commiting Transaction:' + ex.name + ': ' + ex.message + ' at ' + ex.stack - ) - } finally { - this.processQueue_clearAccountsSeen(seenAccounts, queueEntry) - - if (queueEntry.noConsensus === true) { - // dont have a receipt for a non consensus TX. not even sure if we want to keep that! - if (queueEntry.preApplyTXResult.passed === true) { - this.updateTxState(queueEntry, 'pass') - } else { - this.updateTxState(queueEntry, 'fail') - } - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 commiting finished : noConsensus:${queueEntry.state} ${queueEntry.logID} `) - } else if (queueEntry.signedReceipt != null) { - // the final state of the queue entry will be pass or fail based on the receipt - if (queueEntry.signedReceipt.proposal.applied === true) { - this.updateTxState(queueEntry, 'pass') - } else { - this.updateTxState(queueEntry, 'fail') - } - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 commiting finished : Recpt:${queueEntry.state} ${queueEntry.logID} `) - } else if (queueEntry.receivedSignedReceipt != null) { - // the final state of the queue entry will be pass or fail based on the receipt - if (queueEntry.receivedSignedReceipt.proposal.applied === true) { - this.updateTxState(queueEntry, 'pass') - } else { - this.updateTxState(queueEntry, 'fail') - } - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 commiting finished : recvRecpt:${queueEntry.state} ${queueEntry.logID} `) - } else { - this.updateTxState(queueEntry, 'fail') - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`processAcceptedTxQueue2 commiting finished : no receipt ${queueEntry.logID} `) - } - - /* prettier-ignore */ if (logFlags.verbose) if (logFlags.playback) this.logger.playbackLogNote('shrd_commitingTxFinished', `${queueEntry.acceptedTx.txId}`, `qId: ${queueEntry.entryID} qRst:${localRestartCounter} values: ${this.processQueue_debugAccountData(queueEntry, app)} AcceptedTransaction: ${utils.stringifyReduce(queueEntry.acceptedTx)}`) - - //moved to end of finally because this does some 
compacting on the queue entry - this.removeFromQueue(queueEntry, currentIndex) - } - - // TODO STATESHARDING4 SYNC related.. need to consider how we will re activate this - // // do we have any syncing neighbors? - // if (this.stateManager.currentCycleShardData.hasSyncingNeighbors === true && queueEntry.globalModification === false) { - // // let dataToSend = Object.values(queueEntry.collectedData) - // let dataToSend = [] - - // let keys = Object.keys(queueEntry.originalData) - // for (let key of keys) { - // dataToSend.push(JSON.parse(queueEntry.originalData[key])) - // } - - // // maybe have to send localcache over, or require the syncing node to grab this data itself JIT! - // // let localCacheTransport = Object.values(queueEntry.localCachedData) - - // // send data to syncing neighbors. - // if (this.stateManager.currentCycleShardData.syncingNeighbors.length > 0) { - // let message = { stateList: dataToSend, txid: queueEntry.acceptedTx.id } - // /* prettier-ignore */ if (logFlags.playback ) this.logger.playbackLogNote('shrd_sync_dataTell', `${queueEntry.acceptedTx.id}`, ` qId: ${queueEntry.entryID} AccountBeingShared: ${utils.stringifyReduce(queueEntry.txKeys.allKeys)} txid: ${utils.makeShortHash(message.txid)} nodes:${utils.stringifyReduce(this.stateManager.currentCycleShardData.syncingNeighbors.map(x => x.id))}`) - // this.p2p.tell(this.stateManager.currentCycleShardData.syncingNeighbors, 'broadcast_state', message) - // } - // } - } - } - if (queueEntry.state === 'canceled') { - ///////////////////////////////////////////////--canceled--//////////////////////////////////////////////////////////// - //need to review this state look unused - this.processQueue_clearAccountsSeen(seenAccounts, queueEntry) - this.removeFromQueue(queueEntry, currentIndex) - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`processAcceptedTxQueue2 canceled : ${queueEntry.logID} `) - nestedCountersInstance.countEvent('stateManager', 'canceled') - } - } finally { - this.profiler.profileSectionEnd(`process-${pushedProfilerTag}`) - if (logFlags.profiling_verbose) - profilerInstance.scopedProfileSectionEnd(`scoped-process-${pushedProfilerTag}`) - - //let do some more stats work - const txElapsed = shardusGetTime() - txStartTime - if (queueEntry.state != pushedProfilerTag) { - processStats.stateChanged++ - this.updateSimpleStatsObject(processStats.stateChangedStats, pushedProfilerTag, txElapsed) - } else { - processStats.sameState++ - this.updateSimpleStatsObject(processStats.sameStateStats, pushedProfilerTag, txElapsed) - } - - pushedProfilerTag = null // clear the tag - } - } - } finally { - //Handle an odd case where the finally did not catch exiting scope. 
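Per-entry timing in the `finally` block above is bucketed by whether the entry changed state during this pass through the queue. A rough standalone sketch of that bookkeeping follows; the stats object shape and helper name are assumptions for illustration, not the real `updateSimpleStatsObject` signature.

```typescript
// Hypothetical simplification of the per-entry stats update in the finally block above.
interface SimpleStats { [tag: string]: { count: number; totalMs: number } }
interface ProcessStats {
  stateChanged: number
  sameState: number
  stateChangedStats: SimpleStats
  sameStateStats: SimpleStats
}

function recordEntryTiming(
  stats: ProcessStats,
  stateAtStart: string, // the pushed profiler tag, i.e. the state when processing began
  stateNow: string,
  elapsedMs: number
): void {
  const changed = stateNow !== stateAtStart
  const bucket = changed ? stats.stateChangedStats : stats.sameStateStats
  if (changed) stats.stateChanged++
  else stats.sameState++
  const entry = bucket[stateAtStart] ?? (bucket[stateAtStart] = { count: 0, totalMs: 0 })
  entry.count++
  entry.totalMs += elapsedMs
}
```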
- if (pushedProfilerTag != null) { - this.profiler.profileSectionEnd(`process-${pushedProfilerTag}`) - this.profiler.profileSectionEnd(`process-patched1-${pushedProfilerTag}`) - pushedProfilerTag = null - } - - const processTime = shardusGetTime() - startTime - - processStats.totalTime = processTime - - this.finalizeSimpleStatsObject(processStats.awaitStats) - this.finalizeSimpleStatsObject(processStats.sameStateStats) - this.finalizeSimpleStatsObject(processStats.stateChangedStats) - - this.lastProcessStats['latest'] = processStats - if (processTime > 10000) { - nestedCountersInstance.countEvent('stateManager', 'processTime > 10s') - this.statemanager_fatal( - `processAcceptedTxQueue excceded time ${processTime / 1000} firstTime:${firstTime}`, - `processAcceptedTxQueue excceded time ${ - processTime / 1000 - } firstTime:${firstTime} stats:${Utils.safeStringify(processStats)}` - ) - this.lastProcessStats['10+'] = processStats - } else if (processTime > 5000) { - nestedCountersInstance.countEvent('stateManager', 'processTime > 5s') - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`processTime > 5s ${processTime / 1000} stats:${Utils.safeStringify(processStats)}`) - this.lastProcessStats['5+'] = processStats - } else if (processTime > 2000) { - nestedCountersInstance.countEvent('stateManager', 'processTime > 2s') - /* prettier-ignore */ if (logFlags.error && logFlags.verbose) this.mainLogger.error(`processTime > 2s ${processTime / 1000} stats:${Utils.safeStringify(processStats)}`) - this.lastProcessStats['2+'] = processStats - } else if (processTime > 1000) { - nestedCountersInstance.countEvent('stateManager', 'processTime > 1s') - /* prettier-ignore */ if (logFlags.error && logFlags.verbose) this.mainLogger.error(`processTime > 1s ${processTime / 1000} stats:${Utils.safeStringify(processStats)}`) - this.lastProcessStats['1+'] = processStats - } - - // restart loop if there are still elements in it - if (this._transactionQueue.length > 0 || this.pendingTransactionQueue.length > 0) { - this.transactionQueueHasRemainingWork = true - setTimeout(() => { - this.stateManager.tryStartTransactionProcessingQueue() - }, 15) - } else { - if (logFlags.seqdiagram) - this.mainLogger.info( - `0x10052024 ${ipInfo.externalIp} ${shardusGetTime()} 0x0000 processTransactions _transactionQueue.length 0` - ) - this.transactionQueueHasRemainingWork = false - } - - this.transactionProcessingQueueRunning = false - this.processingLastRunTime = shardusGetTime() - this.stateManager.lastSeenAccountsMap = seenAccounts - - this.profiler.profileSectionEnd('processQ') - } - } - - private setTXExpired(queueEntry: QueueEntry, currentIndex: number, message: string): void { - /* prettier-ignore */ if (logFlags.verbose || this.stateManager.consensusLog) this.mainLogger.debug(`setTXExpired tx:${queueEntry.logID} ${message} ts:${queueEntry.acceptedTx.timestamp} debug:${utils.stringifyReduce(queueEntry.debug)} state: ${queueEntry.state}, isInExecution: ${queueEntry.isInExecutionHome}`) - this.updateTxState(queueEntry, 'expired') - this.removeFromQueue(queueEntry, currentIndex) - this.app.transactionReceiptFail( - queueEntry.acceptedTx.data, - queueEntry.collectedData, - queueEntry.preApplyTXResult?.applyResponse - ) - this.stateManager.eventEmitter.emit('txExpired', queueEntry.acceptedTx.txId) - - /* prettier-ignore */ nestedCountersInstance.countEvent( 'txExpired', `tx: ${this.app.getSimpleTxDebugValue(queueEntry.acceptedTx?.data)}` ) - - //This is really important. 
If we are going to expire a TX, then look to see if we already have a receipt for it. - //If so, then just go into async receipt repair mode for the TX AFTER it has been expired and removed from the queue - if (queueEntry.signedReceiptFinal != null) { - const startRepair = queueEntry.repairStarted === false - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`setTXExpired. ${queueEntry.logID} start repair:${startRepair}. update `) - if (startRepair) { - nestedCountersInstance.countEvent('repair1', 'setTXExpired: start repair') - queueEntry.signedReceiptForRepair = queueEntry.signedReceiptFinal - //todo any limits to how many repairs at once to allow? - this.stateManager.getTxRepair().repairToMatchReceipt(queueEntry) - } - } else { - nestedCountersInstance.countEvent('repair1', 'setTXExpired: no receipt to repair') - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`setTXExpired. no receipt to repair ${queueEntry.logID}`) - } - } - private setTxAlmostExpired(queueEntry: QueueEntry, currentIndex: number, message: string): void { - /* prettier-ignore */ if (logFlags.verbose || this.stateManager.consensusLog) this.mainLogger.debug(`setTxAlmostExpired tx:${queueEntry.logID} ${message} ts:${queueEntry.acceptedTx.timestamp} debug:${utils.stringifyReduce(queueEntry.debug)}`) - // this.updateTxState(queueEntry, 'almostExpired') - queueEntry.almostExpired = true - - /* prettier-ignore */ nestedCountersInstance.countEvent("txAlmostExpired", `tx: ${this.app.getSimpleTxDebugValue(queueEntry.acceptedTx?.data)}`) - } - - async getArchiverReceiptFromQueueEntry(queueEntry: QueueEntry): Promise { - if (!queueEntry.preApplyTXResult || !queueEntry.preApplyTXResult.applyResponse) { - /* prettier-ignore */ if (logFlags.verbose) console.log('getArchiverReceiptFromQueueEntry : no preApplyTXResult or applyResponse, returning null receipt') - /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', 'getArchiverReceiptFromQueueEntry no preApplyTXResult or applyResponse') - return null as ArchiverReceipt - } - - const txId = queueEntry.acceptedTx.txId - const timestamp = queueEntry.acceptedTx.timestamp - const globalModification = queueEntry.globalModification - - let signedReceipt = null as SignedReceipt | P2PTypes.GlobalAccountsTypes.GlobalTxReceipt - if (globalModification) { - signedReceipt = getGlobalTxReceipt(queueEntry.acceptedTx.txId) as P2PTypes.GlobalAccountsTypes.GlobalTxReceipt - /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : globalModification signedReceipt txid', txId) - /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : globalModification signedReceipt signs', txId, Utils.safeStringify(signedReceipt.signs)) - /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : globalModification signedReceipt tx', txId, Utils.safeStringify(signedReceipt.tx)) - } else { - signedReceipt = this.stateManager.getSignedReceipt(queueEntry) as SignedReceipt - /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : nonGlobal signedReceipt txid', txId) - /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : nonGlobal signedReceipt proposal', txId, Utils.safeStringify(signedReceipt.proposal)) - /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : nonGlobal signedReceipt proposalHash', 
txId, Utils.safeStringify(signedReceipt.proposalHash)) - /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : nonGlobal signedReceipt signaturePack', txId, Utils.safeStringify(signedReceipt.signaturePack)) - /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : nonGlobal signedReceipt voteOffsets', txId, Utils.safeStringify(signedReceipt.voteOffsets)) - } - if (!signedReceipt) { - /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', 'getArchiverReceiptFromQueueEntry no signedReceipt') - /* prettier-ignore */ if (logFlags.important_as_error) console.log(`getArchiverReceiptFromQueueEntry: signedReceipt is null for txId: ${txId} timestamp: ${timestamp} globalModification: ${globalModification}`) - return null as ArchiverReceipt - } - - const accountsToAdd: { [accountId: string]: Shardus.AccountsCopy } = {} - const beforeAccountsToAdd: { [accountId: string]: Shardus.AccountsCopy } = {} - - if (globalModification) { - signedReceipt = signedReceipt as P2PTypes.GlobalAccountsTypes.GlobalTxReceipt - if (signedReceipt.tx && signedReceipt.tx.addressHash != '' && !beforeAccountsToAdd[signedReceipt.tx.address]) { - console.log(queueEntry.collectedData[signedReceipt.tx.address].stateId, signedReceipt.tx.addressHash) - if (queueEntry.collectedData[signedReceipt.tx.address].stateId === signedReceipt.tx.addressHash) { - const isGlobal = this.stateManager.accountGlobals.isGlobalAccount(signedReceipt.tx.addressHash) - const account = queueEntry.collectedData[signedReceipt.tx.address] - const accountCopy = { - accountId: account.accountId, - data: account.data, - hash: account.stateId, - timestamp: account.timestamp, - isGlobal, - } as Shardus.AccountsCopy - beforeAccountsToAdd[account.accountId] = accountCopy - } else { - console.log( - `getArchiverReceiptFromQueueEntry: before stateId does not match addressHash for txId: ${txId} timestamp: ${timestamp} globalModification: ${globalModification}` - ) - } - } - } else if (this.config.stateManager.includeBeforeStatesInReceipts) { - // simulate debug case - if (configContext.mode === 'debug' && configContext.debug.beforeStateFailChance > Math.random()) { - for (const accountId in queueEntry.collectedData) { - const account = queueEntry.collectedData[accountId] - account.stateId = 'debugFail2' - } - } - - const fileredBeforeStateToSend = [] - const badBeforeStateAccounts = [] - - for (const account of Object.values(queueEntry.collectedData)) { - if (typeof this.app.beforeStateAccountFilter !== 'function' || this.app.beforeStateAccountFilter(account)) { - fileredBeforeStateToSend.push(account.accountId) - } - } - - // prepare before state accounts - for (const accountId of fileredBeforeStateToSend) { - signedReceipt = signedReceipt as SignedReceipt - // check if our beforeState account hash is the same as the vote in the receipt2 - const index = signedReceipt.proposal.accountIDs.indexOf(accountId) - if (index === -1) continue - const account = queueEntry.collectedData[accountId] - if (account == null) { - badBeforeStateAccounts.push(accountId) - continue - } - if (account.stateId !== signedReceipt.proposal.beforeStateHashes[index]) { - badBeforeStateAccounts.push(accountId) - } - } - - if (badBeforeStateAccounts.length > 0) { - nestedCountersInstance.countEvent( - 'stateManager', - 'badBeforeStateAccounts in getArchiverReceiptFromQueueEntry', - badBeforeStateAccounts.length - ) - - // repair bad before state accounts - const wrappedResponses: 
WrappedResponses = await this.requestInitialData(queueEntry, badBeforeStateAccounts) - for (const accountId in wrappedResponses) { - queueEntry.collectedData[accountId] = wrappedResponses[accountId] - } - } - - // add before state accounts - for (const accountId of fileredBeforeStateToSend) { - const account = queueEntry.collectedData[accountId] - const isGlobal = this.stateManager.accountGlobals.isGlobalAccount(account.accountId) - const accountCopy = { - accountId: account.accountId, - data: account.data, - hash: account.stateId, - timestamp: account.timestamp, - isGlobal, - } as Shardus.AccountsCopy - beforeAccountsToAdd[account.accountId] = accountCopy - } - } - - let isAccountsMatchWithReceipt2 = true - const accountWrites = queueEntry.preApplyTXResult?.applyResponse?.accountWrites - - if (globalModification) { - if (accountWrites === null || accountWrites.length === 0) { - console.log('No account update in global Modification tx', txId, timestamp) - } - } else if ( - accountWrites != null && - accountWrites.length === (signedReceipt as SignedReceipt).proposal.accountIDs.length - ) { - signedReceipt = signedReceipt as SignedReceipt - for (const account of accountWrites) { - const indexInVote = signedReceipt.proposal.accountIDs.indexOf(account.accountId) - if (signedReceipt.proposal.afterStateHashes[indexInVote] !== account.data.stateId) { - // console.log('Found afterStateHash mismatch', account.accountId, receipt2.proposal.afterStateHashes[indexInVote], account.data.stateId) - isAccountsMatchWithReceipt2 = false - break - } - } - } else { - isAccountsMatchWithReceipt2 = false - } - - let finalAccounts = [] - let appReceiptData = queueEntry.preApplyTXResult?.applyResponse?.appReceiptData || null - if (isAccountsMatchWithReceipt2) { - finalAccounts = accountWrites - } else { - signedReceipt = signedReceipt as SignedReceipt - // request the final accounts and appReceiptData - let success = false - let count = 0 - const maxRetry = 3 - const nodesToAskKeys = signedReceipt.signaturePack?.map((signature) => signature.owner) - - // retry 3 times if the request fails - while (success === false && count < maxRetry) { - count++ - const requestedData = await this.requestFinalData( - queueEntry, - signedReceipt.proposal.accountIDs, - nodesToAskKeys, - true - ) - if (requestedData && requestedData.wrappedResponses && requestedData.appReceiptData) { - success = true - for (const accountId in requestedData.wrappedResponses) { - finalAccounts.push(requestedData.wrappedResponses[accountId]) - } - appReceiptData = requestedData.appReceiptData - } - } - } - - // override with the accounts in accountWrites - for (const account of finalAccounts) { - const isGlobal = this.stateManager.accountGlobals.isGlobalAccount(account.accountId) - const accountCopy = { - accountId: account.accountId, - data: account.data.data, - timestamp: account.timestamp, - hash: account.data.stateId, - isGlobal, - } as Shardus.AccountsCopy - accountsToAdd[account.accountId] = accountCopy - } - - // MIGHT NOT NEED THIS NOW WITH THE POQo RECEIPT REWRITE. 
NEED TO CONFIRM - // if (!globalModification && this.useNewPOQ === false) { - // appliedReceipt = appliedReceipt as AppliedReceipt2 - // if (appliedReceipt.appliedVote) { - // delete appliedReceipt.appliedVote.node_id - // delete appliedReceipt.appliedVote.sign - // delete appliedReceipt.confirmOrChallenge - // // Update the app_data_hash with the app_data_hash from the appliedVote - // appliedReceipt.app_data_hash = appliedReceipt.appliedVote.app_data_hash - // } - // } - - const archiverReceipt: ArchiverReceipt = { - tx: { - originalTxData: queueEntry.acceptedTx.data, - txId: queueEntry.acceptedTx.txId, - timestamp: queueEntry.acceptedTx.timestamp, - }, - signedReceipt, - appReceiptData, - beforeStates: [...Object.values(beforeAccountsToAdd)], - afterStates: [...Object.values(accountsToAdd)], - cycle: queueEntry.txGroupCycle, - globalModification, - } - /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : archiverReceipt', txId, Utils.safeStringify(archiverReceipt)) - /* prettier-ignore */ if (logFlags.important_as_error) console.log('getArchiverReceiptFromQueueEntry : originalTxData object', txId, Utils.safeStringify(archiverReceipt.tx.originalTxData)) - - return archiverReceipt - } - - addOriginalTxDataToForward(queueEntry: QueueEntry): void { - if (logFlags.verbose) console.log('originalTxData', queueEntry.acceptedTx.txId, queueEntry.acceptedTx.timestamp) - const { acceptedTx } = queueEntry - const originalTxData = { - txId: acceptedTx.txId, - originalTxData: acceptedTx.data, - cycle: queueEntry.cycleToRecordOn, - timestamp: acceptedTx.timestamp, - } - // const signedOriginalTxData: any = this.crypto.sign(originalTxData) // maybe we don't need to send by signing it - Archivers.instantForwardOriginalTxData(originalTxData) - } - - async addReceiptToForward(queueEntry: QueueEntry, debugString = ''): Promise { - if (logFlags.verbose) - console.log('addReceiptToForward', queueEntry.acceptedTx.txId, queueEntry.acceptedTx.timestamp, debugString) - const archiverReceipt = await this.getArchiverReceiptFromQueueEntry(queueEntry) - Archivers.instantForwardReceipts([archiverReceipt]) - this.receiptsForwardedTimestamp = shardusGetTime() - this.forwardedReceiptsByTimestamp.set(this.receiptsForwardedTimestamp, archiverReceipt) - // this.receiptsToForward.push(archiverReceipt) - } - - getReceiptsToForward(): ArchiverReceipt[] { - return [...this.forwardedReceiptsByTimestamp.values()] - } - - // eslint-disable-next-line @typescript-eslint/explicit-function-return-type - async requestFinalData( - queueEntry: QueueEntry, - accountIds: string[], - nodesToAskKeys: string[] | null = null, - includeAppReceiptData = false - ): Promise { - profilerInstance.profileSectionStart('requestFinalData') - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`requestFinalData: txid: ${queueEntry.logID} accountIds: ${utils.stringifyReduce(accountIds)}`); - const message = { txid: queueEntry.acceptedTx.txId, accountIds, includeAppReceiptData } - let success = false - let successCount = 0 - let validAppReceiptData = includeAppReceiptData === false ? 
true : false - - // first check if we have received final data - for (const accountId of accountIds) { - // eslint-disable-next-line security/detect-object-injection - if (queueEntry.collectedFinalData[accountId] != null) { - successCount++ - } - } - if (successCount === accountIds.length && includeAppReceiptData === false) { - nestedCountersInstance.countEvent('stateManager', 'requestFinalDataAlreadyReceived') - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`requestFinalData: txid: ${queueEntry.logID} already received all data`) - // no need to request data - return - } - - try { - let nodeToAsk: Shardus.Node - if (nodesToAskKeys && nodesToAskKeys.length > 0) { - const randomIndex = Math.floor(Math.random() * nodesToAskKeys.length) - // eslint-disable-next-line security/detect-object-injection - const randomNodeToAskKey = nodesToAskKeys[randomIndex] - nodeToAsk = byPubKey.get(randomNodeToAskKey) - } else { - const randomIndex = Math.floor(Math.random() * queueEntry.executionGroup.length) - // eslint-disable-next-line security/detect-object-injection - const randomExeNode = queueEntry.executionGroup[randomIndex] - nodeToAsk = nodes.get(randomExeNode.id) - } - - if (!nodeToAsk) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('requestFinalData: could not find node from execution group') - throw new Error('requestFinalData: could not find node from execution group') - } - - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug( `requestFinalData: txid: ${queueEntry.acceptedTx.txId} accountIds: ${utils.stringifyReduce( accountIds )}, asking node: ${nodeToAsk.id} ${nodeToAsk.externalPort} at timestamp ${shardusGetTime()}` ) - - // if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.requestTxAndStateBinary) { - const requestMessage = message as RequestTxAndStateReq - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455101 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(nodeToAsk.id)}: ${'request_tx_and_state'}`) - const response = await Comms.askBinary( - nodeToAsk, - InternalRouteEnum.binary_request_tx_and_state, - requestMessage, - serializeRequestTxAndStateReq, - deserializeRequestTxAndStateResp, - {} - ) - // } else response = await Comms.ask(nodeToAsk, 'request_tx_and_state', message) - - if (response && response.stateList && response.stateList.length > 0) { - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`requestFinalData: txid: ${queueEntry.logID} received data for ${response.stateList.length} accounts`) - } else { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`requestFinalData: txid: ${queueEntry.logID} response is null`) - nestedCountersInstance.countEvent( - 'stateManager', - 'requestFinalData: failed: response or response.stateList null or statelist length 0' - ) - return - } - - for (const data of response.stateList) { - if (data == null) { - /* prettier-ignore */ - if (logFlags.error && logFlags.debug) this.mainLogger.error(`requestFinalData data == null for tx ${queueEntry.logID}`); - success = false - break - } - const indexInVote = queueEntry.signedReceipt.proposal.accountIDs.indexOf(data.accountId) - if (indexInVote === -1) continue - const afterStateIdFromVote = queueEntry.signedReceipt.proposal.afterStateHashes[indexInVote] - if (data.stateId !== afterStateIdFromVote) { - nestedCountersInstance.countEvent('stateManager', 'requestFinalDataMismatch') - continue - } - if 
(queueEntry.collectedFinalData[data.accountId] == null) { - // todo: check the state hashes and verify - queueEntry.collectedFinalData[data.accountId] = data - successCount++ - /* prettier-ignore */ - if (logFlags.debug) this.mainLogger.debug(`requestFinalData: txid: ${queueEntry.logID} success accountId: ${data.accountId} stateId: ${data.stateId}`); - } - } - if (includeAppReceiptData && response.appReceiptData) { - const receivedAppReceiptDataHash = this.crypto.hash(response.appReceiptData) - const receipt2 = this.stateManager.getSignedReceipt(queueEntry) - if (receipt2 != null) { - validAppReceiptData = receivedAppReceiptDataHash === receipt2.proposal.appReceiptDataHash - } - } - if (successCount === accountIds.length && validAppReceiptData === true) { - success = true - - //setting this for completeness. if our node is awaiting final data it will utilize what was looked up here - queueEntry.hasValidFinalData = true - return { wrappedResponses: queueEntry.collectedFinalData, appReceiptData: response.appReceiptData } - } else { - nestedCountersInstance.countEvent( - 'stateManager', - `requestFinalData: failed: did not get enough data: ${successCount} < ${accountIds.length}` - ) - } - } catch (e) { - nestedCountersInstance.countEvent('stateManager', 'requestFinalData: failed: Error') - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`requestFinalData: txid: ${queueEntry.logID} error: ${e.message}`) - } finally { - if (success === false) { - nestedCountersInstance.countEvent('stateManager', 'requestFinalData: failed: success === false') - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`requestFinalData: txid: ${queueEntry.logID} failed. successCount: ${successCount} accountIds: ${accountIds.length}`); - } - } - profilerInstance.profileSectionEnd('requestFinalData') - } - - async requestInitialData(queueEntry: QueueEntry, accountIds: string[]): Promise { - profilerInstance.profileSectionStart('requestInitialData') - this.mainLogger.debug( - `requestInitialData: txid: ${queueEntry.logID} accountIds: ${utils.stringifyReduce(accountIds)}` - ) - const message = { txid: queueEntry.acceptedTx.txId, accountIds } - let success = false - let successCount = 0 - let retries = 0 - const maxRetry = 3 - const triedNodes = new Set() - - if (queueEntry.executionGroup == null) return - - while (retries < maxRetry) { - const executionNodeIds = queueEntry.executionGroup.map((node) => node.id) - const randomExeNodeId = utils.getRandom(executionNodeIds, 1)[0] - if (triedNodes.has(randomExeNodeId)) continue - if (randomExeNodeId === Self.id) continue - const nodeToAsk = nodes.get(randomExeNodeId) - if (!nodeToAsk) { - if (logFlags.error) this.mainLogger.error('requestInitialData: could not find node from execution group') - throw new Error('requestInitialData: could not find node from execution group') - } - triedNodes.add(randomExeNodeId) - retries++ - try { - if (logFlags.debug) - this.mainLogger.debug( - `requestInitialData: txid: ${queueEntry.acceptedTx.txId} accountIds: ${utils.stringifyReduce( - accountIds - )}, asking node: ${nodeToAsk.id} ${nodeToAsk.externalPort} at timestamp ${shardusGetTime()}` - ) - - const requestMessage = message as RequestTxAndStateReq - /* prettier-ignore */ if (logFlags.seqdiagram) this.seqLogger.info(`0x53455101 ${shardusGetTime()} tx:${queueEntry.acceptedTx.txId} ${NodeList.activeIdToPartition.get(Self.id)}-->>${NodeList.activeIdToPartition.get(nodeToAsk.id)}: ${'request_tx_and_state'}`) - const response = await Comms.askBinary( - nodeToAsk, - 
InternalRouteEnum.binary_request_tx_and_state_before, - requestMessage, - serializeRequestTxAndStateReq, - deserializeRequestTxAndStateResp, - {} - ) - - if (response && response.stateList && response.stateList.length === accountIds.length) { - this.mainLogger.debug( - `requestInitialData: txid: ${queueEntry.logID} received data for ${response.stateList.length} accounts` - ) - } else { - this.mainLogger.error(`requestInitialData: txid: ${queueEntry.logID} response is null or incomplete`) - continue - } - - const results: WrappedResponses = {} - const receipt2 = this.stateManager.getSignedReceipt(queueEntry) - if (receipt2 == null) { - return - } - if (receipt2.proposal.accountIDs.length !== response.stateList.length) { - if (logFlags.error && logFlags.debug) - this.mainLogger.error(`requestInitialData data.length not matching for tx ${queueEntry.logID}`) - return - } - for (const data of response.stateList) { - if (data == null) { - /* prettier-ignore */ - if (logFlags.error && logFlags.debug) this.mainLogger.error(`requestInitialData data == null for tx ${queueEntry.logID}`); - success = false - break - } - const indexInVote = receipt2.proposal.accountIDs.indexOf(data.accountId) - if (data.stateId === receipt2.proposal.beforeStateHashes[indexInVote]) { - successCount++ - results[data.accountId] = data - /* prettier-ignore */ - if (logFlags.debug) this.mainLogger.debug(`requestInitialData: txid: ${queueEntry.logID} success accountId: ${data.accountId} stateId: ${data.stateId}`); - } - } - return results - } catch (e) { - nestedCountersInstance.countEvent('stateManager', 'requestInitialDataError') - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`requestInitialData: txid: ${queueEntry.logID} error: ${e.message}`) - } - } - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`requestInitialData: txid: ${queueEntry.logID} failed. successCount: ${successCount} accountIds: ${accountIds.length}`); - profilerInstance.profileSectionEnd('requestInitialData') - } - - resetReceiptsToForward(): void { - const MAX_RECEIPT_AGE_MS = 15000 // 15s - const now = shardusGetTime() - // Clear receipts that are older than MAX_RECEIPT_AGE_MS - for (const [key] of this.forwardedReceiptsByTimestamp) { - if (now - key > MAX_RECEIPT_AGE_MS) { - this.forwardedReceiptsByTimestamp.delete(key) - } - } - } - - // getReceipt(queueEntry: QueueEntry): AppliedReceipt { - // if (queueEntry.appliedReceiptFinal != null) { - // return queueEntry.appliedReceiptFinal - // } - // // start with a receipt we made - // let receipt: AppliedReceipt = queueEntry.appliedReceipt - // if (receipt == null) { - // // or see if we got one - // receipt = queueEntry.recievedAppliedReceipt - // } - // // if we had to repair use that instead. 
this stomps the other ones - // if (queueEntry.appliedReceiptForRepair != null) { - // receipt = queueEntry.appliedReceiptForRepair - // } - // queueEntry.appliedReceiptFinal = receipt - // return receipt - // } - - /** - * processQueue_accountSeen - * Helper for processQueue to detect if this queueEntry has any accounts that are already blocked because they were seen upstream - * a seen account is a an account that is involved in a TX that is upstream(older) in the queue - * @param seenAccounts - * @param queueEntry - */ - processQueue_accountSeen(seenAccounts: SeenAccounts, queueEntry: QueueEntry): boolean { - if (this.config.debug.useShardusMemoryPatterns && queueEntry.shardusMemoryPatternSets != null) { - return this.processQueue_accountSeen2(seenAccounts, queueEntry) - } - - if (queueEntry.uniqueKeys == null) { - //TSConversion double check if this needs extra logging - return false - } - for (const key of queueEntry.uniqueKeys) { - // eslint-disable-next-line security/detect-object-injection - if (seenAccounts[key] != null) { - return true - } - } - return false - } - - processQueue_getUpstreamTx(seenAccounts: SeenAccounts, queueEntry: QueueEntry): QueueEntry | null { - if (this.config.debug.useShardusMemoryPatterns && queueEntry.shardusMemoryPatternSets != null) { - return null - } - if (queueEntry.uniqueKeys == null) { - //TSConversion double check if this needs extra logging - return null - } - for (const key of queueEntry.uniqueKeys) { - // eslint-disable-next-line security/detect-object-injection - if (seenAccounts[key] != null) { - return seenAccounts[key] - } - } - return null - } - - /** - * processQueue_markAccountsSeen - * Helper for processQueue to mark accounts as seen. - * note only operates on writeable accounts. a read only account should not block downstream operations - * a seen account is a an account that is involved in a TX that is upstream(older) in the queue - * @param seenAccounts - * @param queueEntry - */ - processQueue_markAccountsSeen(seenAccounts: SeenAccounts, queueEntry: QueueEntry): void { - if (this.config.debug.useShardusMemoryPatterns && queueEntry.shardusMemoryPatternSets != null) { - this.processQueue_markAccountsSeen2(seenAccounts, queueEntry) - return - } - - if (queueEntry.uniqueWritableKeys == null) { - //TSConversion double check if this needs extra logging - return - } - // only mark writeable keys as seen but we will check/clear against all keys - /* eslint-disable security/detect-object-injection */ - for (const key of queueEntry.uniqueWritableKeys) { - if (seenAccounts[key] == null) { - seenAccounts[key] = queueEntry - } - } - /* eslint-enable security/detect-object-injection */ - } - - // this.queueReads = new Set() - // this.queueWrites = new Set() - processQueue_accountSeen2(seenAccounts: SeenAccounts, queueEntry: QueueEntry): boolean { - if (queueEntry.uniqueKeys == null) { - //TSConversion double check if this needs extra logging - return false - } - - if (queueEntry.shardusMemoryPatternSets != null) { - //normal blocking for read write - for (const id of queueEntry.shardusMemoryPatternSets.rw) { - if (this.queueWrites.has(id)) { - // nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen rw queue_write') - // nestedCountersInstance.countEvent('stateManager', `shrd_accountSeen rw queue_write ${id}`) - return true - } - if (this.queueReadWritesOld.has(id)) { - // nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen rw old queue_write') - // nestedCountersInstance.countEvent('stateManager', `shrd_accountSeen 
rw old queue_write ${id}`) - return true - } - //also blocked by upstream reads - if (this.queueReads.has(id)) { - // nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen rw queue_read') - // nestedCountersInstance.countEvent('stateManager', `shrd_accountSeen rw queue_read ${id}`) - return true - } - } - // in theory write only is not blocked by upstream writes - // but has to wait its turn if there is an uptream read - for (const id of queueEntry.shardusMemoryPatternSets.wo) { - //also blocked by upstream reads - if (this.queueReads.has(id)) { - // nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen wo queue_read') - // nestedCountersInstance.countEvent('stateManager', `shrd_accountSeen wo queue_read ${id}`) - return true - } - if (this.queueReadWritesOld.has(id)) { - // nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen wo queue_read_write_old') - // nestedCountersInstance.countEvent('stateManager', `shrd_accountSeen wo queue_read_write_old ${id}`) - return true - } - } - - // write once... also not blocked in theory, because the first op is a write - // this is a special case for something like code bytes that are written once - // and then immutable - // for (const id of queueEntry.shardusMemoryPatternSets.on) { - // if(this.queueWrites.has(id)){ - // return true - // } - // if(this.queueWritesOld.has(id)){ - // return true - // } - // } - - //read only blocks for upstream writes - for (const id of queueEntry.shardusMemoryPatternSets.ro) { - if (this.queueWrites.has(id)) { - nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen ro queue_write') - return true - } - if (this.queueReadWritesOld.has(id)) { - nestedCountersInstance.countEvent('stateManager', 'shrd_accountSeen ro queue_read_write_old') - return true - } - //note blocked by upstream reads, because this read only operation - //will not impact the upstream read - } - - //we made it, not blocked - return false - } - - for (const key of queueEntry.uniqueKeys) { - // eslint-disable-next-line security/detect-object-injection - if (seenAccounts[key] != null) { - return true - } - } - - return false - } - - processQueue_markAccountsSeen2(seenAccounts: SeenAccounts, queueEntry: QueueEntry): void { - if (queueEntry.uniqueWritableKeys == null) { - //TSConversion double check if this needs extra logging - return - } - - if (queueEntry.shardusMemoryPatternSets != null) { - for (const id of queueEntry.shardusMemoryPatternSets.rw) { - this.queueWrites.add(id) - this.queueReads.add(id) - } - for (const id of queueEntry.shardusMemoryPatternSets.wo) { - this.queueWrites.add(id) - } - for (const id of queueEntry.shardusMemoryPatternSets.on) { - this.queueWrites.add(id) - } - for (const id of queueEntry.shardusMemoryPatternSets.ro) { - this.queueReads.add(id) - } - return - } - - // only mark writeable keys as seen but we will check/clear against all keys - /* eslint-disable security/detect-object-injection */ - for (const key of queueEntry.uniqueWritableKeys) { - if (seenAccounts[key] == null) { - seenAccounts[key] = queueEntry - } - //old style memory access is treated as RW: - this.queueReadWritesOld.add(key) - } - /* eslint-enable security/detect-object-injection */ - } - - /** - * processQueue_clearAccountsSeen - * Helper for processQueue to clear accounts that were marked as seen. 
- * a seen account is a an account that is involved in a TX that is upstream(older) in the queue - * @param seenAccounts - * @param queueEntry - */ - processQueue_clearAccountsSeen(seenAccounts: SeenAccounts, queueEntry: QueueEntry): void { - if (queueEntry.uniqueKeys == null) { - //TSConversion double check if this needs extra logging - return - } - /* eslint-disable security/detect-object-injection */ - for (const key of queueEntry.uniqueKeys) { - if (seenAccounts[key] != null && seenAccounts[key].logID === queueEntry.logID) { - if (logFlags.verbose) this.mainLogger.debug(`${new Date()}}clearing key ${key} for tx ${queueEntry.logID}`) - seenAccounts[key] = null - } - } - /* eslint-enable security/detect-object-injection */ - } - - /** - * Helper for processQueue to dump debug info - * @param queueEntry - * @param app - */ - processQueue_debugAccountData(queueEntry: QueueEntry, app: Shardus.App): string { - let debugStr = '' - //if (logFlags.verbose) { //this function is always verbose - if (queueEntry.uniqueKeys == null) { - //TSConversion double check if this needs extra logging - return queueEntry.logID + ' uniqueKeys empty error' - } - /* eslint-disable security/detect-object-injection */ - for (const key of queueEntry.uniqueKeys) { - if (queueEntry.collectedData[key] != null) { - debugStr += utils.makeShortHash(key) + ' : ' + app.getAccountDebugValue(queueEntry.collectedData[key]) + ', ' - } - } - /* eslint-enable security/detect-object-injection */ - //} - return debugStr - } - - /** - * txWillChangeLocalData - * This is a just in time check to see if a TX will modify any local accounts managed by this node. - * Not longer used. candidate for deprecation, but this may be useful in some logging/analysis later - * - * @param queueEntry - */ - txWillChangeLocalData(queueEntry: QueueEntry): boolean { - //if this TX modifies a global then return true since all nodes own all global accounts. - if (queueEntry.globalModification) { - return true - } - const timestamp = queueEntry.acceptedTx.timestamp - const ourNodeData = this.stateManager.currentCycleShardData.nodeShardData - for (const key of queueEntry.uniqueWritableKeys) { - if (this.stateManager.accountGlobals.isGlobalAccount(key)) { - //ignore globals in non global mod tx. - continue + /** + * txWillChangeLocalData + * This is a just in time check to see if a TX will modify any local accounts managed by this node. + * Not longer used. candidate for deprecation, but this may be useful in some logging/analysis later + * + * @param queueEntry + */ + txWillChangeLocalData(queueEntry: QueueEntry): boolean { + //if this TX modifies a global then return true since all nodes own all global accounts. + if (queueEntry.globalModification) { + return true + } + const timestamp = queueEntry.acceptedTx.timestamp + const ourNodeData = this.stateManager.currentCycleShardData.nodeShardData + for (const key of queueEntry.uniqueWritableKeys) { + if (this.stateManager.accountGlobals.isGlobalAccount(key)) { + //ignore globals in non global mod tx. 
+ continue } let hasKey = false @@ -8765,52 +1348,7 @@ class TransactionQueue { }) return stuckTxs - } - - getDebugProccessingStatus(): unknown { - let txDebug = '' - if (this.debugRecentQueueEntry != null) { - const app = this.app - const queueEntry = this.debugRecentQueueEntry - txDebug = `logID:${queueEntry.logID} state:${queueEntry.state} hasAll:${queueEntry.hasAll} globalMod:${queueEntry.globalModification}` - txDebug += ` qId: ${queueEntry.entryID} values: ${this.processQueue_debugAccountData( - queueEntry, - app - )} AcceptedTransaction: ${utils.stringifyReduce(queueEntry.acceptedTx)}` - } - return { - isStuckProcessing: this.isStuckProcessing, - transactionProcessingQueueRunning: this.transactionProcessingQueueRunning, - stuckProcessingCount: this.stuckProcessingCount, - stuckProcessingCyclesCount: this.stuckProcessingCyclesCount, - stuckProcessingQueueLockedCyclesCount: this.stuckProcessingQueueLockedCyclesCount, - processingLastRunTime: this.processingLastRunTime, - debugLastProcessingQueueStartTime: this.debugLastProcessingQueueStartTime, - debugLastAwaitedCall: this.debugLastAwaitedCall, - debugLastAwaitedCallInner: this.debugLastAwaitedCallInner, - debugLastAwaitedAppCall: this.debugLastAwaitedAppCall, - debugLastAwaitedCallInnerStack: this.debugLastAwaitedCallInnerStack, - debugLastAwaitedAppCallStack: this.debugLastAwaitedAppCallStack, - txDebug, - //todo get the transaction we are stuck on. what type is it? id etc. - } - } - - clearStuckProcessingDebugVars(): void { - this.isStuckProcessing = false - this.debugLastAwaitedCall = '' - this.debugLastAwaitedCallInner = '' - this.debugLastAwaitedAppCall = '' - this.debugLastAwaitedCallInnerStack = {} - this.debugLastAwaitedAppCallStack = {} - - this.debugRecentQueueEntry = null - this.debugLastProcessingQueueStartTime = 0 - - this.stuckProcessingCount = 0 - this.stuckProcessingCyclesCount = 0 - this.stuckProcessingQueueLockedCyclesCount = 0 - } + } /** * Used to unblock and restart the processing queue if it gets stuck @@ -8830,69 +1368,7 @@ class TransactionQueue { } this.stateManager.tryStartTransactionProcessingQueue() - } - - setDebugLastAwaitedCall(label: string, complete = DebugComplete.Incomplete): void { - this.debugLastAwaitedCall = label + (complete === DebugComplete.Completed ? ' complete' : '') - this.debugLastAwaitedCallInner = '' - this.debugLastAwaitedAppCall = '' - } - - setDebugLastAwaitedCallInner(label: string, complete = DebugComplete.Incomplete): void { - this.debugLastAwaitedCallInner = label + (complete === DebugComplete.Completed ? 
' complete' : '') - this.debugLastAwaitedAppCall = '' - - if (complete === DebugComplete.Incomplete) { - // eslint-disable-next-line security/detect-object-injection - if (this.debugLastAwaitedCallInnerStack[label] == null) { - // eslint-disable-next-line security/detect-object-injection - this.debugLastAwaitedCallInnerStack[label] = 1 - } else { - // eslint-disable-next-line security/detect-object-injection - this.debugLastAwaitedCallInnerStack[label]++ - } - } else { - //decrement the count if it is greater than 1, delete the key if the count is 1 - // eslint-disable-next-line security/detect-object-injection - if (this.debugLastAwaitedCallInnerStack[label] != null) { - // eslint-disable-next-line security/detect-object-injection - if (this.debugLastAwaitedCallInnerStack[label] > 1) { - // eslint-disable-next-line security/detect-object-injection - this.debugLastAwaitedCallInnerStack[label]-- - } else { - // eslint-disable-next-line security/detect-object-injection - delete this.debugLastAwaitedCallInnerStack[label] - } - } - } - } - setDebugSetLastAppAwait(label: string, complete = DebugComplete.Incomplete): void { - this.debugLastAwaitedAppCall = label + (complete === DebugComplete.Completed ? ' complete' : '') - - if (complete === DebugComplete.Incomplete) { - // eslint-disable-next-line security/detect-object-injection - if (this.debugLastAwaitedAppCallStack[label] == null) { - // eslint-disable-next-line security/detect-object-injection - this.debugLastAwaitedAppCallStack[label] = 1 - } else { - // eslint-disable-next-line security/detect-object-injection - this.debugLastAwaitedAppCallStack[label]++ - } - } else { - //decrement the count if it is greater than 1, delete the key if the count is 1 - // eslint-disable-next-line security/detect-object-injection - if (this.debugLastAwaitedAppCallStack[label] != null) { - // eslint-disable-next-line security/detect-object-injection - if (this.debugLastAwaitedAppCallStack[label] > 1) { - // eslint-disable-next-line security/detect-object-injection - this.debugLastAwaitedAppCallStack[label]-- - } else { - // eslint-disable-next-line security/detect-object-injection - delete this.debugLastAwaitedAppCallStack[label] - } - } - } - } + } addressCountInQueue(address: string, limit: number): number { let count = 0 @@ -8963,44 +1439,7 @@ class TransactionQueue { if (this.archivedQueueEntriesByID.has(txId)) return this.getDebugQueueInfo(this.archivedQueueEntriesByID.get(txId)) return null } - getDebugQueueInfo(queueEntry: QueueEntry): any { - return { - txId: queueEntry.acceptedTx.txId, - tx: queueEntry.acceptedTx, - logID: queueEntry.logID, - nodeId: Self.id, - state: queueEntry.state, - hasAll: queueEntry.hasAll, - hasShardInfo: queueEntry.hasShardInfo, - isExecutionNode: queueEntry.isInExecutionHome, - globalModification: queueEntry.globalModification, - entryID: queueEntry.entryID, - txGroupCyle: queueEntry.txGroupCycle, - uniqueKeys: queueEntry.uniqueKeys, - collectedData: queueEntry.collectedData, - finalData: queueEntry.collectedFinalData, - preApplyResult: queueEntry.preApplyTXResult, - txAge: shardusGetTime() - queueEntry.acceptedTx.timestamp, - lastFinalDataRequestTimestamp: queueEntry.lastFinalDataRequestTimestamp, - dataSharedTimestamp: queueEntry.dataSharedTimestamp, - firstVoteTimestamp: queueEntry.firstVoteReceivedTimestamp, - lastVoteTimestamp: queueEntry.lastVoteReceivedTimestamp, - // firstConfirmationsTimestamp: queueEntry.firstConfirmOrChallengeTimestamp, - // robustBestConfirmation: queueEntry.receivedBestConfirmation, - // 
robustBestVote: queueEntry.receivedBestVote, - // robustBestChallenge: queueEntry.receivedBestChallenge, - // completedRobustVote: queueEntry.robustQueryVoteCompleted, - // completedRobustChallenge: queueEntry.robustQueryConfirmOrChallengeCompleted, - txDebug: queueEntry.txDebug, - executionDebug: queueEntry.executionDebug, - waitForReceiptOnly: queueEntry.waitForReceiptOnly, - ourVote: queueEntry.ourVote || null, - signedReceipt: this.stateManager.getSignedReceipt(queueEntry) || null, - // uniqueChallenges: queueEntry.uniqueChallengesCount, - collectedVoteCount: queueEntry.collectedVoteHashes.length, - simpleDebugStr: this.app.getSimpleTxDebugValue ? this.app.getSimpleTxDebugValue(queueEntry.acceptedTx?.data) : '', - } - } + // eslint-disable-next-line @typescript-eslint/explicit-function-return-type removeTxFromArchivedQueue(txId: string) { // remove from the archived queue array and map by txId @@ -9022,35 +1461,7 @@ class TransactionQueue { this.txDebugMarkEndTime(queueEntry, currentState) queueEntry.state = nextState this.txDebugMarkStartTime(queueEntry, nextState) - } - txDebugMarkStartTime(queueEntry: QueueEntry, state: string): void { - if (queueEntry.txDebug.startTime[state] == null) { - queueEntry.txDebug.startTime[state] = process.hrtime() - queueEntry.txDebug.startTimestamp[state] = shardusGetTime() - } - } - txDebugMarkEndTime(queueEntry: QueueEntry, state: string): void { - if (queueEntry.txDebug.startTime[state]) { - const endTime = process.hrtime(queueEntry.txDebug.startTime[state]) - queueEntry.txDebug.endTime[state] = endTime - queueEntry.txDebug.endTimestamp[state] = shardusGetTime() - - const durationInNanoseconds = endTime[0] * 1e9 + endTime[1] - const durationInMilliseconds = durationInNanoseconds / 1e6 - - queueEntry.txDebug.duration[state] = durationInMilliseconds - - delete queueEntry.txDebug.startTime[state] - delete queueEntry.txDebug.endTime[state] - } - } - clearDebugAwaitStrings(): void { - this.debugLastAwaitedCall = '' - this.debugLastAwaitedCallInner = '' - this.debugLastAwaitedAppCall = '' - this.debugLastAwaitedCallInnerStack = {} - this.debugLastAwaitedAppCallStack = {} - } + } getQueueLengthBuckets(): any { try { @@ -9084,4 +1495,79 @@ class TransactionQueue { } } +// Define interface for methods added via Object.assign +interface TransactionQueue { + // Methods from entryMethods + routeAndQueueAcceptedTransaction( + acceptedTx: AcceptedTx, + sendGossip: boolean, + sender: Shardus.Node | null, + globalModification: boolean, + noConsensus: boolean + ): string | boolean + + // Methods from nonceMethods + isTxInPendingNonceQueue(accountId: string, txId: string): boolean + addTransactionToNonceQueue(nonceQueueItem: NonceQueueItem): { success: boolean; reason?: string; alreadyAdded?: boolean } + processNonceQueue(wrappedAccountsToAdd: any[]): void + getPendingCountInNonceQueue(): number + + // Methods from coreMethods + processTransactions(forceToRun?: boolean): Promise + removeFromQueue(queueEntry: QueueEntry, currentIndex: number): void + + // Methods from handlers + setupHandlers(): void + handleSharedTX(tx: Shardus.TimestampedTx, appData: unknown, sender: Shardus.Node): QueueEntry + + // Methods from factMethods + getQueueEntrySafe(txId: string): QueueEntry | null + getQueueEntryArchived(txId: string, route: string): QueueEntry | null + getQueueEntry(txId: string): QueueEntry | null + queueEntryGetTransactionGroup(queueEntry: QueueEntry, tryUpdate?: boolean): Shardus.Node[] + queueEntryGetConsensusGroup(queueEntry: QueueEntry): Shardus.Node[] + 
queueEntryGetConsensusGroupForAccount(queueEntry: QueueEntry, account: string, cycle?: number): Shardus.Node[] + getStartAndEndIndexOfTargetGroup(targetGroup: string[], transactionGroup: any[]): { startIndex: number; endIndex: number } + factValidateCorrespondingTellFinalDataSender(queueEntry: QueueEntry, sender: string): boolean + factTellCorrespondingNodesFinalData(queueEntry: QueueEntry): void + getArchivedQueueEntryByAccountIdAndHash(accountId: string, hash: string, msg: string): QueueEntry | null + requestFinalData(queueEntry: QueueEntry, accountIds: string[], nodesToAskKeys?: string[] | null, includeAppReceiptData?: boolean): Promise + + // Methods from archiverMethods + getArchiverReceiptFromQueueEntry(queueEntry: QueueEntry): Promise + addOriginalTxDataToForward(queueEntry: QueueEntry): void + addReceiptToForward(queueEntry: QueueEntry, debugString?: string): Promise + getReceiptsToForward(): ArchiverReceipt[] + + // Methods from debugMethods + setDebugLastAwaitedCall(label: string, complete?: DebugComplete): void + setDebugSetLastAppAwait(label: string, complete?: DebugComplete): void + setDebugLastAwaitedCallInner(label: string, complete?: DebugComplete): void + clearTxDebugStatList(): void + printTxDebug(): string + printTxDebugByTxId(txId: string): string + dumpTxDebugToStatList(queueEntry: QueueEntry): void + txDebugMarkStartTime(queueEntry: QueueEntry, state: string): void + txDebugMarkEndTime(queueEntry: QueueEntry, state: string): void + getDebugProccessingStatus(): unknown + clearStuckProcessingDebugVars(): void + getDebugQueueInfo(queueEntry: QueueEntry): any + + // Methods from other split files that need to be added + processQueue_accountSeen(seenAccounts: SeenAccounts, queueEntry: QueueEntry): boolean + processQueue_markAccountsSeen(seenAccounts: SeenAccounts, queueEntry: QueueEntry): void + processQueue_clearAccountsSeen(seenAccounts: SeenAccounts, queueEntry: QueueEntry): void + processQueue_debugAccountData(queueEntry: QueueEntry, app: any): string +} + +Object.assign(TransactionQueue.prototype, handlers); +Object.assign(TransactionQueue.prototype, coreMethods); +Object.assign(TransactionQueue.prototype, factMethods); +Object.assign(TransactionQueue.prototype, nonceMethods); +Object.assign(TransactionQueue.prototype, entryMethods); +Object.assign(TransactionQueue.prototype, seenMethods); +Object.assign(TransactionQueue.prototype, expiredMethods); +Object.assign(TransactionQueue.prototype, debugMethods); +Object.assign(TransactionQueue.prototype, archiverMethods); + export default TransactionQueue diff --git a/src/state-manager/Utils.ts b/src/state-manager/Utils.ts new file mode 100644 index 000000000..8b3e14b76 --- /dev/null +++ b/src/state-manager/Utils.ts @@ -0,0 +1,406 @@ +import { P2P as P2PTypes } from '@shardeum-foundation/lib-types' +import { logFlags } from '../logger' +import * as utils from '../utils' +import * as ShardusTypes from '../shardus/shardus-types' +import ShardFunctions from './shardFunctions' +import { GetAccountDataByRangeSmart } from './state-manager-types' +import { nestedCountersInstance } from '../utils/nestedCounters' +import * as Comms from '../p2p/Comms' +import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' +// GetAccountDataByRange types are not used in this file +import * as NodeList from '../p2p/NodeList' +import { shardusGetTime } from '../network' +import { AccountCopy, WrappedStateArray, AccountHashCache } from './state-manager-types' +import { timingSafeEqual } from 'crypto' +import * as CycleChain from 
'../p2p/CycleChain' +import { DebugComplete } from '../state-manager/TransactionQueue' +import * as Utils from '../utils' + +/*** + * ## ## ######## #### ## ###### + * ## ## ## ## ## ## ## + * ## ## ## ## ## ## + * ## ## ## ## ## ###### + * ## ## ## ## ## ## + * ## ## ## ## ## ## ## + * ####### ## #### ######## ###### + */ + +export const utilsMethods = { + debugNodeGroup(key: string, key2: number, msg: string, nodes: P2PTypes.P2PTypes.NodeInfo[]) { + if (logFlags.playback) + this.logger.playbackLogNote( + 'debugNodeGroup', + `${utils.stringifyReduce(key)}_${key2}`, + `${msg} ${utils.stringifyReduce( + nodes.map((node) => { + return { id: node.id, port: node.externalPort } + }) + )}` + ) + }, + + getRandomInt(max: number): number { + return Math.floor(Math.random() * Math.floor(max)) + }, + + tryGetBoolProperty(parent: Record, propertyName: string, defaultValue: boolean) { + if (parent == null) { + return defaultValue + } + // eslint-disable-next-line security/detect-object-injection + const tempValue = parent[propertyName] + if (typeof tempValue === 'boolean') { + return tempValue + } + return defaultValue + }, + + /** + * test once at the given probability to fail. If it fails, log the message and return true. If it doesnt fail, return false. + * @param failChance + * @param debugName + * @param key + * @param message + * @param verboseRequired + * @returns + */ + testFailChance( + failChance: number, + debugName: string, + key: string, + message: string, + verboseRequired: boolean + ): boolean { + if (failChance == null) { + return false + } + + const rand = Math.random() + if (failChance > rand) { + if (debugName != null) { + if (verboseRequired === false || logFlags.verbose) { + this.logger.playbackLogNote(`dbg_fail_${debugName}`, key, message) + } + nestedCountersInstance.countEvent('dbg_fail_', debugName ?? 'unknown') + } + return true + } + return false + }, + + async startCatchUpQueue() { + //make sure we have cycle shard data. + await this.waitForShardData('startCatchUpQueue') + + await this._firstTimeQueueAwait() + + if (logFlags.console) console.log('syncStateData startCatchUpQueue ' + ' time:' + shardusGetTime()) + + // all complete! 
+ this.mainLogger.info(`DATASYNC: complete`) + this.logger.playbackLogState('datasyncComplete', '', '') + + // update the debug tag and restart the queue + this.dataPhaseTag = 'ACTIVE: ' + this.accountSync.dataSyncMainPhaseComplete = true + //update sync statement + this.accountSync.syncStatement.syncComplete = true + this.accountSync.syncStatement.cycleEnded = this.currentCycleShardData.cycleNumber + this.accountSync.syncStatement.numCycles = + this.accountSync.syncStatement.cycleEnded - this.accountSync.syncStatement.cycleStarted + + this.accountSync.syncStatement.syncEndTime = shardusGetTime() + this.accountSync.syncStatement.syncSeconds = + (this.accountSync.syncStatement.syncEndTime - this.accountSync.syncStatement.syncStartTime) / 1000 + + /* prettier-ignore */ nestedCountersInstance.countEvent('sync', `sync comlete numCycles: ${this.accountSync.syncStatement.numCycles} start:${this.accountSync.syncStatement.cycleStarted} end:${this.accountSync.syncStatement.cycleEnded} numAccounts: ${this.accountSync.syncStatement.numAccounts}`) + if (this.accountSync.syncStatement.internalFlag === true) { + /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_syncStatement', ` `, `${utils.stringifyReduce(this.accountSync.syncStatement)}`) + this.accountSync.syncStatmentIsComplete() + /* prettier-ignore */ this.statemanager_fatal( 'shrd_sync_syncStatement-startCatchUpQueue', `${utils.stringifyReduce(this.accountSync.syncStatement)}` ) + /* prettier-ignore */ this.mainLogger.debug(`DATASYNC: syncStatement-startCatchUpQueue c:${this.currentCycleShardData.cycleNumber} ${utils.stringifyReduce(this.accountSync.syncStatement)}`) + } else { + this.accountSync.syncStatement.internalFlag = true + } + + this.tryStartTransactionProcessingQueue() + + if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_mainphaseComplete', ` `, ` `) + }, + + // just a placeholder for later + recordPotentialBadnode() { + // The may need to live on the p2p class, or call into it + // record the evidence. + // potentially report it + }, + + /** + * writeCombinedAccountDataToBackups + * @param failedHashes This is a list of hashes that failed and should be ignored in the write operation. 
+ */ + async writeCombinedAccountDataToBackups( + goodAccounts: ShardusTypes.WrappedData[], + failedHashes: string[] + ): Promise { + // ?:{[id:string]: boolean} + if (failedHashes.length === 0 && goodAccounts.length === 0) { + return 0 // nothing to do yet + } + + const failedAccountsById: { [id: string]: boolean } = {} + for (const hash of failedHashes) { + // eslint-disable-next-line security/detect-object-injection + failedAccountsById[hash] = true + } + + const lastCycle = this.p2p.state.getLastCycle() + const cycleNumber = lastCycle.counter + const accountCopies: AccountCopy[] = [] + for (const accountEntry of goodAccounts) { + // check failed hashes + if (failedAccountsById[accountEntry.stateId]) { + continue + } + // wrappedAccounts.push({ accountId: account.address, stateId: account.hash, data: account, timestamp: account.timestamp }) + const isGlobal = this.accountGlobals.isGlobalAccount(accountEntry.accountId) + const accountCopy: AccountCopy = { + accountId: accountEntry.accountId, + data: accountEntry.data, + timestamp: accountEntry.timestamp, + hash: accountEntry.stateId, + cycleNumber, + isGlobal: isGlobal || false, + } + accountCopies.push(accountCopy) + } + /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('writeCombinedAccountDataToBackups ' + accountCopies.length + ' ' + utils.stringifyReduce(accountCopies)) + + if (logFlags.verbose) console.log('DBG accountCopies. (in main log)') + + // await this.storage.createAccountCopies(accountCopies) + await this.storage.createOrReplaceAccountCopy(accountCopies) + + return accountCopies.length + }, + + // let this learn offset.. + // if we get the same range request from the same client..... nope! + + // This will make calls to app.getAccountDataByRange but if we are close enough to real time it will query any newer data and return lastUpdateNeeded = true + async getAccountDataByRangeSmart( + accountStart: string, + accountEnd: string, + tsStart: number, + maxRecords: number, + offset: number, + accountOffset: string + ): Promise { + const tsEnd = shardusGetTime() + + // todo convert this to use account backup data, then compare perf vs app as num accounts grows past 10k + + // alternate todo: query it all from the app then create a smart streaming wrapper that persists between calls and even + // handles updates to day by putting updated data at the end of the list with updated data wrappers. + + const wrappedAccounts = await this.app.getAccountDataByRange( + accountStart, + accountEnd, + tsStart, + tsEnd, + maxRecords, + offset, + accountOffset + ) + let lastUpdateNeeded = false + let wrappedAccounts2: WrappedStateArray = [] + let highestTs = 0 + let delta = 0 + // do we need more updates + if (wrappedAccounts.length === 0) { + lastUpdateNeeded = true + } else { + // see if our newest record is new enough + highestTs = 0 + for (const account of wrappedAccounts) { + if (account.timestamp > highestTs) { + highestTs = account.timestamp + } + } + delta = tsEnd - highestTs + // if the data we go was close enough to current time then we are done + // may have to be carefull about how we tune this value relative to the rate that we make this query + // we should try to make this query more often then the delta. 
+ if (logFlags.verbose) console.log('delta ' + delta) + // increased allowed delta to allow for a better chance to catch up + + if (delta < this.queueSitTime * 2) { + const tsStart2 = highestTs + wrappedAccounts2 = await this.app.getAccountDataByRange( + accountStart, + accountEnd, + tsStart2, + shardusGetTime(), + maxRecords, + 0, + '' + ) + lastUpdateNeeded = true //?? not sure .. this could cause us to skip some, but that is ok! + } + } + return { wrappedAccounts, lastUpdateNeeded, wrappedAccounts2, highestTs, delta } + }, + + testAccountDataWrapped(accountDataList: ShardusTypes.WrappedData[]) { + if (accountDataList == null) { + return + } + for (const wrappedData of accountDataList) { + const { accountId, stateId, data: recordData } = wrappedData + if (stateId != wrappedData.stateId) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`testAccountDataWrapped what is going on!!: ${utils.makeShortHash(wrappedData.stateId)} stateId: ${utils.makeShortHash(stateId)} `) + } + const hash = this.app.calculateAccountHash(recordData) + + // comparison safe against timing attacks + if (stateId.length !== hash.length || !timingSafeEqual(Buffer.from(stateId), Buffer.from(hash))) { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`testAccountDataWrapped hash test failed: setAccountData for account ${utils.makeShortHash(accountId)} expected account hash: ${utils.makeShortHash(stateId)} got ${utils.makeShortHash(hash)} `) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('testAccountDataWrapped hash test failed: details: ' + utils.stringifyReduce(recordData)) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('testAccountDataWrapped hash test failed: wrappedData.stateId: ' + utils.makeShortHash(wrappedData.stateId)) + const stack = new Error().stack + if (logFlags.error) this.mainLogger.error(`stack: ${stack}`) + } + } + }, + + async checkAndSetAccountData( + accountRecords: ShardusTypes.WrappedData[], + note: string, + processStats: boolean, + updatedAccounts: string[] = null + ): Promise { + const accountsToAdd: unknown[] = [] + const wrappedAccountsToAdd: ShardusTypes.WrappedData[] = [] + const failedHashes: string[] = [] + for (const wrappedAccount of accountRecords) { + const { accountId, stateId, data: recordData, timestamp } = wrappedAccount + const hash = this.app.calculateAccountHash(recordData) + const cycleToRecordOn = CycleChain.getCycleNumberFromTimestamp(wrappedAccount.timestamp) + if (cycleToRecordOn <= -1) { + this.statemanager_fatal( + `checkAndSetAccountData cycleToRecordOn==-1`, + `checkAndSetAccountData cycleToRecordOn==-1 ${wrappedAccount.timestamp}` + ) + failedHashes.push(accountId) + return failedHashes + } + //TODO perf remove this when we are satisfied with the situation + //Additional testing to cache if we try to overrite with older data + if (this.accountCache.hasAccount(accountId)) { + const accountMemData: AccountHashCache = this.accountCache.getAccountHash(accountId) + if (timestamp < accountMemData.t) { + //should update cache anyway (older value may be needed) + + // I have doubts that cache should be able to roll a value back.. + this.accountCache.updateAccountHash( + wrappedAccount.accountId, + wrappedAccount.stateId, + wrappedAccount.timestamp, + cycleToRecordOn + ) + + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`setAccountData: abort. 
checkAndSetAccountData older timestamp note:${note} acc: ${utils.makeShortHash(accountId)} timestamp:${timestamp} accountMemData.t:${accountMemData.t} hash: ${utils.makeShortHash(hash)} cache:${utils.stringifyReduce(accountMemData)}`) + continue //this is a major error need to skip the writing. + } + } + + if (stateId.length === hash.length && timingSafeEqual(Buffer.from(stateId), Buffer.from(hash))) { + accountsToAdd.push(recordData) + wrappedAccountsToAdd.push(wrappedAccount) + + if (updatedAccounts != null) { + updatedAccounts.push(accountId) + } + + const debugString = `setAccountData: note:${note} acc: ${utils.makeShortHash( + accountId + )} hash: ${utils.makeShortHash(hash)} ts:${wrappedAccount.timestamp}` + if (logFlags.debug) this.mainLogger.debug(debugString) + if (logFlags.verbose) console.log(debugString) + + if (wrappedAccount.timestamp === 0) { + const stack = new Error().stack + + this.statemanager_fatal( + `checkAndSetAccountData ts=0`, + `checkAndSetAccountData ts=0 ${debugString} ${stack}` + ) + } + + if (processStats) { + if (this.accountCache.hasAccount(accountId)) { + //TODO STATS BUG.. this is what can cause one form of stats bug. + //we may have covered this account in the past, then not covered it, and now we cover it again. Stats doesn't know how to repair + // this situation. + //TODO, need a way to re-init.. dang idk how to do that! + //this.partitionStats.statsDataSummaryUpdate2(cycleToRecordOn, null, wrapedAccount) + + const tryToCorrectStats = true + if (tryToCorrectStats) { + /* prettier-ignore */ this.transactionQueue.setDebugLastAwaitedCallInner('ths.app.getAccountDataByList') + const accounts = await this.app.getAccountDataByList([wrappedAccount.accountId]) + /* prettier-ignore */ this.transactionQueue.setDebugLastAwaitedCallInner('ths.app.getAccountDataByList', DebugComplete.Completed) + if (accounts != null && accounts.length === 1) { + this.partitionStats.statsDataSummaryUpdate( + cycleToRecordOn, + accounts[0].data, + wrappedAccount, + 'checkAndSetAccountData-' + note + ) + } + } else { + //old way + this.accountCache.updateAccountHash( + wrappedAccount.accountId, + wrappedAccount.stateId, + wrappedAccount.timestamp, + cycleToRecordOn + ) + } + } else { + //I think some work was done to fix diverging stats, but how did it turn out? + this.partitionStats.statsDataSummaryInit( + cycleToRecordOn, + wrappedAccount.accountId, + wrappedAccount.data, + 'checkAndSetAccountData-' + note + ) + } + } else { + //even if we do not process stats still need to update cache + //todo maybe even take the stats out of the pipeline for updating cache? 
(but that is kinda tricky) + this.accountCache.updateAccountHash( + wrappedAccount.accountId, + wrappedAccount.stateId, + wrappedAccount.timestamp, + cycleToRecordOn + ) + } + } else { + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`setAccountData hash test failed: setAccountData for account ${utils.makeShortHash(accountId)} expected account hash: ${utils.makeShortHash(stateId)} got ${utils.makeShortHash(hash)} `) + /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('setAccountData hash test failed: details: ' + utils.stringifyReduce(recordData)) + /* prettier-ignore */ if (logFlags.verbose) console.log(`setAccountData hash test failed: setAccountData for account ${utils.makeShortHash(accountId)} expected account hash: ${utils.makeShortHash(stateId)} got ${utils.makeShortHash(hash)} `) + /* prettier-ignore */ if (logFlags.verbose) console.log('setAccountData hash test failed: details: ' + utils.stringifyReduce(recordData)) + failedHashes.push(accountId) + } + } + /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`setAccountData toAdd:${accountsToAdd.length} failed:${failedHashes.length}`) + /* prettier-ignore */ if (logFlags.verbose) console.log(`setAccountData toAdd:${accountsToAdd.length} failed:${failedHashes.length}`) + /* prettier-ignore */ this.transactionQueue.setDebugLastAwaitedCallInner('ths.app.setAccountData') + await this.app.setAccountData(accountsToAdd) + /* prettier-ignore */ this.transactionQueue.setDebugLastAwaitedCallInner('ths.app.setAccountData', DebugComplete.Completed) + this.transactionQueue.processNonceQueue(wrappedAccountsToAdd) + return failedHashes + } +} \ No newline at end of file diff --git a/src/state-manager/index.ts b/src/state-manager/index.ts index 80522ad7c..8c45d3ce4 100644 --- a/src/state-manager/index.ts +++ b/src/state-manager/index.ts @@ -1,7 +1,7 @@ -import * as ShardusTypes from '../shardus/shardus-types' import Shardus from '../shardus' +import * as ShardusTypes from '../shardus/shardus-types' -import { StateManager as StateManagerTypes, P2P as P2PTypes } from '@shardeum-foundation/lib-types' +import { P2P as P2PTypes, StateManager as StateManagerTypes } from '@shardeum-foundation/lib-types' import { isNodeDown, isNodeLost, isNodeUpRecent } from '../p2p/Lost' @@ -13,109 +13,88 @@ import * as utils from '../utils' import { Utils } from '@shardeum-foundation/lib-types' // not sure about this. 
-import Profiler, { cUninitializedSize, profilerInstance } from '../utils/profiler' -import { P2PModuleContext as P2P } from '../p2p/Context' -import Storage from '../storage' +import { ReceiptMapResult } from '@shardeum-foundation/lib-types/build/src/state-manager/StateManagerTypes' +import { timingSafeEqual } from 'crypto' +import { Logger as Log4jsLogger } from 'log4js' import Crypto from '../crypto' +import { isServiceMode } from '../debug' import Logger, { logFlags } from '../logger' +import { shardusGetTime } from '../network' +import * as Comms from '../p2p/Comms' import * as Context from '../p2p/Context' +import { P2PModuleContext as P2P } from '../p2p/Context' +import * as CycleChain from '../p2p/CycleChain' +import * as NodeList from '../p2p/NodeList' import { activeByIdOrder, byIdOrder } from '../p2p/NodeList' import * as Self from '../p2p/Self' -import * as NodeList from '../p2p/NodeList' -import * as CycleChain from '../p2p/CycleChain' -import * as Comms from '../p2p/Comms' +import Storage from '../storage' +import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' +import { + GetAccountDataWithQueueHintsReqSerializable, + serializeGetAccountDataWithQueueHintsReq +} from '../types/GetAccountDataWithQueueHintsReq' +import { + deserializeGetAccountDataWithQueueHintsResp, + GetAccountDataWithQueueHintsRespSerializable +} from '../types/GetAccountDataWithQueueHintsResp' +import { + GetAccountQueueCountReq, + serializeGetAccountQueueCountReq +} from '../types/GetAccountQueueCountReq' +import { + deserializeGetAccountQueueCountResp, + GetAccountQueueCountResp +} from '../types/GetAccountQueueCountResp' +// GetAccountDataByRange types are not needed in this file +import { ResponseError } from '../types/ResponseError' import { nestedCountersInstance } from '../utils/nestedCounters' -import PartitionStats from './PartitionStats' +import Profiler from '../utils/profiler' import AccountCache from './AccountCache' -import AccountSync from './AccountSync' import AccountGlobals from './AccountGlobals' -import TransactionQueue, { DebugComplete } from './TransactionQueue' -import TransactionRepair from './TransactionRepair' -import TransactionConsenus from './TransactionConsensus' -import PartitionObjects from './PartitionObjects' -import Deprecated from './Deprecated' import AccountPatcher from './AccountPatcher' +import AccountSync from './AccountSync' import CachedAppDataManager from './CachedAppDataManager' +import PartitionObjects from './PartitionObjects' +import PartitionStats from './PartitionStats' import { - CycleShardData, - PartitionReceipt, - FifoLockObjectMap, - QueueEntry, AcceptedTx, AccountCopy, - GetAccountDataByRangeSmart, - WrappedStateArray, + AccountFilter, AccountHashCache, - RequestReceiptForTxReq, - RequestReceiptForTxResp, - RequestStateForTxReqPost, - RequestStateForTxResp, - RequestTxResp, - AppliedVote, - GetAccountDataWithQueueHintsResp, - DebugDumpPartitions, - DebugDumpRangesCovered, + CycleDebugNotes, + CycleShardData, DebugDumpNodesCovered, DebugDumpPartition, + DebugDumpPartitions, DebugDumpPartitionSkip, - MainHashResults, - SimpleDistanceObject, - WrappedResponses, + DebugDumpRangesCovered, + FifoLockObjectMap, + GetAccountDataByRangeSmart, + GetAccountDataWithQueueHintsResp, LocalCachedData, - AccountFilter, - StringBoolObjectMap, - CycleDebugNotes, - AppliedVoteHash, - RequestReceiptForTxResp_old, - RequestAccountQueueCounts, + MainHashResults, + PartitionReceipt, + Proposal, QueueCountsResponse, QueueCountsResult, - TimestampRemoveRequest, + 
QueueEntry, + RequestAccountQueueCounts, SignedReceipt, - Proposal, + SimpleDistanceObject, + StringBoolObjectMap, + TimestampRemoveRequest, + WrappedResponses, + WrappedStateArray } from './state-manager-types' -import { isDebugModeMiddleware, isDebugModeMiddlewareLow } from '../network/debugMiddleware' -import { ReceiptMapResult } from '@shardeum-foundation/lib-types/build/src/state-manager/StateManagerTypes' -import { Logger as Log4jsLogger } from 'log4js' -import { timingSafeEqual } from 'crypto' -import { shardusGetTime } from '../network' -import { isServiceMode } from '../debug' -import { InternalRouteEnum } from '../types/enum/InternalRouteEnum' -import { InternalBinaryHandler } from '../types/Handler' -import { Route } from '@shardeum-foundation/lib-types/build/src/p2p/P2PTypes' -import { VectorBufferStream } from '../utils/serialization/VectorBufferStream' -import { TypeIdentifierEnum } from '../types/enum/TypeIdentifierEnum' -import { - deserializeGetAccountDataWithQueueHintsResp, - GetAccountDataWithQueueHintsRespSerializable, - serializeGetAccountDataWithQueueHintsResp, -} from '../types/GetAccountDataWithQueueHintsResp' -import { - deserializeGetAccountDataWithQueueHintsReq, - GetAccountDataWithQueueHintsReqSerializable, - serializeGetAccountDataWithQueueHintsReq, -} from '../types/GetAccountDataWithQueueHintsReq' -import { WrappedDataFromQueueSerializable } from '../types/WrappedDataFromQueue' -import { - deserializeGetAccountQueueCountResp, - GetAccountQueueCountResp, - serializeGetAccountQueueCountResp, -} from '../types/GetAccountQueueCountResp' -import { - deserializeGetAccountQueueCountReq, - GetAccountQueueCountReq, - serializeGetAccountQueueCountReq, -} from '../types/GetAccountQueueCountReq' -import { deserializeRequestStateForTxPostReq } from '../types/RequestStateForTxPostReq' -import { RequestStateForTxPostResp, serializeRequestStateForTxPostResp } from '../types/RequestStateForTxPostResp' -import { getStreamWithTypeCheck, requestErrorHandler } from '../types/Helpers' -import { RequestErrorEnum } from '../types/enum/RequestErrorEnum' -import { deserializeSpreadAppliedVoteHashReq } from '../types/SpreadAppliedVoteHashReq' -import { RequestTxAndStateReq, deserializeRequestTxAndStateReq } from '../types/RequestTxAndStateReq' -import { serializeRequestTxAndStateResp } from '../types/RequestTxAndStateResp' -import { RequestReceiptForTxRespSerialized, serializeRequestReceiptForTxResp } from '../types/RequestReceiptForTxResp' -import { deserializeRequestReceiptForTxReq } from '../types/RequestReceiptForTxReq' -import { BadRequest, InternalError, ResponseError, serializeResponseError } from '../types/ResponseError' +import TransactionConsenus from './TransactionConsensus' +import TransactionQueue, { DebugComplete } from './TransactionQueue' +import TransactionRepair from './TransactionRepair' +import { endpointMethods } from './Endpoints' +import { fifoMethods } from './FIFO' +import { receiptMethods } from './Receipt' +import { remoteAccountMethods } from './RemoteAccount' +import { shardMethods } from './Shard' +import { utilsMethods } from './Utils' export type Callback = (...args: unknown[]) => void @@ -132,6 +111,57 @@ class WrappedEventEmitter extends EventEmitter { /** * StateManager */ +// Interface declaration for methods from split files +interface StateManager { + // Methods from Endpoints.ts + registerEndpoints(): void + _unregisterEndpoints(): void + _registerListener(emitter: EventEmitter, event: string, callback: Callback): void + _unregisterListener(event: 
string): void + _cleanupListeners(): void + + // Methods from FIFO.ts + fifoLock(fifoName: string): Promise + fifoUnlock(fifoName: string, id: number): void + bulkFifoLockAccounts(accountIDs: string[]): Promise + bulkFifoUnlockAccounts(accountIDs: string[], ourLocks: number[]): void + getLockedFifoAccounts(): FifoLockObjectMap + forceUnlockAllFifoLocks(tag: string): number + clearStaleFifoLocks(): void + + // Methods from Receipt.ts + getSignedReceipt(queueEntry: QueueEntry): SignedReceipt + hasReceipt(queueEntry: QueueEntry): boolean + getReceiptResult(queueEntry: QueueEntry): boolean + getReceiptProposal(queueEntry: QueueEntry): Proposal + generateReceiptMapResults(lastCycle: ShardusTypes.Cycle): ReceiptMapResult[] + + // Methods from RemoteAccount.ts + getLocalOrRemoteAccountQueueCount(address: string): Promise + getLocalOrRemoteAccount(address: string, opts?: { useRICache: boolean; canThrowException?: boolean }): Promise + getAccountFailDump(address: string, message: string): void + getRemoteAccount(address: string): Promise + + // Methods from Shard.ts + updateShardValues(cycleNumber: number, mode: P2PTypes.ModesTypes.Record['mode']): void + calculateChangeInCoverage(): void + getCurrentCycleShardData(): CycleShardData | null + hasCycleShardData(): boolean + waitForShardCalcs(): Promise + + // Methods from Utils.ts + debugNodeGroup(key: string, key2: number, msg: string, nodes: P2PTypes.P2PTypes.NodeInfo[]): void + getRandomInt(max: number): number + tryGetBoolProperty(parent: Record, propertyName: string, defaultValue: boolean): boolean + testFailChance(failChance: number, debugName: string, key: string, message: string, verboseRequired: boolean): boolean + startCatchUpQueue(): Promise + recordPotentialBadnode(): void + writeCombinedAccountDataToBackups(goodAccounts: ShardusTypes.WrappedData[], failedHashes: string[]): Promise + getAccountDataByRangeSmart(accountStart: string, accountEnd: string, tsStart: number, maxRecords: number, offset: number, accountOffset: string): Promise + testAccountDataWrapped(accountDataList: ShardusTypes.WrappedData[]): void + checkAndSetAccountData(accountRecords: ShardusTypes.WrappedData[], note: string, processStats: boolean, updatedAccounts?: string[]): Promise +} + class StateManager { // class StateManager { @@ -161,7 +191,6 @@ class StateManager { partitionObjects: PartitionObjects accountPatcher: AccountPatcher cachedAppDataManager: CachedAppDataManager - depricated: Deprecated // syncTrackers:SyncTracker[]; shardValuesByCycle: Map @@ -312,6 +341,14 @@ class StateManager { this.configsInit() + // Bind methods from split files BEFORE initializing modules + Object.assign(StateManager.prototype, endpointMethods) + Object.assign(StateManager.prototype, fifoMethods) + Object.assign(StateManager.prototype, receiptMethods) + Object.assign(StateManager.prototype, remoteAccountMethods) + Object.assign(StateManager.prototype, shardMethods) + Object.assign(StateManager.prototype, utilsMethods) + //INIT our various modules this.accountCache = new AccountCache(this, profiler, app, logger, crypto, config) @@ -329,7 +366,6 @@ class StateManager { this.transactionConsensus = new TransactionConsenus(this, profiler, app, logger, storage, p2p, crypto, config) this.partitionObjects = new PartitionObjects(this, profiler, app, logger, storage, p2p, crypto, config) - this.depricated = new Deprecated(this, profiler, app, logger, storage, p2p, crypto, config) this.accountPatcher = new AccountPatcher(this, profiler, app, logger, p2p, crypto, config) 
this.cachedAppDataManager = new CachedAppDataManager(this, profiler, app, logger, crypto, p2p, config) @@ -427,6 +463,7 @@ class StateManager { this.lastActiveCount = -1 this.reinjectTxsMap = new Map() + } renewState() { @@ -511,1916 +548,31 @@ class StateManager { this.voteFlipChance = 0 if (this.config && this.config.debug) { - this.voteFlipChance = this.config.debug.voteFlipChance - if (this.voteFlipChance == null) { - this.voteFlipChance = 0 - } - } - - this.failNoRepairTxChance = 0 - if (this.config && this.config.debug) { - this.failNoRepairTxChance = this.config.debug.failNoRepairTxChance - if (this.failNoRepairTxChance == null) { - this.failNoRepairTxChance = 0 - } - } - } - - // TEMP hack emit events through p2p - // had issues with composition - // emit(event: string | symbol, ...args: any[]){ - // this.p2p.emit(event, args) - - // } - - /*** - * ###### ## ## ### ######## ######## ###### ### ## ###### ###### - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ###### ######### ## ## ######## ## ## ## ## ## ## ## ###### - * ## ## ## ######### ## ## ## ## ## ######### ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ###### ## ## ## ## ## ## ######## ###### ## ## ######## ###### ###### - */ - // This is called once per cycle to update to calculate the necessary shard values. - updateShardValues(cycleNumber: number, mode: P2PTypes.ModesTypes.Record['mode']) { - if (this.currentCycleShardData == null) { - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_firstCycle', `${cycleNumber}`, ` first init `) - } - - const cycleShardData = {} as CycleShardData - - // lets make sure shard calculation are happening at a consistent interval - const calculationTime = shardusGetTime() - if (this.lastShardCalculationTS > 0) { - const delay = calculationTime - this.lastShardCalculationTS - this.config.p2p.cycleDuration * 1000 - - if (delay > 5000) { - this.statemanager_fatal( - `updateShardValues-delay > 5s ${delay / 1000}`, - `updateShardValues-delay ${delay / 1000}` - ) - } else if (delay > 4000) { - nestedCountersInstance.countEvent('stateManager', 'updateShardValues delay > 4s') - } else if (delay > 3000) { - nestedCountersInstance.countEvent('stateManager', 'updateShardValues delay > 3s') - } else if (delay > 2000) { - nestedCountersInstance.countEvent('stateManager', 'updateShardValues delay > 2s') - } - - cycleShardData.calculationTime = calculationTime - } - this.lastShardCalculationTS = calculationTime - - // todo get current cycle.. store this by cycle? 
- cycleShardData.nodeShardDataMap = new Map() - cycleShardData.parititionShardDataMap = new Map() - cycleShardData.nodes = this.getNodesForCycleShard(mode) - cycleShardData.activeFoundationNodes = activeByIdOrder.filter((node) => node.foundationNode) - cycleShardData.cycleNumber = cycleNumber - cycleShardData.partitionsToSkip = new Map() - cycleShardData.hasCompleteData = false - - if (this.lastActiveCount === -1) { - this.lastActiveCount = activeByIdOrder.length - } else { - const change = activeByIdOrder.length - this.lastActiveCount - if (change != 0) { - /* prettier-ignore */ nestedCountersInstance.countEvent('networkSize',`cyc:${cycleNumber} active:${activeByIdOrder.length} change:${change}`) - } - this.lastActiveCount = activeByIdOrder.length - } - - try { - // cycleShardData.ourNode = NodeList.nodes.get(Self.id) - cycleShardData.ourNode = NodeList.nodes.get(this.p2p.getNodeId()) - } catch (ex) { - if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_notactive', `${cycleNumber}`, ` `) - return - } - - if (cycleShardData.nodes.length === 0) { - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_noNodeListAvailable', `${cycleNumber}`, ` `) - return // no active nodes so stop calculating values - } - - if (this.config === null || this.config.sharding === null) { - throw new Error('this.config.sharding === null') - } - - const cycle = this.p2p.state.getLastCycle() - if (cycle !== null && cycle !== undefined) { - cycleShardData.timestamp = cycle.start * 1000 - cycleShardData.timestampEndCycle = (cycle.start + cycle.duration) * 1000 - } - - const edgeNodes = this.config.sharding.nodesPerEdge as number - - // save this per cycle? - cycleShardData.shardGlobals = ShardFunctions.calculateShardGlobals( - cycleShardData.nodes.length, - this.config.sharding.nodesPerConsensusGroup as number, - edgeNodes - ) - - this.profiler.profileSectionStart('updateShardValues_computePartitionShardDataMap1') //13ms, #:60 - // partition shard data - ShardFunctions.computePartitionShardDataMap( - cycleShardData.shardGlobals, - cycleShardData.parititionShardDataMap, - 0, - cycleShardData.shardGlobals.numPartitions - ) - this.profiler.profileSectionEnd('updateShardValues_computePartitionShardDataMap1') - - this.profiler.profileSectionStart('updateShardValues_computePartitionShardDataMap2') //37ms, #:60 - // generate limited data for all nodes data for all nodes. - ShardFunctions.computeNodePartitionDataMap( - cycleShardData.shardGlobals, - cycleShardData.nodeShardDataMap, - cycleShardData.nodes, - cycleShardData.parititionShardDataMap, - cycleShardData.nodes, - false - ) - this.profiler.profileSectionEnd('updateShardValues_computePartitionShardDataMap2') - - this.profiler.profileSectionStart('updateShardValues_computeNodePartitionData') //22ms, #:60 - // get extended data for our node - cycleShardData.nodeShardData = ShardFunctions.computeNodePartitionData( - cycleShardData.shardGlobals, - cycleShardData.ourNode, - cycleShardData.nodeShardDataMap, - cycleShardData.parititionShardDataMap, - cycleShardData.nodes, - true - ) - this.profiler.profileSectionEnd('updateShardValues_computeNodePartitionData') - - // This is currently redudnant if we move to lazy init of extended data we should turn it back on - // this.profiler.profileSectionStart('updateShardValues_computeNodePartitionDataMap1') // 4ms, #:60 - // TODO perf scalability need to generate this as needed in very large networks with millions of nodes. 
- // generate full data for nodes that store our home partition - // - // ShardFunctions.computeNodePartitionDataMap(cycleShardData.shardGlobals, cycleShardData.nodeShardDataMap, cycleShardData.nodeShardData.nodeThatStoreOurParitionFull, cycleShardData.parititionShardDataMap, cycleShardData.nodes, true, false) - // this.profiler.profileSectionEnd('updateShardValues_computeNodePartitionDataMap1') - - // cycleShardData.nodeShardData = cycleShardData.nodeShardDataMap.get(cycleShardData.ourNode.id) - - this.profiler.profileSectionStart('updateShardValues_computeNodePartitionDataMap2') //232ms, #:60 - // generate lightweight data for all active nodes (note that last parameter is false to specify the lightweight data) - const fullDataForDebug = true // Set this to false for performance reasons!!! setting it to true saves us from having to recalculate stuff when we dump logs. - ShardFunctions.computeNodePartitionDataMap( - cycleShardData.shardGlobals, - cycleShardData.nodeShardDataMap, - cycleShardData.nodes, - cycleShardData.parititionShardDataMap, - cycleShardData.nodes, - fullDataForDebug - ) - this.profiler.profileSectionEnd('updateShardValues_computeNodePartitionDataMap2') - - // TODO if fullDataForDebug gets turned false we will update the guts of this calculation - // ShardFunctions.computeNodePartitionDataMapExt(cycleShardData.shardGlobals, cycleShardData.nodeShardDataMap, cycleShardData.nodes, cycleShardData.parititionShardDataMap, cycleShardData.nodes) - - this.currentCycleShardData = cycleShardData - this.shardValuesByCycle.set(cycleNumber, cycleShardData) - - // calculate nodes that would just now start syncing edge data because the network shrank. - if (cycleShardData.ourNode.status === 'active') { - this.profiler.profileSectionStart('updateShardValues_getOrderedSyncingNeighbors') //0 - // calculate if there are any nearby nodes that are syncing right now. - if (logFlags.verbose) this.mainLogger.debug(`updateShardValues: getOrderedSyncingNeighbors`) - cycleShardData.syncingNeighbors = this.p2p.state.getOrderedSyncingNeighbors(cycleShardData.ourNode) - this.profiler.profileSectionEnd('updateShardValues_getOrderedSyncingNeighbors') - - if (cycleShardData.syncingNeighbors.length > 0) { - //old: add all syncing nodes - cycleShardData.syncingNeighborsTxGroup = [...cycleShardData.syncingNeighbors] - //TODO filter syncingNeighborsTxGroup to nodes that would care..(cover our data) - // for(let node in cycleShardData.syncingNeighbors){ - - // ShardFunctions. 
- // } - cycleShardData.syncingNeighborsTxGroup.push(cycleShardData.ourNode) - cycleShardData.hasSyncingNeighbors = true - - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_neighbors', `${cycleShardData.cycleNumber}`, ` neighbors: ${utils.stringifyReduce(cycleShardData.syncingNeighbors.map((node) => utils.makeShortHash(node.id) + ':' + node.externalPort))}`) - } else { - cycleShardData.hasSyncingNeighbors = false - } - - if (logFlags.console) console.log(`updateShardValues cycle:${cycleShardData.cycleNumber} `) - - // if (this.preTXQueue.length > 0) { - // for (let tx of this.preTXQueue) { - // /* prettier-ignore */ if (logFlags.playback ) this.logger.playbackLogNote('shrd_sync_preTX', ` `, ` ${utils.stringifyReduce(tx)} `) - // this.transactionQueue.routeAndQueueAcceptedTransaction(tx, false, null) - // } - // this.preTXQueue = [] - // } - this.profiler.profileSectionStart('updateShardValues_updateRuntimeSyncTrackers') //0 - this.accountSync.updateRuntimeSyncTrackers() - this.profiler.profileSectionEnd('updateShardValues_updateRuntimeSyncTrackers') - // this.calculateChangeInCoverage() - } - - this.profiler.profileSectionStart('updateShardValues_getPartitionLists') // 0 - // calculate our consensus partitions for use by data repair: - // cycleShardData.ourConsensusPartitions = [] - const partitions = ShardFunctions.getConsenusPartitionList( - cycleShardData.shardGlobals, - cycleShardData.nodeShardData - ) - cycleShardData.ourConsensusPartitions = partitions - - const partitions2 = ShardFunctions.getStoredPartitionList(cycleShardData.shardGlobals, cycleShardData.nodeShardData) - cycleShardData.ourStoredPartitions = partitions2 - - this.profiler.profileSectionEnd('updateShardValues_getPartitionLists') - - // this will be a huge log. - // Temp disable for log size - // /* prettier-ignore */ if (logFlags.playback ) this.logger.playbackLogNote('shrd_sync_cycleData', `${cycleNumber}`, ` cycleShardData: cycle:${cycleNumber} data: ${utils.stringifyReduce(cycleShardData)}`) - /* prettier-ignore */ if (logFlags.playback ) this.logger.playbackLogNote('shrd_sync_cycleData', `${cycleNumber}`, ` cycleShardData: cycle:${this.currentCycleShardData.cycleNumber} `) - - this.lastActiveNodeCount = cycleShardData.nodes.length - - cycleShardData.hasCompleteData = true - } - - calculateChangeInCoverage(): void { - // maybe this should be a shard function so we can run unit tests on it for expanding or shrinking networks! - const newSharddata = this.currentCycleShardData - - if (newSharddata == null || this.currentCycleShardData == null) { - return - } - - let cycleToCompareTo = newSharddata.cycleNumber - 1 - - //if this is our first time to sync we should attempt to compare to an older cycle - if (this.firstTimeToRuntimeSync === true) { - this.firstTimeToRuntimeSync = false - - //make sure the cycle started is an older one - if (this.accountSync.syncStatement.cycleStarted < cycleToCompareTo) { - cycleToCompareTo = this.accountSync.syncStatement.cycleStarted - } else { - //in theory we could just return but I dont want to change that side of the branch yet. - } - } - - const oldShardData = this.shardValuesByCycle.get(cycleToCompareTo) - - if (oldShardData == null) { - // log ? 
- return - } - const cycle = this.currentCycleShardData.cycleNumber - // oldShardData.shardGlobals, newSharddata.shardGlobals - const coverageChanges = ShardFunctions.computeCoverageChanges( - oldShardData.nodeShardData, - newSharddata.nodeShardData - ) - - this.coverageChangesCopy = coverageChanges - - for (const change of coverageChanges) { - // log info about the change. - // ${utils.stringifyReduce(change)} - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_change', `${oldShardData.cycleNumber}->${newSharddata.cycleNumber}`, ` ${ShardFunctions.leadZeros8(change.start.toString(16))}->${ShardFunctions.leadZeros8(change.end.toString(16))} `) - - // create a range object from our coverage change. - - const range = { - startAddr: 0, - endAddr: 0, - low: '', - high: '', - } as StateManagerTypes.shardFunctionTypes.BasicAddressRange // this init is a somewhat wastefull way to allow the type to be happy. - range.startAddr = change.start - range.endAddr = change.end - range.low = ShardFunctions.leadZeros8(range.startAddr.toString(16)) + '0'.repeat(56) - range.high = ShardFunctions.leadZeros8(range.endAddr.toString(16)) + 'f'.repeat(56) - // create sync trackers - this.accountSync.createSyncTrackerByRange(range, cycle) - } - - if (coverageChanges.length > 0) { - this.accountSync.syncRuntimeTrackers() - } - // launch sync trackers - // coverage changes... should have a list of changes - // should note if the changes are an increase or reduction in covered area. - // log the changes. - // next would be to create some syncTrackers based to cover increases - } - - getCurrentCycleShardData(): CycleShardData | null { - if (this.currentCycleShardData === null) { - const cycle = this.p2p.state.getLastCycle() - if (cycle === null || cycle === undefined) { - return null - } - this.updateShardValues(cycle.counter, cycle.mode) - } - - return this.currentCycleShardData - } - - hasCycleShardData() { - return this.currentCycleShardData != null - } - - async waitForShardCalcs() { - while (this.currentCycleShardData == null) { - this.getCurrentCycleShardData() - await utils.sleep(1000) - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_waitForShardData_firstNode', ``, ` ${utils.stringifyReduce(this.currentCycleShardData)} `) - } - } - - /*** - * ## ## ######## #### ## ###### - * ## ## ## ## ## ## ## - * ## ## ## ## ## ## - * ## ## ## ## ## ###### - * ## ## ## ## ## ## - * ## ## ## ## ## ## ## - * ####### ## #### ######## ###### - */ - - debugNodeGroup(key: string, key2: number, msg: string, nodes: P2PTypes.P2PTypes.NodeInfo[]) { - if (logFlags.playback) - this.logger.playbackLogNote( - 'debugNodeGroup', - `${utils.stringifyReduce(key)}_${key2}`, - `${msg} ${utils.stringifyReduce( - nodes.map((node) => { - return { id: node.id, port: node.externalPort } - }) - )}` - ) - } - - getRandomInt(max: number): number { - return Math.floor(Math.random() * Math.floor(max)) - } - - tryGetBoolProperty(parent: Record, propertyName: string, defaultValue: boolean) { - if (parent == null) { - return defaultValue - } - // eslint-disable-next-line security/detect-object-injection - const tempValue = parent[propertyName] - if (typeof tempValue === 'boolean') { - return tempValue - } - return defaultValue - } - - /** - * test once at the given probability to fail. If it fails, log the message and return true. If it doesnt fail, return false. 
- * @param failChance - * @param debugName - * @param key - * @param message - * @param verboseRequired - * @returns - */ - testFailChance( - failChance: number, - debugName: string, - key: string, - message: string, - verboseRequired: boolean - ): boolean { - if (failChance == null) { - return false - } - - const rand = Math.random() - if (failChance > rand) { - if (debugName != null) { - if (verboseRequired === false || logFlags.verbose) { - this.logger.playbackLogNote(`dbg_fail_${debugName}`, key, message) - } - nestedCountersInstance.countEvent('dbg_fail_', debugName ?? 'unknown') - } - return true - } - return false - } - - async startCatchUpQueue() { - //make sure we have cycle shard data. - await this.waitForShardData('startCatchUpQueue') - - await this._firstTimeQueueAwait() - - if (logFlags.console) console.log('syncStateData startCatchUpQueue ' + ' time:' + shardusGetTime()) - - // all complete! - this.mainLogger.info(`DATASYNC: complete`) - this.logger.playbackLogState('datasyncComplete', '', '') - - // update the debug tag and restart the queue - this.dataPhaseTag = 'ACTIVE: ' - this.accountSync.dataSyncMainPhaseComplete = true - //update sync statement - this.accountSync.syncStatement.syncComplete = true - this.accountSync.syncStatement.cycleEnded = this.currentCycleShardData.cycleNumber - this.accountSync.syncStatement.numCycles = - this.accountSync.syncStatement.cycleEnded - this.accountSync.syncStatement.cycleStarted - - this.accountSync.syncStatement.syncEndTime = shardusGetTime() - this.accountSync.syncStatement.syncSeconds = - (this.accountSync.syncStatement.syncEndTime - this.accountSync.syncStatement.syncStartTime) / 1000 - - /* prettier-ignore */ nestedCountersInstance.countEvent('sync', `sync comlete numCycles: ${this.accountSync.syncStatement.numCycles} start:${this.accountSync.syncStatement.cycleStarted} end:${this.accountSync.syncStatement.cycleEnded} numAccounts: ${this.accountSync.syncStatement.numAccounts}`) - if (this.accountSync.syncStatement.internalFlag === true) { - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_syncStatement', ` `, `${utils.stringifyReduce(this.accountSync.syncStatement)}`) - this.accountSync.syncStatmentIsComplete() - /* prettier-ignore */ this.statemanager_fatal( 'shrd_sync_syncStatement-startCatchUpQueue', `${utils.stringifyReduce(this.accountSync.syncStatement)}` ) - /* prettier-ignore */ this.mainLogger.debug(`DATASYNC: syncStatement-startCatchUpQueue c:${this.currentCycleShardData.cycleNumber} ${utils.stringifyReduce(this.accountSync.syncStatement)}`) - } else { - this.accountSync.syncStatement.internalFlag = true - } - - this.tryStartTransactionProcessingQueue() - - if (logFlags.playback) this.logger.playbackLogNote('shrd_sync_mainphaseComplete', ` `, ` `) - } - - // just a placeholder for later - recordPotentialBadnode() { - // The may need to live on the p2p class, or call into it - // record the evidence. - // potentially report it - } - - /** - * writeCombinedAccountDataToBackups - * @param failedHashes This is a list of hashes that failed and should be ignored in the write operation. 
- */ - async writeCombinedAccountDataToBackups( - goodAccounts: ShardusTypes.WrappedData[], - failedHashes: string[] - ): Promise { - // ?:{[id:string]: boolean} - if (failedHashes.length === 0 && goodAccounts.length === 0) { - return 0 // nothing to do yet - } - - const failedAccountsById: { [id: string]: boolean } = {} - for (const hash of failedHashes) { - // eslint-disable-next-line security/detect-object-injection - failedAccountsById[hash] = true - } - - const lastCycle = this.p2p.state.getLastCycle() - const cycleNumber = lastCycle.counter - const accountCopies: AccountCopy[] = [] - for (const accountEntry of goodAccounts) { - // check failed hashes - if (failedAccountsById[accountEntry.stateId]) { - continue - } - // wrappedAccounts.push({ accountId: account.address, stateId: account.hash, data: account, timestamp: account.timestamp }) - const isGlobal = this.accountGlobals.isGlobalAccount(accountEntry.accountId) - const accountCopy: AccountCopy = { - accountId: accountEntry.accountId, - data: accountEntry.data, - timestamp: accountEntry.timestamp, - hash: accountEntry.stateId, - cycleNumber, - isGlobal: isGlobal || false, - } - accountCopies.push(accountCopy) - } - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug('writeCombinedAccountDataToBackups ' + accountCopies.length + ' ' + utils.stringifyReduce(accountCopies)) - - if (logFlags.verbose) console.log('DBG accountCopies. (in main log)') - - // await this.storage.createAccountCopies(accountCopies) - await this.storage.createOrReplaceAccountCopy(accountCopies) - - return accountCopies.length - } - - // let this learn offset.. - // if we get the same range request from the same client..... nope! - - // This will make calls to app.getAccountDataByRange but if we are close enough to real time it will query any newer data and return lastUpdateNeeded = true - async getAccountDataByRangeSmart( - accountStart: string, - accountEnd: string, - tsStart: number, - maxRecords: number, - offset: number, - accountOffset: string - ): Promise { - const tsEnd = shardusGetTime() - - // todo convert this to use account backup data, then compare perf vs app as num accounts grows past 10k - - // alternate todo: query it all from the app then create a smart streaming wrapper that persists between calls and even - // handles updates to day by putting updated data at the end of the list with updated data wrappers. - - const wrappedAccounts = await this.app.getAccountDataByRange( - accountStart, - accountEnd, - tsStart, - tsEnd, - maxRecords, - offset, - accountOffset - ) - let lastUpdateNeeded = false - let wrappedAccounts2: WrappedStateArray = [] - let highestTs = 0 - let delta = 0 - // do we need more updates - if (wrappedAccounts.length === 0) { - lastUpdateNeeded = true - } else { - // see if our newest record is new enough - highestTs = 0 - for (const account of wrappedAccounts) { - if (account.timestamp > highestTs) { - highestTs = account.timestamp - } - } - delta = tsEnd - highestTs - // if the data we go was close enough to current time then we are done - // may have to be carefull about how we tune this value relative to the rate that we make this query - // we should try to make this query more often then the delta. 
- if (logFlags.verbose) console.log('delta ' + delta) - // increased allowed delta to allow for a better chance to catch up - - if (delta < this.queueSitTime * 2) { - const tsStart2 = highestTs - wrappedAccounts2 = await this.app.getAccountDataByRange( - accountStart, - accountEnd, - tsStart2, - shardusGetTime(), - maxRecords, - 0, - '' - ) - lastUpdateNeeded = true //?? not sure .. this could cause us to skip some, but that is ok! - } - } - return { wrappedAccounts, lastUpdateNeeded, wrappedAccounts2, highestTs, delta } - } - - testAccountDataWrapped(accountDataList: ShardusTypes.WrappedData[]) { - if (accountDataList == null) { - return - } - for (const wrappedData of accountDataList) { - const { accountId, stateId, data: recordData } = wrappedData - if (stateId != wrappedData.stateId) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`testAccountDataWrapped what is going on!!: ${utils.makeShortHash(wrappedData.stateId)} stateId: ${utils.makeShortHash(stateId)} `) - } - const hash = this.app.calculateAccountHash(recordData) - - // comparison safe against timing attacks - if (stateId.length !== hash.length || !timingSafeEqual(Buffer.from(stateId), Buffer.from(hash))) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`testAccountDataWrapped hash test failed: setAccountData for account ${utils.makeShortHash(accountId)} expected account hash: ${utils.makeShortHash(stateId)} got ${utils.makeShortHash(hash)} `) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('testAccountDataWrapped hash test failed: details: ' + Utils.safeStringify(recordData)) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('testAccountDataWrapped hash test failed: wrappedData.stateId: ' + utils.makeShortHash(wrappedData.stateId)) - const stack = new Error().stack - if (logFlags.error) this.mainLogger.error(`stack: ${stack}`) - } - } - } - - async checkAndSetAccountData( - accountRecords: ShardusTypes.WrappedData[], - note: string, - processStats: boolean, - updatedAccounts: string[] = null - ): Promise { - const accountsToAdd: unknown[] = [] - const wrappedAccountsToAdd: ShardusTypes.WrappedData[] = [] - const failedHashes: string[] = [] - for (const wrappedAccount of accountRecords) { - const { accountId, stateId, data: recordData, timestamp } = wrappedAccount - const hash = this.app.calculateAccountHash(recordData) - const cycleToRecordOn = CycleChain.getCycleNumberFromTimestamp(wrappedAccount.timestamp) - if (cycleToRecordOn <= -1) { - this.statemanager_fatal( - `checkAndSetAccountData cycleToRecordOn==-1`, - `checkAndSetAccountData cycleToRecordOn==-1 ${wrappedAccount.timestamp}` - ) - failedHashes.push(accountId) - return failedHashes - } - //TODO perf remove this when we are satisfied with the situation - //Additional testing to cache if we try to overrite with older data - if (this.accountCache.hasAccount(accountId)) { - const accountMemData: AccountHashCache = this.accountCache.getAccountHash(accountId) - if (timestamp < accountMemData.t) { - //should update cache anyway (older value may be needed) - - // I have doubts that cache should be able to roll a value back.. - this.accountCache.updateAccountHash( - wrappedAccount.accountId, - wrappedAccount.stateId, - wrappedAccount.timestamp, - cycleToRecordOn - ) - - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`setAccountData: abort. 
checkAndSetAccountData older timestamp note:${note} acc: ${utils.makeShortHash(accountId)} timestamp:${timestamp} accountMemData.t:${accountMemData.t} hash: ${utils.makeShortHash(hash)} cache:${utils.stringifyReduce(accountMemData)}`) - continue //this is a major error need to skip the writing. - } - } - - if (stateId.length === hash.length && timingSafeEqual(Buffer.from(stateId), Buffer.from(hash))) { - accountsToAdd.push(recordData) - wrappedAccountsToAdd.push(wrappedAccount) - - if (updatedAccounts != null) { - updatedAccounts.push(accountId) - } - - const debugString = `setAccountData: note:${note} acc: ${utils.makeShortHash( - accountId - )} hash: ${utils.makeShortHash(hash)} ts:${wrappedAccount.timestamp}` - if (logFlags.debug) this.mainLogger.debug(debugString) - if (logFlags.verbose) console.log(debugString) - - if (wrappedAccount.timestamp === 0) { - const stack = new Error().stack - - this.statemanager_fatal( - `checkAndSetAccountData ts=0`, - `checkAndSetAccountData ts=0 ${debugString} ${stack}` - ) - } - - if (processStats) { - if (this.accountCache.hasAccount(accountId)) { - //TODO STATS BUG.. this is what can cause one form of stats bug. - //we may have covered this account in the past, then not covered it, and now we cover it again. Stats doesn't know how to repair - // this situation. - //TODO, need a way to re-init.. dang idk how to do that! - //this.partitionStats.statsDataSummaryUpdate2(cycleToRecordOn, null, wrapedAccount) - - const tryToCorrectStats = true - if (tryToCorrectStats) { - /* prettier-ignore */ this.transactionQueue.setDebugLastAwaitedCallInner('ths.app.getAccountDataByList') - const accounts = await this.app.getAccountDataByList([wrappedAccount.accountId]) - /* prettier-ignore */ this.transactionQueue.setDebugLastAwaitedCallInner('ths.app.getAccountDataByList', DebugComplete.Completed) - if (accounts != null && accounts.length === 1) { - this.partitionStats.statsDataSummaryUpdate( - cycleToRecordOn, - accounts[0].data, - wrappedAccount, - 'checkAndSetAccountData-' + note - ) - } - } else { - //old way - this.accountCache.updateAccountHash( - wrappedAccount.accountId, - wrappedAccount.stateId, - wrappedAccount.timestamp, - cycleToRecordOn - ) - } - } else { - //I think some work was done to fix diverging stats, but how did it turn out? - this.partitionStats.statsDataSummaryInit( - cycleToRecordOn, - wrappedAccount.accountId, - wrappedAccount.data, - 'checkAndSetAccountData-' + note - ) - } - } else { - //even if we do not process stats still need to update cache - //todo maybe even take the stats out of the pipeline for updating cache? 
(but that is kinda tricky) - this.accountCache.updateAccountHash( - wrappedAccount.accountId, - wrappedAccount.stateId, - wrappedAccount.timestamp, - cycleToRecordOn - ) - } - } else { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`setAccountData hash test failed: setAccountData for account ${utils.makeShortHash(accountId)} expected account hash: ${utils.makeShortHash(stateId)} got ${utils.makeShortHash(hash)} `) - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('setAccountData hash test failed: details: ' + utils.stringifyReduce(recordData)) - /* prettier-ignore */ if (logFlags.verbose) console.log(`setAccountData hash test failed: setAccountData for account ${utils.makeShortHash(accountId)} expected account hash: ${utils.makeShortHash(stateId)} got ${utils.makeShortHash(hash)} `) - /* prettier-ignore */ if (logFlags.verbose) console.log('setAccountData hash test failed: details: ' + utils.stringifyReduce(recordData)) - failedHashes.push(accountId) - } - } - /* prettier-ignore */ if (logFlags.debug) this.mainLogger.debug(`setAccountData toAdd:${accountsToAdd.length} failed:${failedHashes.length}`) - /* prettier-ignore */ if (logFlags.verbose) console.log(`setAccountData toAdd:${accountsToAdd.length} failed:${failedHashes.length}`) - /* prettier-ignore */ this.transactionQueue.setDebugLastAwaitedCallInner('ths.app.setAccountData') - await this.app.setAccountData(accountsToAdd) - /* prettier-ignore */ this.transactionQueue.setDebugLastAwaitedCallInner('ths.app.setAccountData', DebugComplete.Completed) - this.transactionQueue.processNonceQueue(wrappedAccountsToAdd) - return failedHashes - } - - /*** - * ######## ## ## ######## ######## ####### #### ## ## ######## ###### - * ## ### ## ## ## ## ## ## ## ## ### ## ## ## ## - * ## #### ## ## ## ## ## ## ## ## #### ## ## ## - * ###### ## ## ## ## ## ######## ## ## ## ## ## ## ## ###### - * ## ## #### ## ## ## ## ## ## ## #### ## ## - * ## ## ### ## ## ## ## ## ## ## ### ## ## ## - * ######## ## ## ######## ## ####### #### ## ## ## ###### - */ - - _registerListener(emitter: EventEmitter, event: string, callback: Callback) { - // eslint-disable-next-line security/detect-object-injection - if (this._listeners[event]) { - this.statemanager_fatal(`_registerListener_dupes`, 'State Manager can only register one listener per event!') - return - } - emitter.on(event, callback) - // eslint-disable-next-line security/detect-object-injection - this._listeners[event] = [emitter, callback] - } - - _unregisterListener(event: string) { - /* eslint-disable security/detect-object-injection */ - if (!this._listeners[event]) { - this.mainLogger.warn(`This event listener doesn't exist! Event: \`${event}\` in StateManager`) - return - } - const entry = this._listeners[event] - const [emitter, callback] = entry - emitter.removeListener(event, callback) - delete this._listeners[event] - /* eslint-enable security/detect-object-injection */ - } - - _cleanupListeners() { - for (const event of Object.keys(this._listeners)) { - this._unregisterListener(event) - } - } - - registerEndpoints() { - // alternatively we would need to query for accepted tx. 
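The listener bookkeeping above (`_registerListener` / `_unregisterListener` / `_cleanupListeners`) enforces one listener per event and remembers the `[emitter, callback]` pair so every registration can be undone during shutdown. A reduced, free-standing sketch of that pattern is shown below; names are illustrative, and where the real code reports a fatal and returns, the sketch simply throws.

```typescript
import { EventEmitter } from 'events'

type Callback = (...args: unknown[]) => void

// One-listener-per-event registry, as in _registerListener/_cleanupListeners above.
// Keeping the emitter alongside the callback lets cleanup call removeListener
// without knowing which emitter the event originally came from.
const listeners: Record<string, [EventEmitter, Callback]> = {}

function registerListener(emitter: EventEmitter, event: string, callback: Callback): void {
  if (listeners[event]) {
    // the real code logs a fatal and returns; a sketch can just throw
    throw new Error(`only one listener may be registered per event: ${event}`)
  }
  emitter.on(event, callback)
  listeners[event] = [emitter, callback]
}

function cleanupListeners(): void {
  for (const event of Object.keys(listeners)) {
    const [emitter, callback] = listeners[event]
    emitter.removeListener(event, callback)
    delete listeners[event]
  }
}
```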
- - this.accountGlobals.setupHandlers() - - this.depricated.setupHandlers() - - if (this.partitionObjects != null) { - this.partitionObjects.setupHandlers() - } - - this.transactionQueue.setupHandlers() - - this.accountSync.setupHandlers() - - this.transactionConsensus.setupHandlers() - - this.accountPatcher.setupHandlers() - - this.cachedAppDataManager.setupHandlers() - - this.partitionStats.setupHandlers() - - // p2p ASK - // this.p2p.registerInternal( - // 'request_receipt_for_tx_old', - // async ( - // payload: RequestReceiptForTxReq, - // respond: (arg0: RequestReceiptForTxResp_old) => Promise, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('request_receipt_for_tx_old', false, msgSize) - - // const response: RequestReceiptForTxResp_old = { receipt: null, note: '', success: false } - - // let responseSize = cUninitializedSize - // try { - // let queueEntry = this.transactionQueue.getQueueEntrySafe(payload.txid) // , payload.timestamp) - // if (queueEntry == null) { - // queueEntry = this.transactionQueue.getQueueEntryArchived(payload.txid, 'request_receipt_for_tx') // , payload.timestamp) - // } - - // if (queueEntry == null) { - // response.note = `failed to find queue entry: ${utils.stringifyReduce(payload.txid)} ${ - // payload.timestamp - // } dbg:${this.debugTXHistory[utils.stringifyReduce(payload.txid)]}` - // await respond(response) - // return - // } - - // if (queueEntry.appliedReceipt != null) { - // response.receipt = queueEntry.appliedReceipt - // } else if (queueEntry.recievedAppliedReceipt != null) { - // response.receipt = queueEntry.recievedAppliedReceipt - // } - // if (response.receipt != null) { - // response.success = true - // } else { - // response.note = `found queueEntry but no receipt: ${utils.stringifyReduce(payload.txid)} ${ - // payload.txid - // } ${payload.timestamp}` - // } - // responseSize = await respond(response) - // } finally { - // profilerInstance.scopedProfileSectionEnd('request_receipt_for_tx_old', responseSize) - // } - // } - // ) - - // p2p ASK - // this.p2p.registerInternal( - // 'request_receipt_for_tx', - // async ( - // payload: RequestReceiptForTxReq, - // respond: (arg0: RequestReceiptForTxResp) => Promise, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('request_receipt_for_tx', false, msgSize) - - // const response: RequestReceiptForTxResp = { receipt: null, note: '', success: false } - - // let responseSize = cUninitializedSize - // try { - // let queueEntry = this.transactionQueue.getQueueEntrySafe(payload.txid) // , payload.timestamp) - // if (queueEntry == null) { - // queueEntry = this.transactionQueue.getQueueEntryArchived(payload.txid, 'request_receipt_for_tx') // , payload.timestamp) - // } - - // if (queueEntry == null) { - // response.note = `failed to find queue entry: ${utils.stringifyReduce(payload.txid)} ${ - // payload.timestamp - // } dbg:${this.debugTXHistory[utils.stringifyReduce(payload.txid)]}` - // await respond(response) - // return - // } - - // response.receipt = this.getReceipt2(queueEntry) - - // if (response.receipt != null) { - // response.success = true - // } else { - // response.note = `found queueEntry but no receipt: ${utils.stringifyReduce(payload.txid)} ${ - // payload.txid - // } ${payload.timestamp}` - // } - // responseSize = await respond(response) - // } finally { - // profilerInstance.scopedProfileSectionEnd('request_receipt_for_tx', 
responseSize) - // } - // } - // ) - - const requestReceiptForTxBinaryHandler: P2PTypes.P2PTypes.Route> = { - name: InternalRouteEnum.binary_request_receipt_for_tx, - handler: (payload, respond) => { - const route = InternalRouteEnum.binary_request_receipt_for_tx - profilerInstance.scopedProfileSectionStart(route, false, payload.length) - nestedCountersInstance.countEvent('stateManager', route) - - const response: RequestReceiptForTxRespSerialized = { receipt: null, note: '', success: false } - try { - const req = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cRequestReceiptForTxReq) - const deserialized = deserializeRequestReceiptForTxReq(req) - let queueEntry = this.transactionQueue.getQueueEntrySafe(deserialized.txid) - if (queueEntry == null) { - queueEntry = this.transactionQueue.getQueueEntryArchived(deserialized.txid, route) - } - - if (queueEntry == null) { - response.note = `failed to find queue entry: ${utils.stringifyReduce(deserialized.txid)} ${ - deserialized.timestamp - } dbg:${this.debugTXHistory[utils.stringifyReduce(deserialized.txid)]}` - respond(response, serializeRequestReceiptForTxResp) - return - } - - if (queueEntry.acceptedTx?.timestamp !== deserialized.timestamp) { - response.note = `requested timestamp does not match txid: ${utils.stringifyReduce(deserialized.txid)} - request: ${deserialized.timestamp} - queueuEntry timestamp: ${queueEntry.acceptedTx?.timestamp} - dbg:${this.debugTXHistory[utils.stringifyReduce(deserialized.txid)]}` - respond(response, serializeRequestReceiptForTxResp) - return - } - - response.receipt = this.getSignedReceipt(queueEntry) - if (response.receipt != null) { - response.success = true - } else { - response.note = `found queueEntry but no receipt: ${utils.stringifyReduce(deserialized.txid)} ${ - deserialized.txid - } ${deserialized.timestamp}` - } - respond(response, serializeRequestReceiptForTxResp) - } catch (e) { - this.mainLogger.error(`${route} error: ${e.message} stack: ${e.stack}`) - nestedCountersInstance.countEvent('internal', `${route}-exception`) - respond(response, serializeRequestReceiptForTxResp) - } finally { - profilerInstance.scopedProfileSectionEnd(route) - } - }, - } - - this.p2p.registerInternalBinary(requestReceiptForTxBinaryHandler.name, requestReceiptForTxBinaryHandler.handler) - - // this.p2p.registerInternal( - // 'request_state_for_tx_post', - // async ( - // payload: RequestStateForTxReqPost, - // respond: (arg0: RequestStateForTxResp) => Promise, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('request_state_for_tx_post', false, msgSize) - // let responseSize = cUninitializedSize - // try { - // const response: RequestStateForTxResp = { - // stateList: [], - // beforeHashes: {}, - // note: '', - // success: false, - // } - // // app.getRelevantData(accountId, tx) -> wrappedAccountState for local accounts - // let queueEntry = this.transactionQueue.getQueueEntrySafe(payload.txid) // , payload.timestamp) - // if (queueEntry == null) { - // queueEntry = this.transactionQueue.getQueueEntryArchived( - // payload.txid, - // 'request_state_for_tx_post' - // ) // , payload.timestamp) - // } - - // if (queueEntry == null) { - // response.note = `failed to find queue entry: ${utils.stringifyReduce(payload.txid)} ${ - // payload.timestamp - // } dbg:${this.debugTXHistory[utils.stringifyReduce(payload.txid)]}` - // /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', 'request_state_for_tx_post cant find queue entry') - 
// await respond(response) - // return - // } - - // if (queueEntry.hasValidFinalData === false) { - // response.note = `has queue entry but not final data: ${utils.stringifyReduce(payload.txid)} ${ - // payload.timestamp - // } dbg:${this.debugTXHistory[utils.stringifyReduce(payload.txid)]}` - // /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', `request_state_for_tx_post hasValidFinalData==false, tx state: ${queueEntry.state}`) - // await respond(response) - // return - // } - - // let wrappedStates = this.useAccountWritesOnly ? {} : queueEntry.collectedData - - // // if we have applyResponse then use it. This is where and advanced apply() will put its transformed data - // const writtenAccountsMap: WrappedResponses = {} - // const applyResponse = queueEntry?.preApplyTXResult.applyResponse - // if ( - // applyResponse != null && - // applyResponse.accountWrites != null && - // applyResponse.accountWrites.length > 0 - // ) { - // for (const writtenAccount of applyResponse.accountWrites) { - // writtenAccountsMap[writtenAccount.accountId] = writtenAccount.data - // } - // wrappedStates = writtenAccountsMap - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`request_state_for_tx_post applyResponse.accountWrites tx:${queueEntry.logID} ts:${queueEntry.acceptedTx.timestamp} accounts: ${utils.stringifyReduce(Object.keys(wrappedStates))} `) - // } - - // //TODO figure out if we need to include collectedFinalData (after refactor/cleanup) - - // if (wrappedStates != null) { - // for (const [key, accountData] of Object.entries(wrappedStates)) { - // if (payload.key !== accountData.accountId) { - // continue //not this account. - // } - - // if (accountData.stateId != payload.hash) { - // response.note = `failed accountData.stateId != payload.hash txid: ${utils.makeShortHash( - // payload.txid - // )} ts:${payload.timestamp} hash:${utils.makeShortHash(accountData.stateId)}` - // response.success = false - // /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', 'request_state_for_tx_post failed accountData.stateId != payload.hash txid') - // await respond(response) - // return - // } - // if (accountData) { - // //include the before hash - // // eslint-disable-next-line security/detect-object-injection - // response.beforeHashes[key] = queueEntry.beforeHashes[key] - // //include the data - // response.stateList.push(accountData) - // } - // } - // } - - // nestedCountersInstance.countEvent('stateManager', 'request_state_for_tx_post success') - // response.success = true - // responseSize = await respond(response) - // } finally { - // profilerInstance.scopedProfileSectionEnd('request_state_for_tx_post', responseSize) - // } - // } - // ) - - const requestStateForTxPostBinaryHandler: Route> = { - name: InternalRouteEnum.binary_request_state_for_tx_post, - handler: async (payload, respond, header, sign) => { - const route = InternalRouteEnum.binary_request_state_for_tx_post - profilerInstance.scopedProfileSectionStart(route, false, payload.length) - nestedCountersInstance.countEvent('internal', route) - const errorHandler = ( - errorType: RequestErrorEnum, - opts?: { customErrorLog?: string; customCounterSuffix?: string } - ): void => requestErrorHandler(route, errorType, header, opts) - - try { - const response: RequestStateForTxPostResp = { - stateList: [], - beforeHashes: {}, - note: '', - success: false, - } - - const txId = header.verification_data - let queueEntry = this.transactionQueue.getQueueEntrySafe(txId) - if (queueEntry == null) 
{ - queueEntry = this.transactionQueue.getQueueEntryArchived(txId, route) - } - if (queueEntry == null) { - response.note = `failed to find queue entry: ${utils.stringifyReduce(txId)} dbg:${ - this.debugTXHistory[utils.stringifyReduce(txId)] - }` - /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', `${route} cant find queue entry`) - return respond(response, serializeRequestStateForTxPostResp) - } - - if (queueEntry.hasValidFinalData === false) { - response.note = `has queue entry but not final data: ${utils.stringifyReduce(txId)} dbg:${ - this.debugTXHistory[utils.stringifyReduce(txId)] - }` - - if (logFlags.error && logFlags.verbose) this.mainLogger.error(response.note) - /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', `${route} hasValidFinalData==false, tx state: ${queueEntry.state}`) - return respond(response, serializeRequestStateForTxPostResp) - } - - const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cRequestStateForTxPostReq) - if (!requestStream) { - errorHandler(RequestErrorEnum.InvalidRequest) - return respond(response, serializeRequestStateForTxPostResp) - } - - const req = deserializeRequestStateForTxPostReq(requestStream) - // app.getRelevantData(accountId, tx) -> wrappedAccountState for local accounts - let wrappedStates = this.useAccountWritesOnly ? {} : queueEntry.collectedData - const applyResponse = queueEntry?.preApplyTXResult.applyResponse - if (applyResponse != null && applyResponse.accountWrites != null && applyResponse.accountWrites.length > 0) { - const writtenAccountsMap: WrappedResponses = {} - for (const writtenAccount of applyResponse.accountWrites) { - writtenAccountsMap[writtenAccount.accountId] = writtenAccount.data - } - wrappedStates = writtenAccountsMap - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`request_state_for_tx_post applyResponse.accountWrites tx:${queueEntry.logID} ts:${queueEntry.acceptedTx.timestamp} accounts: ${utils.stringifyReduce(Object.keys(wrappedStates))}`) - } - - if (wrappedStates != null) { - for (const [key, accountData] of Object.entries(wrappedStates)) { - if (req.key !== accountData.accountId) { - continue // Not this account. 
- } - - if (accountData.stateId != req.hash) { - response.note = `failed accountData.stateId != req.hash txid: ${utils.makeShortHash( - req.txid - )} hash:${utils.makeShortHash(accountData.stateId)}` - /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', `${route} failed accountData.stateId != req.hash txid`) - return respond(response, serializeRequestStateForTxPostResp) - } - if (accountData) { - response.beforeHashes[key] = queueEntry.beforeHashes[key] - response.stateList.push(accountData) - } - } - } - nestedCountersInstance.countEvent('stateManager', `${route} success`) - response.success = true - return respond(response, serializeRequestStateForTxPostResp) - } catch (e) { - if (logFlags.error) this.mainLogger.error(`${route} error: ${utils.errorToStringFull(e)}`) - nestedCountersInstance.countEvent('internal', `${route}-exception`) - respond({ stateList: [], beforeHashes: {}, note: '', success: false }, serializeRequestStateForTxPostResp) - } finally { - profilerInstance.scopedProfileSectionEnd(route, payload.length) - } - }, - } - - this.p2p.registerInternalBinary(requestStateForTxPostBinaryHandler.name, requestStateForTxPostBinaryHandler.handler) - - // Comms.registerInternal( - // 'request_tx_and_state', - // async ( - // payload: { txid: string; accountIds: string[] }, - // respond: (arg0: RequestTxResp) => Promise, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('request_tx_and_state', false, msgSize) - // let responseSize = cUninitializedSize - // try { - // let response: RequestTxResp = { - // stateList: [], - // account_state_hash_before: {}, - // account_state_hash_after: {}, - // note: '', - // success: false, - // // originalData: {}, - // } - - // const txid = payload.txid - // const requestedAccountIds = payload.accountIds - - // let queueEntry = this.transactionQueue.getQueueEntrySafe(txid) - // if (queueEntry == null) { - // queueEntry = this.transactionQueue.getQueueEntryArchived(txid, 'request_tx_and_state') - // } - - // if (queueEntry == null) { - // response.note = `failed to find queue entry: ${utils.stringifyReduce(txid)} dbg:${ - // this.debugTXHistory[utils.stringifyReduce(txid)] - // }` - - // if (logFlags.error) this.mainLogger.error(`request_tx_and_state ${response.note}`) - // await respond(response) - // return - // } - - // if (queueEntry.isInExecutionHome === false) { - // response.note = `request_tx_and_state not in execution group: ${utils.stringifyReduce(txid)}` - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(response.note) - // await respond(response) - // return - // } - - // let receipt2 = this.getReceipt2(queueEntry) - // if (receipt2 == null) { - // response.note = `request_tx_and_state does not have valid receipt2: ${utils.stringifyReduce( - // txid - // )}` - // /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(response.note) - // await respond(response) - // return - // } - - // let wrappedStates = this.useAccountWritesOnly ? {} : queueEntry.collectedData - - // // if we have applyResponse then use it. 
This is where and advanced apply() will put its transformed data - // const writtenAccountsMap: WrappedResponses = {} - // const applyResponse = queueEntry?.preApplyTXResult.applyResponse - // if ( - // applyResponse != null && - // applyResponse.accountWrites != null && - // applyResponse.accountWrites.length > 0 - // ) { - // for (const writtenAccount of applyResponse.accountWrites) { - // writtenAccountsMap[writtenAccount.accountId] = writtenAccount.data - // } - // wrappedStates = writtenAccountsMap - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`request_tx_and_state applyResponse.accountWrites tx:${queueEntry.logID} ts:${queueEntry.acceptedTx.timestamp} accounts: ${utils.stringifyReduce(Object.keys(wrappedStates))} `) - // } - - // //TODO figure out if we need to include collectedFinalData (after refactor/cleanup) - - // if (wrappedStates != null) { - // for (let i = 0; i < receipt2.appliedVote.account_id.length; i++) { - // let key = receipt2.appliedVote.account_id[i] - // let accountData = wrappedStates[key] - // if (accountData && requestedAccountIds.includes(key)) { - // // eslint-disable-next-line security/detect-object-injection - // response.account_state_hash_before[key] = receipt2.appliedVote.account_state_hash_before[i] - // response.account_state_hash_after[key] = receipt2.appliedVote.account_state_hash_after[i] - // response.stateList.push(accountData) - // } - // } - // } - // response.success = true - // /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`request_tx_and_state success: ${queueEntry.logID} ${response.stateList.length} ${Utils.safeStringify(response)}`) - // responseSize = await respond(response) - // } finally { - // profilerInstance.scopedProfileSectionEnd('request_tx_and_state', responseSize) - // } - // } - // ) - - const requestTxAndStateBinaryHandler: Route> = { - name: InternalRouteEnum.binary_request_tx_and_state, - // eslint-disable-next-line @typescript-eslint/no-unused-vars - handler: async (payload, respond, header, sign) => { - const route = InternalRouteEnum.binary_request_tx_and_state - nestedCountersInstance.countEvent('internal', route) - this.profiler.scopedProfileSectionStart(route, false, payload.length) - const errorHandler = ( - errorType: RequestErrorEnum, - opts?: { customErrorLog?: string; customCounterSuffix?: string } - ): void => requestErrorHandler(route, errorType, header, opts) - - let response: RequestTxResp = { - stateList: [], - account_state_hash_before: {}, - account_state_hash_after: {}, - note: '', - success: false, - appReceiptData: null, - } - try { - const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cRequestTxAndStateReq) - if (!requestStream) { - errorHandler(RequestErrorEnum.InvalidRequest) - respond(response, serializeRequestTxAndStateResp) - return - } - - const req: RequestTxAndStateReq = deserializeRequestTxAndStateReq(requestStream) - - const txid = req.txid - const requestedAccountIds = req.accountIds - - let queueEntry = this.transactionQueue.getQueueEntrySafe(txid) - if (queueEntry == null) { - queueEntry = this.transactionQueue.getQueueEntryArchived(txid, route) - } - - if (queueEntry == null) { - response.note = `failed to find queue entry: ${utils.stringifyReduce(txid)} dbg:${ - this.debugTXHistory[utils.stringifyReduce(txid)] - }` - - if (logFlags.error) this.mainLogger.error(`${route} ${response.note}`) - respond(response, serializeRequestTxAndStateResp) - return - } - - if (queueEntry.isInExecutionHome === false) { - response.note = 
`${route} not in execution group: ${utils.stringifyReduce(txid)}` - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(response.note) - respond(response, serializeRequestTxAndStateResp) - return - } - - let receipt2 = this.getSignedReceipt(queueEntry) - if (receipt2 == null) { - response.note = `${route} does not have valid receipt2: ${utils.stringifyReduce(txid)}` - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(response.note) - respond(response, serializeRequestTxAndStateResp) - return - } - - let wrappedStates = this.useAccountWritesOnly ? {} : queueEntry.collectedData - - // if we have applyResponse then use it. This is where and advanced apply() will put its transformed data - const writtenAccountsMap: WrappedResponses = {} - const applyResponse = queueEntry?.preApplyTXResult.applyResponse - if (applyResponse != null && applyResponse.accountWrites != null && applyResponse.accountWrites.length > 0) { - for (const writtenAccount of applyResponse.accountWrites) { - writtenAccountsMap[writtenAccount.accountId] = writtenAccount.data - } - wrappedStates = writtenAccountsMap - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`request_tx_and_state applyResponse.accountWrites tx:${queueEntry.logID} ts:${queueEntry.acceptedTx.timestamp} accounts: ${utils.stringifyReduce(Object.keys(wrappedStates))} `) - } - - //TODO figure out if we need to include collectedFinalData (after refactor/cleanup) - - if (wrappedStates != null) { - for (let i = 0; i < receipt2.proposal.accountIDs.length; i++) { - let key = receipt2.proposal.accountIDs[i] - let accountData = wrappedStates[key] - if (accountData && requestedAccountIds.includes(key)) { - // eslint-disable-next-line security/detect-object-injection - response.account_state_hash_before[key] = receipt2.proposal.beforeStateHashes[i] - response.account_state_hash_after[key] = receipt2.proposal.afterStateHashes[i] - response.stateList.push(accountData) - } - } - response.appReceiptData = queueEntry.preApplyTXResult?.applyResponse?.appReceiptData - } - response.success = true - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`request_tx_and_state success: ${queueEntry.logID} ${response.stateList.length} ${Utils.safeStringify(response)}`) - respond(response, serializeRequestTxAndStateResp) - } catch (e) { - nestedCountersInstance.countEvent('internal', `${route}-exception`) - /* prettier-ignore */ if (logFlags.error) Context.logger.getLogger('p2p').error(`${route}: Exception executing request: ${utils.errorToStringFull(e)}`) - respond(response, serializeRequestTxAndStateResp) - } finally { - this.profiler.scopedProfileSectionEnd(route) - } - }, - } - - const requestTxAndStateBeforeBinaryHandler: Route> = { - name: InternalRouteEnum.binary_request_tx_and_state_before, - // eslint-disable-next-line @typescript-eslint/no-unused-vars - handler: async (payload, respond, header, sign) => { - const route = InternalRouteEnum.binary_request_tx_and_state_before - nestedCountersInstance.countEvent('internal', route) - this.profiler.scopedProfileSectionStart(route, false, payload.length) - const errorHandler = ( - errorType: RequestErrorEnum, - opts?: { customErrorLog?: string; customCounterSuffix?: string } - ): void => requestErrorHandler(route, errorType, header, opts) - - let response: RequestTxResp = { - stateList: [], - account_state_hash_before: {}, - account_state_hash_after: {}, - note: '', - success: false, - } - try { - const requestStream = getStreamWithTypeCheck(payload, 
TypeIdentifierEnum.cRequestTxAndStateReq) - if (!requestStream) { - errorHandler(RequestErrorEnum.InvalidRequest) - respond(response, serializeRequestTxAndStateResp) - return - } - - const req: RequestTxAndStateReq = deserializeRequestTxAndStateReq(requestStream) - - const txid = req.txid - const requestedAccountIds = req.accountIds - - let queueEntry = this.transactionQueue.getQueueEntrySafe(txid) - if (queueEntry == null) { - queueEntry = this.transactionQueue.getQueueEntryArchived(txid, route) - } - - if (queueEntry == null) { - response.note = `failed to find queue entry: ${utils.stringifyReduce(txid)} dbg:${ - this.debugTXHistory[utils.stringifyReduce(txid)] - }` - - if (logFlags.error) this.mainLogger.error(`${route} ${response.note}`) - respond(response, serializeRequestTxAndStateResp) - return - } - - if (queueEntry.isInExecutionHome === false) { - response.note = `${route} not in execution group: ${utils.stringifyReduce(txid)}` - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(response.note) - respond(response, serializeRequestTxAndStateResp) - return - } - - let receipt2 = this.getSignedReceipt(queueEntry) - if (receipt2 == null) { - response.note = `${route} does not have valid receipt2: ${utils.stringifyReduce(txid)}` - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(response.note) - respond(response, serializeRequestTxAndStateResp) - return - } - - // we just need to send collected state - for (const accountId of requestedAccountIds) { - const beforeState = queueEntry.collectedData[accountId] - const index = receipt2.proposal.accountIDs.indexOf(accountId) - if (beforeState && beforeState.stateId === receipt2.proposal.beforeStateHashes[index]) { - response.stateList.push(queueEntry.collectedData[accountId]) - } else { - response.note = `has bad beforeStateAccount: ${utils.stringifyReduce(txid)} dbg:${ - this.debugTXHistory[utils.stringifyReduce(txid)] - }` - if (logFlags.error) this.mainLogger.error(`${route} ${response.note}`) - respond(response, serializeRequestTxAndStateResp) - return - } - } - response.success = true - /* prettier-ignore */ if (logFlags.verbose) this.mainLogger.debug(`request_tx_and_state_before success: ${queueEntry.logID} ${response.stateList.length} ${Utils.safeStringify(response)}`) - respond(response, serializeRequestTxAndStateResp) - } catch (e) { - nestedCountersInstance.countEvent('internal', `${route}-exception`) - /* prettier-ignore */ if (logFlags.error) Context.logger.getLogger('p2p').error(`${route}: Exception executing request: ${utils.errorToStringFull(e)}`) - respond(response, serializeRequestTxAndStateResp) - } finally { - this.profiler.scopedProfileSectionEnd(route) - } - }, - } - - this.p2p.registerInternalBinary(requestTxAndStateBinaryHandler.name, requestTxAndStateBinaryHandler.handler) - - this.p2p.registerInternalBinary( - requestTxAndStateBeforeBinaryHandler.name, - requestTxAndStateBeforeBinaryHandler.handler - ) - - // TODO STATESHARDING4 ENDPOINTS ok, I changed this to tell, but we still need to check sender! 
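Both tx-and-state handlers registered above follow the same skeleton as the other `registerInternalBinary` routes in this file: start a scoped profile section, type-check the incoming binary stream, deserialize the request, and answer every exit path through the paired response serializer. A compressed sketch of that skeleton follows; the route name, the `ExampleReq`/`ExampleResp` types, and their (de)serializers are hypothetical placeholders, not real routes.

```typescript
// Sketch only: 'binary_example_route', ExampleReq/ExampleResp and their
// (de)serializers stand in for a real InternalRouteEnum entry and the
// serializer pair that would live under src/types/.
const exampleBinaryHandler = {
  name: InternalRouteEnum.binary_example_route,
  handler: async (payload: Buffer, respond, header, sign) => {
    const route = InternalRouteEnum.binary_example_route
    profilerInstance.scopedProfileSectionStart(route, false, payload.length)
    nestedCountersInstance.countEvent('internal', route)

    const response = { success: false, note: '' }
    try {
      // 1. Confirm the payload carries the expected binary type tag.
      const stream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cExampleReq)
      if (!stream) {
        return respond(response, serializeExampleResp)
      }
      // 2. Deserialize and do the route-specific work.
      const req = deserializeExampleReq(stream)
      // ... look up queue entries, build the response, etc. ...
      response.success = true
      // 3. Every exit path answers through the paired response serializer.
      respond(response, serializeExampleResp)
    } catch (e) {
      nestedCountersInstance.countEvent('internal', `${route}-exception`)
      respond(response, serializeExampleResp)
    } finally {
      profilerInstance.scopedProfileSectionEnd(route)
    }
  },
}

// Registered from registerEndpoints(), exactly as the handlers above are:
this.p2p.registerInternalBinary(exampleBinaryHandler.name, exampleBinaryHandler.handler)
```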
- //this.p2p.registerGossipHandler('spread_appliedVote', async (payload, sender, tracker) => { - // this.p2p.registerInternal( - // 'spread_appliedVote', - // async ( - // payload: AppliedVote, - // _respond: unknown, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('spread_appliedVote', false, msgSize) - // try { - // const queueEntry = this.transactionQueue.getQueueEntrySafe(payload.txid) // , payload.timestamp) - // if (queueEntry == null) { - // /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', 'spread_appliedVote_no_queue_entry') - // return - // } - // const newVote = payload as AppliedVote - // // TODO STATESHARDING4 ENDPOINTS check payload format - // // TODO STATESHARDING4 ENDPOINTS that this message is from a valid sender (may need to check docs) - - // if (this.transactionConsensus.tryAppendVote(queueEntry, newVote)) { - // // Note this was sending out gossip, but since this needs to be converted to a tell function i deleted the gossip send - // } - // } finally { - // profilerInstance.scopedProfileSectionEnd('spread_appliedVote') - // } - // } - // ) - - // this.p2p.registerInternal( - // 'spread_appliedVoteHash', - // async ( - // payload: AppliedVoteHash, - // _respond: unknown, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // // TODO: can be replaced with poqo-send-vote so can be removed - // profilerInstance.scopedProfileSectionStart('spread_appliedVoteHash', false, msgSize) - // try { - // const queueEntry = this.transactionQueue.getQueueEntrySafe(payload.txid) // , payload.timestamp) - // if (queueEntry == null) { - // /* prettier-ignore */ nestedCountersInstance.countEvent('stateManager', 'spread_appliedVoteHash_no_queue_entry') - // return - // } - // const collectedVoteHash = payload as AppliedVoteHash - // // TODO STATESHARDING4 ENDPOINTS check payload format - // // TODO STATESHARDING4 ENDPOINTS that this message is from a valid sender (correct consenus group and valid signature) - - // if (this.transactionConsensus.tryAppendVoteHash(queueEntry, collectedVoteHash)) { - // // Note this was sending out gossip, but since this needs to be converted to a tell function i deleted the gossip send - // } - // } finally { - // profilerInstance.scopedProfileSectionEnd('spread_appliedVoteHash') - // } - // } - // ) - - // const spreadAppliedVoteHashBinaryHandler: Route> = { - // name: InternalRouteEnum.binary_spread_appliedVoteHash, - // // eslint-disable-next-line @typescript-eslint/no-unused-vars - // handler: async (payload, respond, header, sign) => { - // const route = InternalRouteEnum.binary_spread_appliedVoteHash - // nestedCountersInstance.countEvent('internal', route) - // this.profiler.scopedProfileSectionStart(route, false, payload.length) - // const errorHandler = ( - // errorType: RequestErrorEnum, - // opts?: { customErrorLog?: string; customCounterSuffix?: string } - // ): void => requestErrorHandler(route, errorType, header, opts) - - // try { - // // Type check the request - // const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cSpreadAppliedVoteHash) - // if (!requestStream) { - // return errorHandler(RequestErrorEnum.InvalidRequest) - // } - // const req = deserializeSpreadAppliedVoteHashReq(requestStream) - // const queueEntry = this.transactionQueue.getQueueEntrySafe(req.txid) - // if (queueEntry == null) { - // /* prettier-ignore */ nestedCountersInstance.countEvent('internal', 
`${route}-no_queue_entry`) - // return - // } - // const collectedVoteHash = req as AppliedVoteHash - - // if (this.transactionConsensus.tryAppendVoteHash(queueEntry, collectedVoteHash)) { - // // Note this was sending out gossip, but since this needs to be converted to a tell function i deleted the gossip send - // } - // } catch (e) { - // nestedCountersInstance.countEvent('internal', `${route}-exception`) - // this.mainLogger.error(`${route}: Exception executing request: ${utils.errorToStringFull(e)}`) - // } finally { - // this.profiler.scopedProfileSectionEnd(route) - // } - // }, - // } - - // this.p2p.registerInternalBinary( - // spreadAppliedVoteHashBinaryHandler.name, - // spreadAppliedVoteHashBinaryHandler.handler - // ) - - // this.p2p.registerInternal( - // 'get_account_data_with_queue_hints', - // async ( - // payload: { accountIds: string[] }, - // respond: (arg0: GetAccountDataWithQueueHintsResp | false) => Promise, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('get_account_data_with_queue_hints', false, msgSize) - // let responseSize = cUninitializedSize - // try { - // const result = {} as GetAccountDataWithQueueHintsResp //TSConversion This is complicated !! check app for details. - // let accountData = null - // let ourLockID = -1 - // try { - // ourLockID = await this.fifoLock('accountModification') - // accountData = await this.app.getAccountDataByList(payload.accountIds) - // } finally { - // this.fifoUnlock('accountModification', ourLockID) - // } - // if (accountData != null) { - // for (const wrappedAccount of accountData) { - // const wrappedAccountInQueueRef = wrappedAccount as ShardusTypes.WrappedDataFromQueue - // wrappedAccountInQueueRef.seenInQueue = false - - // if (this.lastSeenAccountsMap != null) { - // const queueEntry = this.lastSeenAccountsMap[wrappedAccountInQueueRef.accountId] - // if (queueEntry != null) { - // wrappedAccountInQueueRef.seenInQueue = true - // } - // } - // } - // } - // //PERF Disiable this in production or performance testing. / this works due to inheritance - // //this can throw an error an result in a non response - // this.testAccountDataWrapped(accountData) - // // we cast up the array return type because we have attached the seenInQueue memeber to the data. - // result.accountData = accountData as ShardusTypes.WrappedDataFromQueue[] - // responseSize = await respond(result) - // } catch (ex) { - // //we dont want to delay. 
let the asking node know qukcly so it can try again - // responseSize = await respond(false) - // } finally { - // profilerInstance.scopedProfileSectionEnd('get_account_data_with_queue_hints', responseSize) - // } - // } - // ) - - const binaryGetAccDataWithQueueHintsHandler: Route> = { - name: InternalRouteEnum.binary_get_account_data_with_queue_hints, - handler: async (payload, respond, header, sign) => { - const route = InternalRouteEnum.binary_get_account_data_with_queue_hints - profilerInstance.scopedProfileSectionStart(route, false, payload.length) - nestedCountersInstance.countEvent('internal', route) - - try { - let accountData = null - const requestStream = getStreamWithTypeCheck(payload, TypeIdentifierEnum.cGetAccountDataWithQueueHintsReq) - if (!requestStream) { - // implement error handling - nestedCountersInstance.countEvent('internal', `${route}-invalid_request`) - return respond(BadRequest(`${route} invalid request`), serializeResponseError) - } - const req = deserializeGetAccountDataWithQueueHintsReq(requestStream) - const MAX_ACCOUNTS = this.config.stateManager.accountBucketSize - if (req.accountIds.length > MAX_ACCOUNTS) { - nestedCountersInstance.countEvent('internal', `${route}-too_many_accounts`) - return respond(BadRequest(`${route} too many accounts requested`), serializeResponseError) - } - if (utils.isValidShardusAddress(req.accountIds) === false) { - nestedCountersInstance.countEvent('internal', `${route}-invalid_account_ids`) - return respond(BadRequest(`${route} invalid account_ids`), serializeResponseError) - } - let ourLockID = -1 - try { - ourLockID = await this.fifoLock('accountModification') - accountData = await this.app.getAccountDataByList(req.accountIds) - } finally { - this.fifoUnlock('accountModification', ourLockID) - } - if (accountData != null) { - for (const wrappedAccount of accountData) { - const wrappedAccountInQueueRef = wrappedAccount as WrappedDataFromQueueSerializable - wrappedAccountInQueueRef.seenInQueue = false - - if (this.lastSeenAccountsMap != null) { - const queueEntry = this.lastSeenAccountsMap[wrappedAccountInQueueRef.accountId] - if (queueEntry != null) { - wrappedAccountInQueueRef.seenInQueue = true - } - } - } - } - - const resp: GetAccountDataWithQueueHintsResp = { - accountData: accountData as WrappedDataFromQueueSerializable[] | null, - // this can still be null - } - respond(resp, serializeGetAccountDataWithQueueHintsResp) - } catch (e) { - if (logFlags.error || logFlags.getLocalOrRemote) - this.mainLogger.error(`${route} error: ${utils.errorToStringFull(e)}`) - nestedCountersInstance.countEvent('internal', `${route}-exception`) - nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `handler: ${e.message} `) - return respond(InternalError(`${route} exception executing request`), serializeResponseError) - } finally { - profilerInstance.scopedProfileSectionEnd(route, payload.length) - } - }, - } - - this.p2p.registerInternalBinary( - binaryGetAccDataWithQueueHintsHandler.name, - binaryGetAccDataWithQueueHintsHandler.handler - ) - - // this.p2p.registerInternal( - // 'get_account_queue_count', - // async ( - // payload: RequestAccountQueueCounts, - // respond: (arg0: QueueCountsResponse) => Promise, - // _sender: unknown, - // _tracker: string, - // msgSize: number - // ) => { - // profilerInstance.scopedProfileSectionStart('get_account_queue_count', false, msgSize) - - // let responseSize = cUninitializedSize - // try { - // const result: QueueCountsResponse = { counts: [], committingAppData: [], accounts: [] } - // 
for (const address of payload.accountIds) { - // const { count, committingAppData } = this.transactionQueue.getAccountQueueCount(address, true) - // result.counts.push(count) - // result.committingAppData.push(committingAppData) - // if (this.config.stateManager.enableAccountFetchForQueueCounts) { - // const currentAccountData = await this.getLocalOrRemoteAccount(address) - // if (currentAccountData && currentAccountData.data) { - // result.accounts.push(currentAccountData.data) - // } - // } - // } - - // responseSize = await respond(result) - // } finally { - // profilerInstance.scopedProfileSectionEnd('get_account_queue_count', responseSize) - // } - // } - // ) - - const binaryGetAccountQueueCountHandler: Route> = { - name: InternalRouteEnum.binary_get_account_queue_count, - handler: async (payload, respond, header, sign) => { - const route = InternalRouteEnum.binary_get_account_queue_count - profilerInstance.scopedProfileSectionStart(route, false, payload.length) - nestedCountersInstance.countEvent('internal', route) - try { - const requestStream = VectorBufferStream.fromBuffer(payload) - const requestType = requestStream.readUInt16() - if (requestType !== TypeIdentifierEnum.cGetAccountQueueCountReq) { - // implement error handling - respond(false, serializeGetAccountQueueCountResp) - return - } - const req = deserializeGetAccountQueueCountReq(requestStream) - // Limit the number of accounts to prevent abuse - const MAX_ACCOUNTS = this.config.stateManager.accountBucketSize // default 200 - if (req.accountIds.length > MAX_ACCOUNTS) { - nestedCountersInstance.countEvent('internal', `${route}-too_many_accounts`) - return respond(BadRequest(`${route} too many accounts requested`), serializeResponseError) - } - const result: GetAccountQueueCountResp = { - counts: [], - committingAppData: [], - accounts: [], - } - if (utils.isValidShardusAddress(req.accountIds) === false) { - nestedCountersInstance.countEvent('internal', `${route}-invalid_account_ids`) - respond(false, serializeGetAccountQueueCountResp) - return - } - for (const address of req.accountIds) { - const { count, committingAppData } = this.transactionQueue.getAccountQueueCount(address, true) - result.counts.push(count) - result.committingAppData.push(committingAppData) - if (this.config.stateManager.enableAccountFetchForQueueCounts) { - const currentAccountData = await this.getLocalOrRemoteAccount(address) - if (currentAccountData && currentAccountData.data) { - result.accounts.push(currentAccountData.data) - } - } - } - respond(result, serializeGetAccountQueueCountResp) - } catch (e) { - if (logFlags.error) this.mainLogger.error(`${route} error: ${e}`) - nestedCountersInstance.countEvent('internal', `${route}-exception`) - respond(false, serializeGetAccountQueueCountResp) - } finally { - profilerInstance.scopedProfileSectionEnd(route, payload.length) - } - }, - } - - this.p2p.registerInternalBinary(binaryGetAccountQueueCountHandler.name, binaryGetAccountQueueCountHandler.handler) - - Context.network.registerExternalGet('debug_stats', isDebugModeMiddleware, (_req, res) => { - const cycle = this.currentCycleShardData.cycleNumber - 1 - - let cycleShardValues = null - if (this.shardValuesByCycle.has(cycle)) { - cycleShardValues = this.shardValuesByCycle.get(cycle) - } - - const blob = this.partitionStats.dumpLogsForCycle(cycle, false, cycleShardValues) - res.json({ cycle, blob }) - }) - - Context.network.registerExternalGet('debug_stats2', isDebugModeMiddleware, (_req, res) => { - const cycle = this.currentCycleShardData.cycleNumber 
- 1 - - let blob = {} - let cycleShardValues = null - if (this.shardValuesByCycle.has(cycle)) { - cycleShardValues = this.shardValuesByCycle.get(cycle) - blob = this.partitionStats.buildStatsReport(cycleShardValues) - } - res.json({ cycle, blob }) - }) - - Context.network.registerExternalGet('clear_tx_debug', isDebugModeMiddlewareLow, (_req, res) => { - this.transactionQueue.clearTxDebugStatList() - res.json({ success: true }) - }) - - Context.network.registerExternalGet('print_tx_debug', isDebugModeMiddlewareLow, (_req, res) => { - const result = this.transactionQueue.printTxDebug() - res.write(result) - res.end() - }) - - Context.network.registerExternalGet('print_tx_debug_by_txid', isDebugModeMiddlewareLow, (_req, res) => { - const txId = _req.query.txId - if (txId == null) { - res.write('txId parameter required') - res.end() - return - } - if (typeof txId !== 'string') { - res.write('txId parameter must be a string') - res.end() - return - } - const result = this.transactionQueue.printTxDebugByTxId(txId) - res.write(result) - res.end() - }) - - Context.network.registerExternalGet('last_process_stats', isDebugModeMiddlewareLow, (_req, res) => { - const result = JSON.stringify(this.transactionQueue.lastProcessStats, null, 2) - res.write(result) - res.end() - }) - - //a debug nodelist so tools can map nodes to the shortIDs that we use - Context.network.registerExternalGet('nodelist_debug', isDebugModeMiddleware, (_req, res) => { - const debugNodeList = [] - for (const node of activeByIdOrder) { - const nodeEntry = { - id: utils.makeShortHash(node.id), - ip: node.externalIp, - port: node.externalPort, - } - debugNodeList.push(nodeEntry) - } - res.json(debugNodeList) - }) - - Context.network.registerExternalGet('debug-consensus-log', isDebugModeMiddleware, (req, res) => { - this.consensusLog = !this.consensusLog - res.write(`consensusLog: ${this.consensusLog}`) - res.end() - }) - - Context.network.registerExternalGet('debug-noncequeue-count', isDebugModeMiddleware, (req, res) => { - let result = this.transactionQueue.getPendingCountInNonceQueue() - res.json(result) - res.end() - }) - - Context.network.registerExternalGet('debug-queue-item-by-txid', isDebugModeMiddlewareLow, (_req, res) => { - const txId = _req.query.txId - if (txId == null || typeof txId !== 'string' || txId.length !== 64) { - res.write('invalid txId provided') - res.end() - return - } - const result = this.transactionQueue.getQueueItemById(txId) - res.write(Utils.safeStringify(result)) - res.end() - }) - - Context.network.registerExternalGet('debug-queue-items', isDebugModeMiddleware, (req, res) => { - let result = this.transactionQueue.getQueueItems() - res.write(Utils.safeStringify(result)) - res.end() - }) - - Context.network.registerExternalGet('debug-queue-clear', isDebugModeMiddleware, (req, res) => { - let minAge = req.query.minAge ? parseInt(req.query.minAge as string) : -1 - if (isNaN(minAge)) minAge = -1 - let result = this.transactionQueue.clearQueueItems(minAge) - res.write(Utils.safeStringify(result)) - res.end() - }) - - Context.network.registerExternalGet('debug-stuck-tx', isDebugModeMiddleware, (_req, res) => { - const opts = { - minAge: _req.query?.minAge || 0, - state: _req.query?.state, - nextStates: _req.query?.nextStates === 'false' ? 
false : true, - } - res.json(this.transactionQueue.getDebugStuckTxs(opts)) - }) - - Context.network.registerExternalGet('debug-stuck-processing', isDebugModeMiddleware, (_req, res) => { - res.json(this.transactionQueue.getDebugProccessingStatus()) - }) - - Context.network.registerExternalGet('debug-fix-stuck-processing', isDebugModeMiddleware, (req, res) => { - let response = 'not stuck' - - //initialize the variable clear with the value of the query parameter clear, the default is false - const clear = req.query.clear === 'true' || false - - const isStuck = this.transactionQueue.isStuckProcessing - if (isStuck) { - response = Utils.safeStringify(this.transactionQueue.getDebugProccessingStatus()) - this.transactionQueue.fixStuckProcessing(clear) + this.voteFlipChance = this.config.debug.voteFlipChance + if (this.voteFlipChance == null) { + this.voteFlipChance = 0 } - res.write(response) - res.end() - }) + } - Context.network.registerExternalGet('debug-fifoLocks', isDebugModeMiddleware, (req, res) => { - const getAll = req.query.all === 'true' || false - let toPrint = this.fifoLocks - if (getAll === false) { - toPrint = this.getLockedFifoAccounts() + this.failNoRepairTxChance = 0 + if (this.config && this.config.debug) { + this.failNoRepairTxChance = this.config.debug.failNoRepairTxChance + if (this.failNoRepairTxChance == null) { + this.failNoRepairTxChance = 0 } - const response = JSON.stringify(toPrint, null, 2) - res.write(response) - res.end() - }) - Context.network.registerExternalGet('debug-fifoLocks-unlock', isDebugModeMiddleware, (_req, res) => { - const unlockCount = this.forceUnlockAllFifoLocks('debug-fifoLocks-unlock') - - const response = JSON.stringify({ unlockCount }, null, 2) - res.write(response) - res.end() - }) - } - - _unregisterEndpoints() { - // this.p2p.unregisterInternal('get_account_data3') - // this.p2p.unregisterInternal('get_account_data_by_list') - - // new shard endpoints: - // this.p2p.unregisterInternal('request_state_for_tx') - // this.p2p.unregisterInternal('request_state_for_tx_post') - // this.p2p.unregisterInternal('request_tx_and_state') - - // this.p2p.unregisterInternal('request_receipt_for_tx') - // this.p2p.unregisterInternal('broadcast_state') - this.p2p.unregisterGossipHandler('spread_tx_to_group') - // this.p2p.unregisterInternal('get_account_data_with_queue_hints') - // this.p2p.unregisterInternal('get_globalaccountreport') - // this.p2p.unregisterInternal('spread_appliedVote') - this.p2p.unregisterGossipHandler('spread_appliedReceipt') - - // this.p2p.unregisterInternal('get_trie_hashes') - // this.p2p.unregisterInternal('sync_trie_hashes') - // this.p2p.unregisterInternal('get_trie_accountHashes') - // this.p2p.unregisterInternal('get_account_data_by_hashes') - - for (const binary_endpoint of Object.values(InternalRouteEnum)) { - this.p2p.unregisterInternal(binary_endpoint) } } + // TEMP hack emit events through p2p + // had issues with composition + // emit(event: string | symbol, ...args: any[]){ + // this.p2p.emit(event, args) + + // } + + + + // ////////////////////////////////////////////////////////////////////////// // ////////////////////////// END Old sync check ////////////////////////// // ////////////////////////////////////////////////////////////////////////// @@ -2655,398 +807,7 @@ class StateManager { /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('_waitForShardData', ` `, ` ${utils.stringifyReduce(this.currentCycleShardData)} `) } - } - - async getLocalOrRemoteAccountQueueCount(address: string): 
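// The added lines above read debug-only chances (voteFlipChance, failNoRepairTxChance) from
// config.debug and fall back to 0 when either the value or the whole debug section is missing.
// The same rule expressed as a small helper; readDebugChanceSketch and DebugChancesSketch are
// illustrative names only.
interface DebugChancesSketch { voteFlipChance?: number; failNoRepairTxChance?: number }

function readDebugChanceSketch(debug: DebugChancesSketch | undefined, key: keyof DebugChancesSketch): number {
  // a missing debug section, or a null/undefined entry, both collapse to 0
  return debug?.[key] ?? 0
}

// usage sketch:
//   this.voteFlipChance = readDebugChanceSketch(this.config?.debug, 'voteFlipChance')
//   this.failNoRepairTxChance = readDebugChanceSketch(this.config?.debug, 'failNoRepairTxChance')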
Promise { - let count: number = -1 - let committingAppData: unknown = undefined - let account: unknown = undefined - if (this.currentCycleShardData == null) { - await this.waitForShardData() - } - if (this.currentCycleShardData == null) { - throw new Error('getLocalOrRemoteAccount: network not ready') - } - let forceLocalGlobalLookup = false - if (this.accountGlobals.isGlobalAccount(address)) { - forceLocalGlobalLookup = true - } - - let accountIsRemote = this.transactionQueue.isAccountRemote(address) - if (forceLocalGlobalLookup) { - accountIsRemote = false - } - - if (accountIsRemote) { - const maxRetry = 3 - let success = false - let retryCount = 0 - const triedConsensusNodeIds: string[] = [] - - while (success === false && retryCount < maxRetry) { - retryCount += 1 - const randomConsensusNode = this.transactionQueue.getRandomConsensusNodeForAccount( - address, - triedConsensusNodeIds - ) - if (randomConsensusNode == null) { - this.statemanager_fatal( - 'getLocalOrRemoteAccountQueueCount', - `No consensus node found for account ${address}, retry ${retryCount}` - ) - continue // will retry another node if counts permit - } - // record already tried consensus node - triedConsensusNodeIds.push(randomConsensusNode.id) - - // Node Precheck! - if ( - this.isNodeValidForInternalMessage( - randomConsensusNode.id, - 'getLocalOrRemoteAccountQueueCount', - true, - true, - true, - true - ) === false - ) { - /* prettier-ignore */ if (logFlags.verbose) this.getAccountFailDump(address, `getLocalOrRemoteAccountQueueCount: isNodeValidForInternalMessage failed, retry ${retryCount}`) - continue // will retry another node if counts permit - } - - const message: RequestAccountQueueCounts = { accountIds: [address] } - let r: QueueCountsResponse | false - - try { - // if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.getAccountQueueCountBinary) { - const serialized_res = await this.p2p.askBinary( - randomConsensusNode, - InternalRouteEnum.binary_get_account_queue_count, - message, - serializeGetAccountQueueCountReq, - deserializeGetAccountQueueCountResp, - {} - ) - r = serialized_res as QueueCountsResponse - // } else { - // r = await this.p2p.ask(randomConsensusNode, 'get_account_queue_count', message) - // } - } catch (error) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error(`ASK FAIL getLocalOrRemoteAccountQueueCount: askBinary ex: ${error.message}`) - r = null - } - - if (!r) { - if (logFlags.error) this.mainLogger.error('ASK FAIL getLocalOrRemoteAccountQueueCount r === false') - } - - const result = r as QueueCountsResponse - if (result != null && result.counts != null && result.counts.length > 0) { - count = result.counts[0] - committingAppData = result.committingAppData[0] - if (this.config.stateManager.enableAccountFetchForQueueCounts) { - account = result.accounts[0] - } - success = true - /* prettier-ignore */ if (logFlags.verbose) console.log(`queue counts response: ${count} address:${utils.stringifyReduce(address)}`) - } else { - if (result == null) { - /* prettier-ignore */ if (logFlags.verbose) this.getAccountFailDump(address, 'remote request missing data 2: result == null') - } else if (result.counts == null) { - /* prettier-ignore */ if (logFlags.verbose) this.getAccountFailDump(address, 'remote request missing data 2: result.counts == null ' + utils.stringifyReduce(result)) - } else if (result.counts.length <= 0) { - /* prettier-ignore */ if (logFlags.verbose) this.getAccountFailDump(address, 'remote request missing data 2: result.counts.length <= 0 ' + 
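// The remote branch of getLocalOrRemoteAccountQueueCount above retries up to maxRetry times, each
// attempt picking a consensus node it has not asked yet and pre-checking it before sending the
// request. The same bounded loop in isolation; pickNode, isUsable and ask stand in for
// getRandomConsensusNodeForAccount, isNodeValidForInternalMessage and askBinary.
async function askUntriedPeersSketch<T>(
  pickNode: (exclude: string[]) => { id: string } | null,
  isUsable: (id: string) => boolean,
  ask: (id: string) => Promise<T | null>,
  maxRetry = 3
): Promise<T | null> {
  const tried: string[] = []
  for (let attempt = 0; attempt < maxRetry; attempt++) {
    const node = pickNode(tried)
    if (node == null) continue           // nothing left to pick; this still spends an attempt
    tried.push(node.id)                  // never ask the same node twice
    if (!isUsable(node.id)) continue     // precheck failed; try another node
    const result = await ask(node.id)
    if (result != null) return result    // success
  }
  return null                            // all attempts exhausted
}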
utils.stringifyReduce(result)) - } - /* prettier-ignore */ if (logFlags.verbose) console.log(`queue counts failed: ${utils.stringifyReduce(result)} address:${utils.stringifyReduce(address)}`) - } - } - } else { - // we are local! - const queueCountResult = this.transactionQueue.getAccountQueueCount(address) - count = queueCountResult.count - committingAppData = queueCountResult.committingAppData - if (this.config.stateManager.enableAccountFetchForQueueCounts) { - const currentAccountData = await this.getLocalOrRemoteAccount(address) - if (currentAccountData) { - account = currentAccountData.data - } - } - /* prettier-ignore */ if (logFlags.verbose) console.log(`queue counts local: ${count} address:${utils.stringifyReduce(address)}`) - } - - return { count, committingAppData, account } - } - - // todo support metadata so we can serve up only a portion of the account - // todo 2? communicate directly back to client... could have security issue. - // todo 3? require a relatively stout client proof of work - async getLocalOrRemoteAccount( - address: string, - opts: { - useRICache: boolean // enables the RI cache. enable only for immutable data - canThrowException?: boolean - } = { useRICache: false, canThrowException: false } - ): Promise { - let wrappedAccount: ShardusTypes.WrappedDataFromQueue | null = null - if (!isServiceMode()) { - if (this.currentCycleShardData == null) { - await this.waitForShardData() - } - // TSConversion since this should never happen due to the above function should we assert that the value is non null?. Still need to figure out the best practice. - if (this.currentCycleShardData == null) { - throw new Error('getLocalOrRemoteAccount: network not ready') - } - } - - // If enabled, check the RI cache first - if (opts.useRICache) { - const riCacheResult = await this.app.getCachedRIAccountData([address]) - if (riCacheResult != null) { - if (riCacheResult.length > 0) { - nestedCountersInstance.countEvent('stateManager', 'getLocalOrRemoteAccount: RI cache hit') - if (logFlags.verbose) this.mainLogger.debug(`getLocalOrRemoteAccount: RI cache hit for ${address}`) - wrappedAccount = riCacheResult[0] as ShardusTypes.WrappedDataFromQueue - return wrappedAccount - } - } else { - nestedCountersInstance.countEvent('stateManager', 'getLocalOrRemoteAccount: RI cache miss') - } - } - - let forceLocalGlobalLookup = false - - if (this.accountGlobals.isGlobalAccount(address) || isServiceMode()) { - forceLocalGlobalLookup = true - } - - //it seems backwards that isServiceMode would treat the account as always remote, as it has access to all data locally - let accountIsRemote = isServiceMode() ? true : this.transactionQueue.isAccountRemote(address) - - // hack to say we have all the data - if (!isServiceMode()) { - if (this.currentCycleShardData.nodes.length <= this.currentCycleShardData.shardGlobals.consensusRadius) { - accountIsRemote = false - } - } - if (forceLocalGlobalLookup) { - accountIsRemote = false - } - - if (accountIsRemote) { - let randomConsensusNode: P2PTypes.NodeListTypes.Node - const preCheckLimit = 5 - for (let i = 0; i < preCheckLimit; i++) { - randomConsensusNode = this.transactionQueue.getRandomConsensusNodeForAccount(address) - if (randomConsensusNode == null) { - nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `precheck: no consensus node found`) - throw new Error(`getLocalOrRemoteAccount: no consensus node found`) - } - // Node Precheck!. this check our internal records to find a good node to talk to. 
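// getLocalOrRemoteAccount above decides where to read from in a fixed order: the RI cache first
// (only when the caller opts in, for immutable data), a forced-local path for global accounts, then
// the usual local/remote split. A compressed sketch of that decision order; the function parameters
// stand in for getCachedRIAccountData, isGlobalAccount and isAccountRemote, and the small-network
// shortcut plus precheck/retry details are omitted.
async function lookupAccountSketch<T>(
  address: string,
  opts: { useRICache: boolean },
  riCache: (addr: string) => Promise<T | null>,
  isGlobal: (addr: string) => boolean,
  isRemote: (addr: string) => boolean,
  readLocal: (addr: string) => Promise<T | null>,
  readRemote: (addr: string) => Promise<T | null>
): Promise<T | null> {
  // 1. immutable data can be served straight from the cache
  if (opts.useRICache) {
    const cached = await riCache(address)
    if (cached != null) return cached
  }
  // 2. globals are always readable locally, so never go remote for them
  const remote = isGlobal(address) ? false : isRemote(address)
  // 3. otherwise fall through to the normal local or remote path
  return remote ? readRemote(address) : readLocal(address)
}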
- // it is worth it to look through the list if needed. - if ( - this.isNodeValidForInternalMessage( - randomConsensusNode.id, - 'getLocalOrRemoteAccount', - true, - true, - true, - true - ) === false - ) { - //we got to the end of our tries? - if (i >= preCheckLimit - 1) { - /* prettier-ignore */ if (logFlags.verbose || logFlags.getLocalOrRemote) this.getAccountFailDump(address, 'getLocalOrRemoteAccount: isNodeValidForInternalMessage failed, no retry') - //return null ....better to throw an error - if (opts.canThrowException) { - nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `precheck: out of nodes to try`) - throw new Error(`getLocalOrRemoteAccount: no consensus nodes worth asking`) - } else return null - } - } else { - break - } - } - - const message = { accountIds: [address] } - - let r: GetAccountDataWithQueueHintsResp - - // if ( - // this.config.p2p.useBinarySerializedEndpoints && - // this.config.p2p.getAccountDataWithQueueHintsBinary - // ) { - try { - const serialized_res = await this.p2p.askBinary< - GetAccountDataWithQueueHintsReqSerializable, - GetAccountDataWithQueueHintsRespSerializable - >( - randomConsensusNode, - InternalRouteEnum.binary_get_account_data_with_queue_hints, - message, - serializeGetAccountDataWithQueueHintsReq, - deserializeGetAccountDataWithQueueHintsResp, - {} - ) - r = serialized_res as GetAccountDataWithQueueHintsResp - } catch (er) { - if (er instanceof ResponseError && logFlags.error) { - this.mainLogger.error( - `ASK FAIL getLocalOrRemoteAccount exception: ResponseError encountered. Code: ${er.Code}, AppCode: ${er.AppCode}, Message: ${er.Message}` - ) - } - if (logFlags.verbose || logFlags.getLocalOrRemote) this.mainLogger.error('askBinary', er) - if (opts.canThrowException) { - throw er - } else { - nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `askBinary ex: ${er?.message}`) - } - } - // } else { - // r = await this.p2p.ask(randomConsensusNode, 'get_account_data_with_queue_hints', message) - // } - - if (!r) { - if (logFlags.error || logFlags.getLocalOrRemote) - this.mainLogger.error('ASK FAIL getLocalOrRemoteAccount r === false') - if (opts.canThrowException) throw new Error(`getLocalOrRemoteAccount: remote node had an exception`) - } - - const result = r as GetAccountDataWithQueueHintsResp - if (result != null && result.accountData != null && result.accountData.length > 0) { - wrappedAccount = result.accountData[0] - if (wrappedAccount == null) { - if (logFlags.verbose || logFlags.getLocalOrRemote) - this.getAccountFailDump(address, 'remote result.accountData[0] == null') - nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `remote result.accountData[0] == null`) - } - return wrappedAccount - } else { - //these cases probably should throw an error to, but dont wont to over prescribe the format yet - //if the remote node has a major breakdown it should return false - if (result == null) { - /* prettier-ignore */ if (logFlags.verbose || logFlags.getLocalOrRemote) this.getAccountFailDump(address, 'remote request missing data: result == null') - nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `remote else.. result == null`) - } else if (result.accountData == null) { - /* prettier-ignore */ if (logFlags.verbose || logFlags.getLocalOrRemote) this.getAccountFailDump(address, 'remote request missing data: result.accountData == null ' + utils.stringifyReduce(result)) - nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `remote else.. 
result.accountData == null`) - } else if (result.accountData.length <= 0) { - /* prettier-ignore */ if (logFlags.verbose || logFlags.getLocalOrRemote) this.getAccountFailDump(address, 'remote request missing data: result.accountData.length <= 0 ' + utils.stringifyReduce(result)) - nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `remote else.. result.accountData.length <= 0 `) - } - } - } else { - // we are local! - const accountData = await this.app.getAccountDataByList([address]) - if (accountData != null) { - for (const wrappedAccountEntry of accountData) { - // We are going to add in new data here, which upgrades the account wrapper to a new type. - const expandedRef = wrappedAccountEntry as ShardusTypes.WrappedDataFromQueue - expandedRef.seenInQueue = false - - if (this.lastSeenAccountsMap != null) { - const queueEntry = this.lastSeenAccountsMap[expandedRef.accountId] - if (queueEntry != null) { - expandedRef.seenInQueue = true - } - } - wrappedAccount = expandedRef - } - } else { - //this should probably throw as we expect a [] for the real empty case - //avoiding too many changes - if (logFlags.verbose || logFlags.getLocalOrRemote) - this.getAccountFailDump(address, 'getAccountDataByList() returned null') - nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `localload: getAccountDataByList() returned null`) - return null - } - // there must have been an issue in the past, but for some reason we are checking the first element in the array now. - if (accountData[0] == null) { - if (logFlags.verbose || logFlags.getLocalOrRemote) this.getAccountFailDump(address, 'accountData[0] == null') - nestedCountersInstance.countEvent('getLocalOrRemoteAccount', `localload: accountData[0] == null`) - } - if (accountData.length > 1 || accountData.length == 0) { - /* prettier-ignore */ if (logFlags.verbose || logFlags.getLocalOrRemote) this.getAccountFailDump(address, `getAccountDataByList() returned wrong element count: ${accountData}`) - nestedCountersInstance.countEvent( - 'getLocalOrRemoteAccount', - `localload: getAccountDataByList() returned wrong element count` - ) - } - return wrappedAccount - } - return null - } - - getAccountFailDump(address: string, message: string) { - // this.currentCycleShardData - /* prettier-ignore */ if (logFlags.playback) this.logger.playbackLogNote('getAccountFailDump', ` `, `${utils.makeShortHash(address)} ${message} `) - } - - // HOMENODEMATHS is this used by any apps? it is not used by shardus - async getRemoteAccount(address: string) { - let wrappedAccount: unknown - - await this.waitForShardData() - // TSConversion since this should never happen due to the above function should we assert that the value is non null?. Still need to figure out the best practice. - if (this.currentCycleShardData == null) { - throw new Error('getRemoteAccount: network not ready') - } - - const homeNode = ShardFunctions.findHomeNode( - this.currentCycleShardData.shardGlobals, - address, - this.currentCycleShardData.parititionShardDataMap - ) - if (homeNode == null) { - throw new Error(`getRemoteAccount: no home node found`) - } - - // Node Precheck! 
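// On the local path above, each wrapped account returned by getAccountDataByList is upgraded to a
// WrappedDataFromQueue by adding a seenInQueue flag derived from lastSeenAccountsMap. That
// decoration step in isolation, with simplified shapes standing in for the real wrapped-data types.
interface WrappedDataSketch { accountId: string; data: unknown }
interface WrappedDataFromQueueSketch extends WrappedDataSketch { seenInQueue: boolean }

function markSeenInQueueSketch(
  accounts: WrappedDataSketch[],
  lastSeen: Record<string, unknown> | null
): WrappedDataFromQueueSketch[] {
  return accounts.map((entry) => ({
    ...entry,
    // an account is "seen in queue" when the last-seen map still holds an entry for it
    seenInQueue: lastSeen != null && lastSeen[entry.accountId] != null,
  }))
}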
TODO implement retry - if (this.isNodeValidForInternalMessage(homeNode.node.id, 'getRemoteAccount', true, true) === false) { - /* prettier-ignore */ if (logFlags.error) this.mainLogger.error('getRemoteAccount: isNodeValidForInternalMessage failed, no retry yet') - return null - } - - const message = { accountIds: [address] } - let result: GetAccountDataWithQueueHintsResp - // if (this.config.p2p.useBinarySerializedEndpoints && this.config.p2p.getAccountDataWithQueueHintsBinary) { - try { - const serialized_res = await this.p2p.askBinary< - GetAccountDataWithQueueHintsReqSerializable, - GetAccountDataWithQueueHintsRespSerializable - >( - homeNode.node, - InternalRouteEnum.binary_get_account_data_with_queue_hints, - message, - serializeGetAccountDataWithQueueHintsReq, - deserializeGetAccountDataWithQueueHintsResp, - {} - ) - result = serialized_res as GetAccountDataWithQueueHintsResp - } catch (er) { - if (er instanceof ResponseError && logFlags.error) { - this.mainLogger.error( - `ASK FAIL getRemoteAccount exception: ResponseError encountered. Code: ${er.Code}, AppCode: ${er.AppCode}, Message: ${er.Message}` - ) - } else if (logFlags.verbose) this.mainLogger.error('ASK FAIL getRemoteAccount exception:', er) - return null - } - // } else { - // result = await this.p2p.ask(homeNode.node, 'get_account_data_with_queue_hints', message) - // } - - if (!result) { - if (logFlags.error) this.mainLogger.error('ASK FAIL getRemoteAccount result === false') - } - if (result === null) { - if (logFlags.error) this.mainLogger.error('ASK FAIL getRemoteAccount result === null') - } - if (result != null && result.accountData != null && result.accountData.length > 0) { - wrappedAccount = result.accountData[0] - return wrappedAccount - } - - return null - } + } getClosestNodes(hash: string, count = 1, selfExclude = false): ShardusTypes.Node[] { if (this.currentCycleShardData == null) { @@ -3393,215 +1154,7 @@ class StateManager { } } } - } - - /// ///////////////////////////////////////////////////////// - /*** - * ######## #### ######## ####### ## ####### ###### ## ## ###### - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## - * ###### ## ###### ## ## ## ## ## ## ##### ###### - * ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## ## ## ## - * ## #### ## ####### ######## ####### ###### ## ## ###### - */ - - async fifoLock(fifoName: string): Promise { - if (this.config.stateManager.fifoUnlockFix3 === true) { - return - } - - const stack = '' // new Error().stack - if (logFlags.debug) this.mainLogger.debug(`fifoLock: ${fifoName} ${stack}`) - - // eslint-disable-next-line security/detect-object-injection - let thisFifo = this.fifoLocks[fifoName] - if (thisFifo == null) { - thisFifo = { - fifoName, - queueCounter: 0, - waitingList: [], - lastServed: 0, - queueLocked: false, - lockOwner: 1, - lastLock: shardusGetTime(), - } - // eslint-disable-next-line security/detect-object-injection - this.fifoLocks[fifoName] = thisFifo - } - thisFifo.queueCounter++ - const ourID = thisFifo.queueCounter - const entry = { id: ourID } - - if (fifoName === 'accountModification') { - nestedCountersInstance.countEvent('fifo-backup', `accountModification ${thisFifo.waitingList.length}`) - } - - if (thisFifo.waitingList.length > 0 || thisFifo.queueLocked) { - thisFifo.waitingList.push(entry) - // wait till we are at the front of the queue, and the queue is not locked - while ((thisFifo.waitingList.length > 0 && thisFifo.waitingList[0]?.id !== ourID) || thisFifo.queueLocked) { - // 
todo perf optimization to reduce the amount of times we have to sleep (attempt to come out of sleep at close to the right time) - let sleepEstimate = ourID - thisFifo.lastServed - if (sleepEstimate < 1) { - sleepEstimate = 1 - } - await utils.sleep(1 * sleepEstimate) - // await utils.sleep(2) - } - // remove our entry from the array - thisFifo.waitingList.shift() - } - - // lock things so that only our calling function can do work - thisFifo.queueLocked = true - thisFifo.lockOwner = ourID - thisFifo.lastServed = ourID - //this can be used to cleanup old fifo locks - thisFifo.lastLock = shardusGetTime() - return ourID - } - - fifoUnlock(fifoName: string, id: number) { - if (this.config.stateManager.fifoUnlockFix3 === true) { - return - } - - const stack = '' // new Error().stack - if (logFlags.debug) this.mainLogger.debug(`fifoUnlock: ${fifoName} ${stack}`) - - // eslint-disable-next-line security/detect-object-injection - const thisFifo = this.fifoLocks[fifoName] - if (id === -1 || !thisFifo) { - return // nothing to do - } - if (thisFifo.lockOwner === id) { - thisFifo.queueLocked = false - } else if (id !== -1) { - // this should never happen as long as we are careful to use try/finally blocks - this.statemanager_fatal(`fifoUnlock`, `Failed to unlock the fifo ${thisFifo.fifoName}: ${id}`) - } - } - - /** - * bulkFifoLockAccounts - * @param {string[]} accountIDs - */ - async bulkFifoLockAccounts(accountIDs: string[]) { - if (this.config.stateManager.fifoUnlockFix3 === true) { - return [] - } - // lock all the accounts we will modify - const wrapperLockId = await this.fifoLock('atomicWrapper') - const ourLocks = [] - const seen: StringBoolObjectMap = {} - for (const accountKey of accountIDs) { - // eslint-disable-next-line security/detect-object-injection - if (seen[accountKey] === true) { - ourLocks.push(-1) //lock skipped, so add a placeholder - continue - } - // eslint-disable-next-line security/detect-object-injection - seen[accountKey] = true - const ourLockID = await this.fifoLock(accountKey) - ourLocks.push(ourLockID) - } - this.fifoUnlock('atomicWrapper', wrapperLockId) - return ourLocks - } - - /** - * bulkFifoUnlockAccounts - * @param {string[]} accountIDs - * @param {number[]} ourLocks - */ - bulkFifoUnlockAccounts(accountIDs: string[], ourLocks: number[]) { - if (this.config.stateManager.fifoUnlockFix3 === true) { - return - } - const seen: StringBoolObjectMap = {} - - // unlock the accounts we locked - /* eslint-disable security/detect-object-injection */ - for (let i = 0; i < ourLocks.length; i++) { - const accountID = accountIDs[i] - if (seen[accountID] === true) { - continue - } - seen[accountID] = true - const ourLockID = ourLocks[i] - if (ourLockID == -1) { - this.statemanager_fatal( - `bulkFifoUnlockAccounts_fail`, - `bulkFifoUnlockAccounts hit placeholder i:${i} ${utils.stringifyReduce({ accountIDs, ourLocks })} ` - ) - } - - this.fifoUnlock(accountID, ourLockID) - } - /* eslint-enable security/detect-object-injection */ - } - - getLockedFifoAccounts(): FifoLockObjectMap { - const results = {} - if (this.fifoLocks != null) { - for (const [key, value] of Object.entries(this.fifoLocks)) { - if (value.queueLocked) { - // eslint-disable-next-line security/detect-object-injection - results[key] = value - } - } - } - return results - } - - /** - * this funtion will unlock all fifo locks that are currently locked - * ideally we should not be calling this, but it is currently needed - * as we try to transition to more stable fifo locks. 
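// fifoLock/fifoUnlock above hand out ticket numbers and poll a waiting list so callers are served
// strictly in arrival order. The same guarantee can be sketched with promise chaining instead of
// polling; this FifoMutexSketch is an illustrative alternative, not the implementation used here.
class FifoMutexSketch {
  private tail: Promise<void> = Promise.resolve()

  // resolves once every earlier caller has released; the returned function releases our turn
  async lock(): Promise<() => void> {
    let release!: () => void
    const turn = new Promise<void>((resolve) => { release = resolve })
    const previous = this.tail
    this.tail = previous.then(() => turn)   // queue our turn behind the current tail
    await previous                          // wait for all earlier lock holders to release
    return release
  }
}

// usage, mirroring the try/finally discipline the surrounding comments call for:
//   const release = await fifo.lock()
//   try { /* touch the account */ } finally { release() }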
- * @param tag - * @returns - */ - forceUnlockAllFifoLocks(tag: string): number { - nestedCountersInstance.countEvent('processing', 'forceUnlockAllFifoLocks ' + tag) - - const locked = this.getLockedFifoAccounts() - let clearCount = 0 - for (const value of Object.values(locked)) { - value.queueLocked = false - value.waitingList = [] - //set this so we don't clean it up too soon. - value.lastLock = shardusGetTime() - //value.queueCounter - //do we need to fix up counters - clearCount++ - } - return clearCount - } - - /** - * now that we have fixes a but that was stomping fifo locks we could have a problem - * where the memory grows forever. This function will clean up old locks that are no longer needed. - */ - clearStaleFifoLocks() { - try { - const time = shardusGetTime() - 1000 * 60 * 10 //10 minutes ago - const keysToDelete = [] - for (const [key, value] of Object.entries(this.fifoLocks)) { - if (value.lastLock < time && value.queueLocked === false) { - keysToDelete.push(key) - } - } - - for (const key of keysToDelete) { - // eslint-disable-next-line security/detect-object-injection - delete this.fifoLocks[key] - } - nestedCountersInstance.countEvent('stateManager', 'clearStaleFifoLocks', keysToDelete.length) - } catch (err) { - this.mainLogger.error(`clearStaleFifoLocks: ${err}`) - } - } + } /*** * ###### ## ######## ### ## ## ## ## ######## @@ -4026,286 +1579,7 @@ class StateManager { ) } - /*** - * ######## ######## ###### ######## #### ######## ######## ###### - * ## ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## - * ######## ###### ## ###### ## ######## ## ###### - * ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## ## - * ## ## ######## ###### ######## #### ## ## ###### - */ - - // DEPRECATED AFTER POQO - // /** - // * getReceipt - // * Since there are few places where receipts can be stored on a QueueEntry this determines the correct one to return - // * @param queueEntry - // */ - // getReceipt(queueEntry: QueueEntry): AppliedReceipt { - // if (queueEntry.appliedReceiptFinal != null) { - // return queueEntry.appliedReceiptFinal - // } - // // start with a receipt we made - // let receipt: AppliedReceipt = queueEntry.appliedReceipt - // if (receipt == null) { - // // or see if we got one - // receipt = queueEntry.recievedAppliedReceipt - // } - // // if we had to repair use that instead. this stomps the other ones - // if (queueEntry.appliedReceiptForRepair != null) { - // receipt = queueEntry.appliedReceiptForRepair - // } - // queueEntry.appliedReceiptFinal = receipt - // return receipt - // } - - getSignedReceipt(queueEntry: QueueEntry): SignedReceipt { - if (queueEntry.signedReceiptFinal != null) { - return queueEntry.signedReceiptFinal - } - let finalReceipt: SignedReceipt - if (queueEntry.signedReceipt && queueEntry.receivedSignedReceipt == null) { - finalReceipt = queueEntry.signedReceipt - } - if (queueEntry.signedReceipt == null && queueEntry.receivedSignedReceipt) { - // or see if we got one - finalReceipt = queueEntry.receivedSignedReceipt - } - // if we had to repair use that instead. 
this stomps the other ones - if (queueEntry.signedReceiptForRepair != null) { - finalReceipt = queueEntry.signedReceiptForRepair - } - queueEntry.signedReceiptFinal = finalReceipt - return finalReceipt - } - - // DEPRECATED AFTER POQO - // getReceipt2(queueEntry: QueueEntry): AppliedReceipt2 { - // if (queueEntry.appliedReceiptFinal2 != null) { - // return queueEntry.appliedReceiptFinal2 - // } - // if (Context.config.stateManager.useNewPOQ === false) { - // // start with a receipt we made - // let receipt: AppliedReceipt2 = queueEntry.appliedReceipt2 - // if (receipt == null) { - // // or see if we got one - // receipt = queueEntry.recievedAppliedReceipt2 - // } - // // if we had to repair use that instead. this stomps the other ones - // if (queueEntry.appliedReceiptForRepair2 != null) { - // receipt = queueEntry.appliedReceiptForRepair2 - // } - // queueEntry.appliedReceiptFinal2 = receipt - // return receipt - // } else { - // let finalReceipt: AppliedReceipt2 - // if (queueEntry.appliedReceipt2 && queueEntry.recievedAppliedReceipt2 == null) { - // finalReceipt = queueEntry.appliedReceipt2 - // } - // if (queueEntry.appliedReceipt2 == null && queueEntry.recievedAppliedReceipt2) { - // // or see if we got one - // finalReceipt = queueEntry.recievedAppliedReceipt2 - // } else if (queueEntry.appliedReceipt2 && queueEntry.recievedAppliedReceipt2) { - // // if we have 2 receipts, use a challenge one if there is any - // const isOurReceiptChallenge = queueEntry.appliedReceipt2.confirmOrChallenge && queueEntry.appliedReceipt2.confirmOrChallenge.message === 'challenge' - // const isReceivedReceiptChallenge = queueEntry.recievedAppliedReceipt2.confirmOrChallenge && queueEntry.recievedAppliedReceipt2.confirmOrChallenge.message === 'challenge' - // if (isOurReceiptChallenge && !isReceivedReceiptChallenge) { - // nestedCountersInstance.countEvent('stateManager', 'getReceipt2: isOurReceiptChallenge: true') - // if (logFlags.verbose) this.mainLogger.debug(`getReceipt2: isOurReceiptChallenge: true`) - // finalReceipt = queueEntry.appliedReceipt2 - // return finalReceipt - // } else if (!isOurReceiptChallenge && isReceivedReceiptChallenge) { - // nestedCountersInstance.countEvent('stateManager', 'getReceipt2: isReceivedReceiptChallenge: true') - // if (logFlags.verbose) this.mainLogger.debug(`getReceipt2: isReceivedReceiptChallenge: true`) - // finalReceipt = queueEntry.recievedAppliedReceipt2 - // return finalReceipt - // } - - // // we have 2 receipts. Could be both challenges or confirmation, use a better one - // const localReceiptNodeId = queueEntry.appliedReceipt2.confirmOrChallenge.nodeId - // const receivedReceiptNodeId = queueEntry.recievedAppliedReceipt2.confirmOrChallenge.nodeId - - // const localReceiptNodeRank = this.transactionQueue.computeNodeRank( - // localReceiptNodeId, - // queueEntry.acceptedTx.txId, - // queueEntry.acceptedTx.timestamp - // ) - // const receivedReceiptNodeRank = this.transactionQueue.computeNodeRank( - // receivedReceiptNodeId, - // queueEntry.acceptedTx.txId, - // queueEntry.acceptedTx.timestamp - // ) - // if (localReceiptNodeRank < receivedReceiptNodeRank) { - // // lower the rank, the better the receipt - // finalReceipt = queueEntry.appliedReceipt2 - // } else { - // finalReceipt = queueEntry.recievedAppliedReceipt2 - // } - // } - // // if we had to repair use that instead. 
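// getSignedReceipt above picks between the receipt this node produced, one it received, and a
// repair receipt, with the repair receipt always winning, then memoizes the choice on the queue
// entry. The same precedence in isolation; ReceiptSlotsSketch is a trimmed stand-in for the
// QueueEntry fields involved.
interface ReceiptSlotsSketch<R> {
  signedReceipt?: R | null
  receivedSignedReceipt?: R | null
  signedReceiptForRepair?: R | null
  signedReceiptFinal?: R | null
}

function selectFinalReceiptSketch<R>(entry: ReceiptSlotsSketch<R>): R | null {
  if (entry.signedReceiptFinal != null) return entry.signedReceiptFinal
  let receipt: R | null = null
  if (entry.signedReceipt && entry.receivedSignedReceipt == null) receipt = entry.signedReceipt
  if (entry.signedReceipt == null && entry.receivedSignedReceipt) receipt = entry.receivedSignedReceipt
  // a repair receipt always overrides whatever was chosen above
  if (entry.signedReceiptForRepair != null) receipt = entry.signedReceiptForRepair
  entry.signedReceiptFinal = receipt  // memoize so later callers see the same choice
  return receipt
}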
this stomps the other ones - // if (queueEntry.appliedReceiptForRepair2 != null) { - // finalReceipt = queueEntry.appliedReceiptForRepair2 - // } - // queueEntry.appliedReceiptFinal2 = finalReceipt - // return finalReceipt - // } - // } - - hasReceipt(queueEntry: QueueEntry) { - return this.getSignedReceipt(queueEntry) != null - } - getReceiptResult(queueEntry: QueueEntry) { - const receipt = this.getSignedReceipt(queueEntry) - if (receipt) { - return receipt.proposal.applied - } - return false - } - - // DEPRECATED AFTER POQO - // getReceiptConfirmation(queueEntry: QueueEntry) { - // if (this.transactionQueue.useNewPOQ === false) return true - // const receipt = this.getReceipt2(queueEntry) - // if (receipt) { - // return receipt.result - // } - // if (receipt.confirmOrChallenge && receipt.confirmOrChallenge.message === 'confirm') { - // return true - // } - // return false - // } - - getReceiptProposal(queueEntry: QueueEntry): Proposal { - const receipt = this.getSignedReceipt(queueEntry) - if (receipt) { - return receipt.proposal - } - } - - generateReceiptMapResults(lastCycle: ShardusTypes.Cycle): StateManagerTypes.StateManagerTypes.ReceiptMapResult[] { - const results: StateManagerTypes.StateManagerTypes.ReceiptMapResult[] = [] - - const cycleToSave = lastCycle.counter - - //init results per partition - const receiptMapByPartition: Map = new Map() - for (let i = 0; i < this.currentCycleShardData.shardGlobals.numPartitions; i++) { - const mapResult: ReceiptMapResult = { - cycle: cycleToSave, - partition: i, - receiptMap: {}, - txCount: 0, - txsMap: {}, - txsMapEVMReceipt: {}, - } - receiptMapByPartition.set(i, mapResult) - // add to the list we will return - results.push(mapResult) - } - - // todo add to ReceiptMapResult in shardus types - // txsMap: {[id:string]:WrappedResponse[]}; - // txsMapEVMReceipt: {[id:string]:unknown[]}; - - const queueEntriesToSave: QueueEntry[] = [] - for (const queueEntry of this.transactionQueue._transactionQueue) { - if (queueEntry.cycleToRecordOn === cycleToSave) { - // make sure we have a receipt - const receipt: SignedReceipt = this.getSignedReceipt(queueEntry) - - if (receipt == null) { - //check && queueEntry.globalModification === false because global accounts will not get a receipt, should this change? - /* prettier-ignore */ if(logFlags.error && queueEntry.globalModification === false) this.mainLogger.error(`generateReceiptMapResults found entry in with no receipt in newAcceptedTxQueue. ${utils.stringifyReduce(queueEntry.acceptedTx)}`) - } else { - queueEntriesToSave.push(queueEntry) - } - } - } - - // I am worried that archiveQueueEntries being capped to 5k could cause a reciept breakdown - // if cycle times are long enough to have more than 5000 txs on a node. - // I think we should maybe be working on these as we go rather than processing them in a batch. - - for (const queueEntry of this.transactionQueue.archivedQueueEntries) { - if (queueEntry.cycleToRecordOn === cycleToSave) { - // make sure we have a receipt - const receipt: SignedReceipt = this.getSignedReceipt(queueEntry) - - if (receipt == null) { - //check && queueEntry.globalModification === false - //we dont expect expired TXs to have a receipt. this should reduce log spam - if (queueEntry.state != 'expired') { - /* prettier-ignore */ if(logFlags.error && queueEntry.globalModification === false) this.mainLogger.error(`generateReceiptMapResults found entry in with no receipt in archivedQueueEntries. 
${utils.stringifyReduce(queueEntry.acceptedTx)} state:${queueEntry.state}`) - } - } else { - queueEntriesToSave.push(queueEntry) - } - } - } - - const netId = '123abc' - //go over the save list.. - for (const queueEntry of queueEntriesToSave) { - const accountData: ShardusTypes.WrappedResponse[] = queueEntry?.preApplyTXResult?.applyResponse?.accountData - if (accountData == null) { - /* prettier-ignore */ nestedCountersInstance.countRareEvent('generateReceiptMapResults' , `accountData==null tests: ${queueEntry?.preApplyTXResult == null} ${queueEntry?.preApplyTXResult?.applyResponse == null} ${queueEntry?.preApplyTXResult?.applyResponse?.accountData == null}` ) - } - // delete the localCache - if (accountData != null) { - for (const account of accountData) { - delete account.localCache - } - } - // console.log('accountData accountData', accountData) - for (const partition of queueEntry.involvedPartitions) { - const receipt: SignedReceipt = this.getSignedReceipt(queueEntry) - - const status = receipt.proposal.applied === true ? 'applied' : 'rejected' - const txHash = queueEntry.acceptedTx.txId - const obj = { tx: queueEntry.acceptedTx.data, status, netId } - const txResultFullHash = this.crypto.hash(obj) - const txIdShort = utils.short(txHash) - const txResult = utils.short(txResultFullHash) - - /* eslint-disable security/detect-object-injection */ - if (receiptMapByPartition.has(partition)) { - const mapResult: ReceiptMapResult = receiptMapByPartition.get(partition) - //create an array if we have not seen this index yet - if (mapResult.receiptMap[txIdShort] == null) { - mapResult.receiptMap[txIdShort] = [] - } - - // TODO: too much data duplication to put accounts and receitps in mapResult - // They get duplicated per involved partition currently. - // They should be in a separate list I think.. - - let gotAppReceipt = false - //set receipt data. todo get evmReceiptForTX from receipt. - if (receipt.proposal.appReceiptDataHash != null && receipt.proposal.appReceiptDataHash != '') { - const applyResponse = queueEntry?.preApplyTXResult?.applyResponse - // we may not always have appReceiptData... especially in execute in local shard - if (applyResponse && applyResponse.appReceiptDataHash === receipt.proposal.appReceiptDataHash) { - mapResult.txsMapEVMReceipt[txIdShort] = applyResponse.appReceiptData - gotAppReceipt = true - } - } - - nestedCountersInstance.countEvent('stateManager', `gotAppReceipt:${gotAppReceipt}`) - - mapResult.txsMap[txIdShort] = accountData // For tx data to save in Explorer - - //push the result. note the order is not deterministic unless we were to sort at the end. - mapResult.receiptMap[txIdShort].push(txResult) - mapResult.txCount++ - } - /* eslint-enable security/detect-object-injection */ - } - } - - return results - } - + /*** * ###### ####### ######## ######## * ## ## ## ## ## ## ##
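// generateReceiptMapResults above pre-creates one ReceiptMapResult per partition for the cycle and
// then files each saved transaction's short result hash under every partition it touched. A trimmed
// sketch of that grouping step with simplified types; the hashing, app-receipt data and explorer tx
// maps are left out.
interface ReceiptMapEntrySketch {
  cycle: number
  partition: number
  receiptMap: Record<string, string[]>
  txCount: number
}
interface SavedTxSketch { txIdShort: string; txResult: string; involvedPartitions: number[] }

function buildReceiptMapsSketch(cycle: number, numPartitions: number, txs: SavedTxSketch[]): ReceiptMapEntrySketch[] {
  // one result object per partition, in partition order
  const byPartition = new Map<number, ReceiptMapEntrySketch>()
  for (let i = 0; i < numPartitions; i++) {
    byPartition.set(i, { cycle, partition: i, receiptMap: {}, txCount: 0 })
  }
  for (const tx of txs) {
    for (const partition of tx.involvedPartitions) {
      const entry = byPartition.get(partition)
      if (entry == null) continue
      // each tx id keeps an array so repeated results for the same id can accumulate
      const results = (entry.receiptMap[tx.txIdShort] ??= [])
      results.push(tx.txResult)
      entry.txCount++
    }
  }
  return Array.from(byPartition.values())
}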