Merge branch 'nymkappa/mega-branch' into nymkappa/new-enterprise-launch
commit e4b56bac88

@@ -52,6 +52,7 @@
   "ESPLORA": {
     "REST_API_URL": "http://127.0.0.1:3000",
     "UNIX_SOCKET_PATH": "/tmp/esplora-bitcoin-mainnet",
+    "BATCH_QUERY_BASE_SIZE": 1000,
     "RETRY_UNIX_SOCKET_AFTER": 30000,
     "REQUEST_TIMEOUT": 10000,
     "FALLBACK_TIMEOUT": 5000,
@@ -132,6 +133,11 @@
     "BISQ_URL": "https://bisq.markets/api",
     "BISQ_ONION": "http://bisqmktse2cabavbr2xjq7xw3h6g5ottemo5rolfcwt6aly6tp5fdryd.onion/api"
   },
+  "REDIS": {
+    "ENABLED": false,
+    "UNIX_SOCKET_PATH": "/tmp/redis.sock",
+    "BATCH_QUERY_BASE_SIZE": 5000
+  },
   "REPLICATION": {
     "ENABLED": false,
     "AUDIT": false,

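The two new BATCH_QUERY_BASE_SIZE knobs replace hard-coded batch sizes scattered across the backend (see the mempool, rbf-cache, redis-cache, and lightning diffs below). A minimal sketch of the shared slicing pattern those call sites use, factored into a hypothetical batchedQuery helper that is not part of this commit:

// Illustrative only: the slicing pattern shared by the call sites below.
// `baseSize` would come from config.ESPLORA.BATCH_QUERY_BASE_SIZE or
// config.REDIS.BATCH_QUERY_BASE_SIZE; `scale` mirrors the per-call-site
// divisors used in this commit (e.g. 1/40 for RBF lookups).
async function batchedQuery<T, R>(
  items: T[],
  baseSize: number,
  scale: number,
  runBatch: (slice: T[]) => Promise<R[]>,
): Promise<R[]> {
  const sliceLength = Math.ceil(baseSize * scale);
  const results: R[] = [];
  for (let i = 0; i < Math.ceil(items.length / sliceLength); i++) {
    results.push(...await runBatch(items.slice(i * sliceLength, (i + 1) * sliceLength)));
  }
  return results;
}
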
@@ -53,6 +53,7 @@
   "ESPLORA": {
     "REST_API_URL": "__ESPLORA_REST_API_URL__",
     "UNIX_SOCKET_PATH": "__ESPLORA_UNIX_SOCKET_PATH__",
+    "BATCH_QUERY_BASE_SIZE": 1000,
     "RETRY_UNIX_SOCKET_AFTER": 888,
     "REQUEST_TIMEOUT": 10000,
     "FALLBACK_TIMEOUT": 5000,
@@ -140,6 +141,7 @@
   },
   "REDIS": {
     "ENABLED": false,
-    "UNIX_SOCKET_PATH": "/tmp/redis.sock"
+    "UNIX_SOCKET_PATH": "/tmp/redis.sock",
+    "BATCH_QUERY_BASE_SIZE": 5000
   }
 }

@@ -55,6 +55,7 @@ describe('Mempool Backend Config', () => {
       expect(config.ESPLORA).toStrictEqual({
         REST_API_URL: 'http://127.0.0.1:3000',
         UNIX_SOCKET_PATH: null,
+        BATCH_QUERY_BASE_SIZE: 1000,
         RETRY_UNIX_SOCKET_AFTER: 30000,
         REQUEST_TIMEOUT: 10000,
         FALLBACK_TIMEOUT: 5000,
@@ -144,7 +145,8 @@ describe('Mempool Backend Config', () => {

       expect(config.REDIS).toStrictEqual({
         ENABLED: false,
-        UNIX_SOCKET_PATH: ''
+        UNIX_SOCKET_PATH: '',
+        BATCH_QUERY_BASE_SIZE: 5000,
       });
     });
   });

@@ -5,7 +5,7 @@ export interface AbstractBitcoinApi {
   $getRawTransaction(txId: string, skipConversion?: boolean, addPrevout?: boolean, lazyPrevouts?: boolean): Promise<IEsploraApi.Transaction>;
   $getRawTransactions(txids: string[]): Promise<IEsploraApi.Transaction[]>;
   $getMempoolTransactions(txids: string[]): Promise<IEsploraApi.Transaction[]>;
-  $getAllMempoolTransactions(lastTxid: string);
+  $getAllMempoolTransactions(lastTxid?: string, max_txs?: number);
   $getTransactionHex(txId: string): Promise<string>;
   $getBlockHeightTip(): Promise<number>;
   $getBlockHashTip(): Promise<string>;

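The widened signature turns the esplora mempool dump into a paged endpoint: callers resume from the last txid they saw and hint at a page size. A hedged sketch of the calling convention (the real loop lives in mempool.ts further down; the short-page stop condition here is an assumption for illustration, and the backend's IEsploraApi types are assumed to be in scope):

// Illustrative pagination loop against the new interface.
async function fetchWholeMempool(api: AbstractBitcoinApi, pageSize: number): Promise<IEsploraApi.Transaction[]> {
  const all: IEsploraApi.Transaction[] = [];
  let lastTxid: string | undefined = undefined;
  while (true) {
    const page = await api.$getAllMempoolTransactions(lastTxid, pageSize);
    all.push(...page);
    if (page.length < pageSize) {
      break; // assumed stop condition: a short page means the dump is exhausted
    }
    lastTxid = page[page.length - 1].txid;
  }
  return all;
}
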
@@ -77,7 +77,7 @@ class BitcoinApi implements AbstractBitcoinApi {
     throw new Error('Method getMempoolTransactions not supported by the Bitcoin RPC API.');
   }

-  $getAllMempoolTransactions(lastTxid: string): Promise<IEsploraApi.Transaction[]> {
+  $getAllMempoolTransactions(lastTxid?: string, max_txs?: number): Promise<IEsploraApi.Transaction[]> {
     throw new Error('Method getAllMempoolTransactions not supported by the Bitcoin RPC API.');
   }

@@ -8,8 +8,9 @@ import logger from '../../logger';
 interface FailoverHost {
   host: string,
   rtts: number[],
-  rtt: number
+  rtt: number,
   failures: number,
+  latestHeight?: number,
   socket?: boolean,
   outOfSync?: boolean,
   unreachable?: boolean,
@@ -92,6 +93,7 @@ class FailoverRouter {
         host.rtts.unshift(rtt);
         host.rtts.slice(0, 5);
         host.rtt = host.rtts.reduce((acc, l) => acc + l, 0) / host.rtts.length;
+        host.latestHeight = height;
         if (height == null || isNaN(height) || (maxHeight - height > 2)) {
           host.outOfSync = true;
         } else {
@@ -99,22 +101,23 @@ class FailoverRouter {
         }
         host.unreachable = false;
       } else {
+        host.outOfSync = true;
         host.unreachable = true;
       }
     }

     this.sortHosts();

-    logger.debug(`Tomahawk ranking: ${this.hosts.map(host => '\navg rtt ' + Math.round(host.rtt).toString().padStart(5, ' ') + ' | reachable? ' + (!host.unreachable || false).toString().padStart(5, ' ') + ' | in sync? ' + (!host.outOfSync || false).toString().padStart(5, ' ') + ` | ${host.host}`).join('')}`);
+    logger.debug(`Tomahawk ranking:\n${this.hosts.map((host, index) => this.formatRanking(index, host, this.activeHost, maxHeight)).join('\n')}`);

     // switch if the current host is out of sync or significantly slower than the next best alternative
     if (this.activeHost.outOfSync || this.activeHost.unreachable || (this.activeHost !== this.hosts[0] && this.hosts[0].preferred) || (!this.activeHost.preferred && this.activeHost.rtt > (this.hosts[0].rtt * 2) + 50)) {
       if (this.activeHost.unreachable) {
-        logger.warn(`Unable to reach ${this.activeHost.host}, failing over to next best alternative`);
+        logger.warn(`🚨🚨🚨 Unable to reach ${this.activeHost.host}, failing over to next best alternative 🚨🚨🚨`);
       } else if (this.activeHost.outOfSync) {
-        logger.warn(`${this.activeHost.host} has fallen behind, failing over to next best alternative`);
+        logger.warn(`🚨🚨🚨 ${this.activeHost.host} has fallen behind, failing over to next best alternative 🚨🚨🚨`);
       } else {
-        logger.debug(`${this.activeHost.host} is no longer the best esplora host`);
+        logger.debug(`🛠️ ${this.activeHost.host} is no longer the best esplora host 🛠️`);
       }
       this.electHost();
     }
@@ -122,6 +125,11 @@
     this.pollTimer = setTimeout(() => { this.pollHosts(); }, this.pollInterval);
   }

+  private formatRanking(index: number, host: FailoverHost, active: FailoverHost, maxHeight: number): string {
+    const heightStatus = host.outOfSync ? '🚫' : (host.latestHeight && host.latestHeight < maxHeight ? '🟧' : '✅');
+    return `${host === active ? '⭐️' : '  '} ${host.rtt < Infinity ? Math.round(host.rtt).toString().padStart(5, ' ') + 'ms' : '    -  '} ${host.unreachable ? '🔥' : '✅'} | block: ${host.latestHeight || '??????'} ${heightStatus} | ${host.host} ${host === active ? '⭐️' : '  '}`;
+  }
+
   // sort hosts by connection quality, and update default fallback
   private sortHosts(): void {
     // sort by connection quality
@@ -156,7 +164,7 @@
   private addFailure(host: FailoverHost): FailoverHost {
     host.failures++;
     if (host.failures > 5 && this.multihost) {
-      logger.warn(`Too many esplora failures on ${this.activeHost.host}, falling back to next best alternative`);
+      logger.warn(`🚨🚨🚨 Too many esplora failures on ${this.activeHost.host}, falling back to next best alternative 🚨🚨🚨`);
       this.electHost();
       return this.activeHost;
     } else {
@@ -225,8 +233,8 @@ class ElectrsApi implements AbstractBitcoinApi {
     return this.failoverRouter.$post<IEsploraApi.Transaction[]>('/internal/mempool/txs', txids, 'json');
   }

-  async $getAllMempoolTransactions(lastSeenTxid?: string): Promise<IEsploraApi.Transaction[]> {
-    return this.failoverRouter.$get<IEsploraApi.Transaction[]>('/internal/mempool/txs' + (lastSeenTxid ? '/' + lastSeenTxid : ''));
+  async $getAllMempoolTransactions(lastSeenTxid?: string, max_txs?: number): Promise<IEsploraApi.Transaction[]> {
+    return this.failoverRouter.$get<IEsploraApi.Transaction[]>('/internal/mempool/txs' + (lastSeenTxid ? '/' + lastSeenTxid : ''), 'json', max_txs ? { max_txs } : null);
  }

   $getTransactionHex(txId: string): Promise<string> {

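For reference, a standalone rendering of what the new formatRanking output looks like. The helper body is lightly adapted from the diff above (the unused index parameter is dropped) and the host values are made up:

type MockHost = { host: string; rtt: number; latestHeight?: number; outOfSync?: boolean; unreachable?: boolean };

function formatRanking(host: MockHost, active: MockHost, maxHeight: number): string {
  const heightStatus = host.outOfSync ? '🚫' : (host.latestHeight && host.latestHeight < maxHeight ? '🟧' : '✅');
  return `${host === active ? '⭐️' : '  '} ${host.rtt < Infinity ? Math.round(host.rtt).toString().padStart(5, ' ') + 'ms' : '    -  '} ${host.unreachable ? '🔥' : '✅'} | block: ${host.latestHeight || '??????'} ${heightStatus} | ${host.host} ${host === active ? '⭐️' : '  '}`;
}

const a: MockHost = { host: 'https://esplora-1.example', rtt: 42, latestHeight: 812345 };
const b: MockHost = { host: 'https://esplora-2.example', rtt: Infinity, unreachable: true, outOfSync: true };
console.log([a, b].map(h => formatRanking(h, a, 812345)).join('\n'));
// prints roughly (spacing approximate):
// ⭐️    42ms ✅ | block: 812345 ✅ | https://esplora-1.example ⭐️
//       -   🔥 | block: ?????? 🚫 | https://esplora-2.example
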
@@ -761,8 +761,13 @@ class Blocks {
         this.updateTimerProgress(timer, `saved ${this.currentBlockHeight} to database`);

         if (!fastForwarded) {
-          const lastestPriceId = await PricesRepository.$getLatestPriceId();
-          this.updateTimerProgress(timer, `got latest price id ${this.currentBlockHeight}`);
+          let lastestPriceId;
+          try {
+            lastestPriceId = await PricesRepository.$getLatestPriceId();
+            this.updateTimerProgress(timer, `got latest price id ${this.currentBlockHeight}`);
+          } catch (e) {
+            logger.debug('failed to fetch latest price id from db: ' + (e instanceof Error ? e.message : e));
+          }
           if (priceUpdater.historyInserted === true && lastestPriceId !== null) {
             await blocksRepository.$saveBlockPrices([{
               height: blockExtended.height,
@@ -771,9 +776,7 @@ class Blocks {
             this.updateTimerProgress(timer, `saved prices for ${this.currentBlockHeight}`);
           } else {
             logger.debug(`Cannot save block price for ${blockExtended.height} because the price updater hasnt completed yet. Trying again in 10 seconds.`, logger.tags.mining);
-            setTimeout(() => {
-              indexer.runSingleTask('blocksPrices');
-            }, 10000);
+            indexer.scheduleSingleTask('blocksPrices', 10000);
           }

           // Save blocks summary for visualization if it's enabled

@@ -44,9 +44,13 @@ export enum FeatureBits {
 	KeysendOptional = 55,
 	ScriptEnforcedLeaseRequired = 2022,
 	ScriptEnforcedLeaseOptional = 2023,
+	SimpleTaprootChannelsRequiredFinal = 80,
+	SimpleTaprootChannelsOptionalFinal = 81,
+	SimpleTaprootChannelsRequiredStaging = 180,
+	SimpleTaprootChannelsOptionalStaging = 181,
 	MaxBolt11Feature = 5114,
 };

 export const FeaturesMap = new Map<FeatureBits, string>([
 	[FeatureBits.DataLossProtectRequired, 'data-loss-protect'],
 	[FeatureBits.DataLossProtectOptional, 'data-loss-protect'],
@@ -85,6 +89,10 @@ export const FeaturesMap = new Map<FeatureBits, string>([
 	[FeatureBits.ZeroConfOptional, 'zero-conf'],
 	[FeatureBits.ShutdownAnySegwitRequired, 'shutdown-any-segwit'],
 	[FeatureBits.ShutdownAnySegwitOptional, 'shutdown-any-segwit'],
+	[FeatureBits.SimpleTaprootChannelsRequiredFinal, 'taproot-channels'],
+	[FeatureBits.SimpleTaprootChannelsOptionalFinal, 'taproot-channels'],
+	[FeatureBits.SimpleTaprootChannelsRequiredStaging, 'taproot-channels-staging'],
+	[FeatureBits.SimpleTaprootChannelsOptionalStaging, 'taproot-channels-staging'],
 ]);

 /**

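These assignments follow the BOLT 9 convention that even bits are required and odd bits optional, so 80/81 form the final taproot-channels pair and 180/181 the staging pair. Given the enum and map from this file, a lookup is all a consumer needs:

// Illustrative lookups against the new entries:
console.log(FeaturesMap.get(FeatureBits.SimpleTaprootChannelsRequiredFinal));   // 'taproot-channels'
console.log(FeaturesMap.get(FeatureBits.SimpleTaprootChannelsOptionalStaging)); // 'taproot-channels-staging'
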
@@ -126,7 +126,7 @@ class Mempool {
     loadingIndicators.setProgress('mempool', count / expectedCount * 100);
     while (!done) {
       try {
-        const result = await bitcoinApi.$getAllMempoolTransactions(last_txid);
+        const result = await bitcoinApi.$getAllMempoolTransactions(last_txid, config.ESPLORA.BATCH_QUERY_BASE_SIZE);
         if (result) {
           for (const tx of result) {
             const extendedTransaction = transactionUtils.extendMempoolTransaction(tx);
@@ -235,7 +235,7 @@ class Mempool {

     if (!loaded) {
       const remainingTxids = transactions.filter(txid => !this.mempoolCache[txid]);
-      const sliceLength = 10000;
+      const sliceLength = config.ESPLORA.BATCH_QUERY_BASE_SIZE;
       for (let i = 0; i < Math.ceil(remainingTxids.length / sliceLength); i++) {
         const slice = remainingTxids.slice(i * sliceLength, (i + 1) * sliceLength);
         const txs = await transactionUtils.$getMempoolTransactionsExtended(slice, false, false, false);

@@ -480,7 +480,7 @@ class RbfCache {
     };

     if (config.MEMPOOL.BACKEND === 'esplora') {
-      const sliceLength = 250;
+      const sliceLength = Math.ceil(config.ESPLORA.BATCH_QUERY_BASE_SIZE / 40);
       for (let i = 0; i < Math.ceil(txids.length / sliceLength); i++) {
         const slice = txids.slice(i * sliceLength, (i + 1) * sliceLength);
         try {

@@ -122,8 +122,9 @@ class RedisCache {
   async $removeTransactions(transactions: string[]) {
     try {
       await this.$ensureConnected();
-      for (let i = 0; i < Math.ceil(transactions.length / 10000); i++) {
-        const slice = transactions.slice(i * 10000, (i + 1) * 10000);
+      const sliceLength = config.REDIS.BATCH_QUERY_BASE_SIZE;
+      for (let i = 0; i < Math.ceil(transactions.length / sliceLength); i++) {
+        const slice = transactions.slice(i * sliceLength, (i + 1) * sliceLength);
         await this.client.unlink(slice.map(txid => `mempool:tx:${txid}`));
         logger.debug(`Deleted ${slice.length} transactions from the Redis cache`);
       }

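UNLINK (unlike DEL) reclaims memory asynchronously on the Redis side, but every call still ships its full key list over the socket, hence slicing by the new config value instead of a hard-coded 10000. A minimal standalone sketch, assuming the node-redis v4 client that this cache appears to wrap (the socket path mirrors the config default):

import { createClient } from 'redis';

// Sketch: batched UNLINK of mempool tx keys over a unix socket.
async function unlinkTxs(txids: string[], batchSize = 5000): Promise<void> {
  const client = createClient({ socket: { path: '/tmp/redis.sock' } });
  await client.connect();
  for (let i = 0; i < Math.ceil(txids.length / batchSize); i++) {
    const slice = txids.slice(i * batchSize, (i + 1) * batchSize);
    await client.unlink(slice.map(txid => `mempool:tx:${txid}`));
  }
  await client.disconnect();
}
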
@@ -43,6 +43,7 @@ interface IConfig {
   ESPLORA: {
     REST_API_URL: string;
     UNIX_SOCKET_PATH: string | void | null;
+    BATCH_QUERY_BASE_SIZE: number;
     RETRY_UNIX_SOCKET_AFTER: number;
     REQUEST_TIMEOUT: number;
     FALLBACK_TIMEOUT: number;
@@ -151,6 +152,7 @@ interface IConfig {
   REDIS: {
     ENABLED: boolean;
     UNIX_SOCKET_PATH: string;
+    BATCH_QUERY_BASE_SIZE: number;
   },
 }

@@ -195,6 +197,7 @@ const defaults: IConfig = {
   'ESPLORA': {
     'REST_API_URL': 'http://127.0.0.1:3000',
     'UNIX_SOCKET_PATH': null,
+    'BATCH_QUERY_BASE_SIZE': 1000,
     'RETRY_UNIX_SOCKET_AFTER': 30000,
     'REQUEST_TIMEOUT': 10000,
     'FALLBACK_TIMEOUT': 5000,
@@ -303,6 +306,7 @@ const defaults: IConfig = {
   'REDIS': {
     'ENABLED': false,
     'UNIX_SOCKET_PATH': '',
+    'BATCH_QUERY_BASE_SIZE': 5000,
   },
 };

@@ -2,6 +2,7 @@ import * as fs from 'fs';
 import path from 'path';
 import config from './config';
 import { createPool, Pool, PoolConnection } from 'mysql2/promise';
+import { LogLevel } from './logger';
 import logger from './logger';
 import { FieldPacket, OkPacket, PoolOptions, ResultSetHeader, RowDataPacket } from 'mysql2/typings/mysql';
 import { execSync } from 'child_process';
@@ -33,7 +34,7 @@
   }

   public async query<T extends RowDataPacket[][] | RowDataPacket[] | OkPacket |
-    OkPacket[] | ResultSetHeader>(query, params?, connection?: PoolConnection): Promise<[T, FieldPacket[]]>
+    OkPacket[] | ResultSetHeader>(query, params?, errorLogLevel: LogLevel | 'silent' = 'debug', connection?: PoolConnection): Promise<[T, FieldPacket[]]>
   {
     this.checkDBFlag();
     let hardTimeout;
@@ -55,19 +56,38 @@
         }).then(result => {
           resolve(result);
         }).catch(error => {
+          if (errorLogLevel !== 'silent') {
+            logger[errorLogLevel](`database query "${query?.sql?.slice(0, 160) || (typeof(query) === 'string' || query instanceof String ? query?.slice(0, 160) : 'unknown query')}" failed!`);
+          }
           reject(error);
         }).finally(() => {
           clearTimeout(timer);
         });
       });
     } else {
-      const pool = await this.getPool();
-      return pool.query(query, params);
+      try {
+        const pool = await this.getPool();
+        return pool.query(query, params);
+      } catch (e) {
+        if (errorLogLevel !== 'silent') {
+          logger[errorLogLevel](`database query "${query?.sql?.slice(0, 160) || (typeof(query) === 'string' || query instanceof String ? query?.slice(0, 160) : 'unknown query')}" failed!`);
+        }
+        throw e;
+      }
     }
   }

+  private async $rollbackAtomic(connection: PoolConnection): Promise<void> {
+    try {
+      await connection.rollback();
+      await connection.release();
+    } catch (e) {
+      logger.warn('Failed to rollback incomplete db transaction: ' + (e instanceof Error ? e.message : e));
+    }
+  }
+
   public async $atomicQuery<T extends RowDataPacket[][] | RowDataPacket[] | OkPacket |
-    OkPacket[] | ResultSetHeader>(queries: { query, params }[]): Promise<[T, FieldPacket[]][]>
+    OkPacket[] | ResultSetHeader>(queries: { query, params }[], errorLogLevel: LogLevel | 'silent' = 'debug'): Promise<[T, FieldPacket[]][]>
   {
     const pool = await this.getPool();
     const connection = await pool.getConnection();
@@ -76,7 +96,7 @@

       const results: [T, FieldPacket[]][]  = [];
       for (const query of queries) {
-        const result = await this.query(query.query, query.params, connection) as [T, FieldPacket[]];
+        const result = await this.query(query.query, query.params, errorLogLevel, connection) as [T, FieldPacket[]];
         results.push(result);
       }

@@ -84,9 +104,8 @@

       return results;
     } catch (e) {
-      logger.err('Could not complete db transaction, rolling back: ' + (e instanceof Error ? e.message : e));
-      connection.rollback();
-      connection.release();
+      logger.warn('Could not complete db transaction, rolling back: ' + (e instanceof Error ? e.message : e));
+      this.$rollbackAtomic(connection);
       throw e;
     } finally {
       connection.release();

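The new errorLogLevel parameter sits before the optional connection argument, so ordinary callers are untouched while any caller can now tune or suppress the per-query failure log. A hedged usage sketch (DB stands in for the exported database singleton; the statements and params are placeholders):

// Default behavior: query failures are logged at 'debug'.
await DB.query('SELECT 1');

// Writes that are expected to fail sometimes (e.g. duplicate keys) can pass
// 'silent' and handle the error themselves, as nodes-sockets does below.
const params = ['pubkey', 'socket-addr', 'network']; // placeholder values
try {
  await DB.query('INSERT INTO nodes_sockets(public_key, socket, type) VALUE (?, ?, ?)', params, 'silent');
} catch (e: any) {
  if (e.errno !== 1062) { // anything other than ER_DUP_ENTRY is unexpected
    throw e;
  }
}

// Atomic batches accept the same knob and forward it to every inner query.
await DB.$atomicQuery([{ query: 'SELECT 1', params: [] }], 'warn');
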
@@ -92,9 +92,15 @@ class Server {
     logger.notice(`Starting Mempool Server${worker ? ' (worker)' : ''}... (${backendInfo.getShortCommitHash()})`);

     // Register cleanup listeners for exit events
-    ['exit', 'SIGHUP', 'SIGINT', 'SIGTERM', 'SIGUSR1', 'SIGUSR2', 'uncaughtException', 'unhandledRejection'].forEach(event => {
+    ['exit', 'SIGHUP', 'SIGINT', 'SIGTERM', 'SIGUSR1', 'SIGUSR2'].forEach(event => {
       process.on(event, () => { this.onExit(event); });
     });
+    process.on('uncaughtException', (error) => {
+      this.onUnhandledException('uncaughtException', error);
+    });
+    process.on('unhandledRejection', (reason, promise) => {
+      this.onUnhandledException('unhandledRejection', reason);
+    });

     if (config.MEMPOOL.BACKEND === 'esplora') {
       bitcoinApi.startHealthChecks();
@@ -200,7 +206,7 @@ class Server {
       }
       const newMempool = await bitcoinApi.$getRawMempool();
       const numHandledBlocks = await blocks.$updateBlocks();
-      const pollRate = config.MEMPOOL.POLL_RATE_MS * (indexer.indexerRunning ? 10 : 1);
+      const pollRate = config.MEMPOOL.POLL_RATE_MS * (indexer.indexerIsRunning() ? 10 : 1);
       if (numHandledBlocks === 0) {
         await memPool.$updateMempool(newMempool, pollRate);
       }
@@ -314,14 +320,18 @@ class Server {
     }
   }

-  onExit(exitEvent): void {
+  onExit(exitEvent, code = 0): void {
     logger.debug(`onExit for signal: ${exitEvent}`);
     if (config.DATABASE.ENABLED) {
       DB.releasePidLock();
     }
-    process.exit(0);
+    process.exit(code);
   }

+  onUnhandledException(type, error): void {
+    console.error(`${type}:`, error);
+    this.onExit(type, 1);
+  }
 }

 ((): Server => new Server())();

@@ -15,11 +15,18 @@ export interface CoreIndex {
   best_block_height: number;
 }

+type TaskName = 'blocksPrices' | 'coinStatsIndex';
+
 class Indexer {
-  runIndexer = true;
-  indexerRunning = false;
-  tasksRunning: string[] = [];
-  coreIndexes: CoreIndex[] = [];
+  private runIndexer = true;
+  private indexerRunning = false;
+  private tasksRunning: { [key in TaskName]?: boolean; } = {};
+  private tasksScheduled: { [key in TaskName]?: NodeJS.Timeout; } = {};
+  private coreIndexes: CoreIndex[] = [];
+
+  public indexerIsRunning(): boolean {
+    return this.indexerRunning;
+  }

   /**
    * Check which core index is available for indexing
@@ -69,33 +76,69 @@ class Indexer {
     }
   }

-  public async runSingleTask(task: 'blocksPrices' | 'coinStatsIndex'): Promise<void> {
-    if (!Common.indexingEnabled()) {
-      return;
-    }
-
-    if (task === 'blocksPrices' && !this.tasksRunning.includes(task) && !['testnet', 'signet'].includes(config.MEMPOOL.NETWORK)) {
-      this.tasksRunning.push(task);
-      const lastestPriceId = await PricesRepository.$getLatestPriceId();
-      if (priceUpdater.historyInserted === false || lastestPriceId === null) {
-        logger.debug(`Blocks prices indexer is waiting for the price updater to complete`, logger.tags.mining);
-        setTimeout(() => {
-          this.tasksRunning = this.tasksRunning.filter(runningTask => runningTask !== task);
-          this.runSingleTask('blocksPrices');
-        }, 10000);
-      } else {
-        logger.debug(`Blocks prices indexer will run now`, logger.tags.mining);
-        await mining.$indexBlockPrices();
-        this.tasksRunning = this.tasksRunning.filter(runningTask => runningTask !== task);
-      }
-    }
-
-    if (task === 'coinStatsIndex' && !this.tasksRunning.includes(task)) {
-      this.tasksRunning.push(task);
-      logger.debug(`Indexing coinStatsIndex now`);
-      await mining.$indexCoinStatsIndex();
-      this.tasksRunning = this.tasksRunning.filter(runningTask => runningTask !== task);
-    }
+  /**
+   * schedules a single task to run in `timeout` ms
+   * only one task of each type may be scheduled
+   *
+   * @param {TaskName} task - the type of task
+   * @param {number} timeout - delay in ms
+   * @param {boolean} replace - `true` replaces any already scheduled task (works like a debounce), `false` ignores subsequent requests (works like a throttle)
+   */
+  public scheduleSingleTask(task: TaskName, timeout: number = 10000, replace = false): void {
+    if (this.tasksScheduled[task]) {
+      if (!replace) { // throttle
+        return;
+      } else { // debounce
+        clearTimeout(this.tasksScheduled[task]);
+      }
+    }
+    this.tasksScheduled[task] = setTimeout(async () => {
+      try {
+        await this.runSingleTask(task);
+      } catch (e) {
+        logger.err(`Unexpected error in scheduled task ${task}: ` + (e instanceof Error ? e.message : e));
+      } finally {
+        clearTimeout(this.tasksScheduled[task]);
+      }
+    }, timeout);
+  }
+
+  /**
+   * Runs a single task immediately
+   *
+   * (use `scheduleSingleTask` instead to queue a task to run after some timeout)
+   */
+  public async runSingleTask(task: TaskName): Promise<void> {
+    if (!Common.indexingEnabled() || this.tasksRunning[task]) {
+      return;
+    }
+    this.tasksRunning[task] = true;
+
+    switch (task) {
+      case 'blocksPrices': {
+        if (!['testnet', 'signet'].includes(config.MEMPOOL.NETWORK)) {
+          let lastestPriceId;
+          try {
+            lastestPriceId = await PricesRepository.$getLatestPriceId();
+          } catch (e) {
+            logger.debug('failed to fetch latest price id from db: ' + (e instanceof Error ? e.message : e));
+          }
+          if (priceUpdater.historyInserted === false || lastestPriceId === null) {
+            logger.debug(`Blocks prices indexer is waiting for the price updater to complete`, logger.tags.mining);
+            this.scheduleSingleTask(task, 10000);
+          } else {
+            logger.debug(`Blocks prices indexer will run now`, logger.tags.mining);
+            await mining.$indexBlockPrices();
+          }
+        }
+      } break;
+
+      case 'coinStatsIndex': {
+        logger.debug(`Indexing coinStatsIndex now`);
+        await mining.$indexCoinStatsIndex();
+      } break;
+    }
+
+    this.tasksRunning[task] = false;
+  }

   public async $run(): Promise<void> {

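scheduleSingleTask centralizes the ad-hoc setTimeout retries that blocks.ts and the old runSingleTask both open-coded, and its replace flag selects between throttle and debounce semantics while a run is pending. Illustrative calls, assuming the exported indexer singleton:

indexer.scheduleSingleTask('blocksPrices', 10000);
indexer.scheduleSingleTask('blocksPrices', 10000);        // no-op while one is pending (throttle)
indexer.scheduleSingleTask('coinStatsIndex', 5000, true); // replace = true pushes a pending run back (debounce)
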
@@ -157,4 +157,6 @@ class Logger {
   }
 }

+export type LogLevel = 'emerg' | 'alert' | 'crit' | 'err' | 'warn' | 'notice' | 'info' | 'debug';
+
 export default new Logger();

@@ -14,7 +14,7 @@ class NodesSocketsRepository {
       await DB.query(`
         INSERT INTO nodes_sockets(public_key, socket, type)
         VALUE (?, ?, ?)
-      `, [socket.publicKey, socket.addr, socket.network]);
+      `, [socket.publicKey, socket.addr, socket.network], 'silent');
     } catch (e: any) {
       if (e.errno !== 1062) { // ER_DUP_ENTRY - Not an issue, just ignore this
         logger.err(`Cannot save node socket (${[socket.publicKey, socket.addr, socket.network]}) into db. Reason: ` + (e instanceof Error ? e.message : e));

@@ -79,7 +79,7 @@ class ForensicsService {
       }

       let progress = 0;
-      const sliceLength = 1000;
+      const sliceLength = Math.ceil(config.ESPLORA.BATCH_QUERY_BASE_SIZE / 10);
       // process batches of 1000 channels
       for (let i = 0; i < Math.ceil(allChannels.length / sliceLength); i++) {
         const channels = allChannels.slice(i * sliceLength, (i + 1) * sliceLength);

@@ -290,7 +290,7 @@ class NetworkSyncService {

       const allChannels = await channelsApi.$getChannelsByStatus([0, 1]);

-      const sliceLength = 5000;
+      const sliceLength = Math.ceil(config.ESPLORA.BATCH_QUERY_BASE_SIZE / 2);
       // process batches of 5000 channels
       for (let i = 0; i < Math.ceil(allChannels.length / sliceLength); i++) {
         const channels = allChannels.slice(i * sliceLength, (i + 1) * sliceLength);

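With the default ESPLORA.BATCH_QUERY_BASE_SIZE of 1000, each derived slice length above comes out ten times smaller than the constant it replaces, and all of them now scale together when the base size is tuned:

const base = 1000;                           // config.ESPLORA.BATCH_QUERY_BASE_SIZE default
const mempoolSlice = base;                   // 1000 (replaces a hard-coded 10000)
const rbfSlice = Math.ceil(base / 40);       // 25   (replaces 250)
const forensicsSlice = Math.ceil(base / 10); // 100  (replaces 1000)
const syncSlice = Math.ceil(base / 2);       // 500  (replaces 5000)
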
contributors/starius.txt | 3 (new file)

@@ -0,0 +1,3 @@
+I hereby accept the terms of the Contributor License Agreement in the CONTRIBUTING.md file of the mempool/mempool git repository as of Oct 13, 2023.
+
+Signed starius

@@ -53,6 +53,7 @@
   "ESPLORA": {
     "REST_API_URL": "__ESPLORA_REST_API_URL__",
     "UNIX_SOCKET_PATH": "__ESPLORA_UNIX_SOCKET_PATH__",
+    "BATCH_QUERY_BASE_SIZE": __ESPLORA_BATCH_QUERY_BASE_SIZE__,
     "RETRY_UNIX_SOCKET_AFTER": __ESPLORA_RETRY_UNIX_SOCKET_AFTER__,
     "REQUEST_TIMEOUT": __ESPLORA_REQUEST_TIMEOUT__,
     "FALLBACK_TIMEOUT": __ESPLORA_FALLBACK_TIMEOUT__,
@@ -146,6 +147,7 @@
   },
   "REDIS": {
     "ENABLED": __REDIS_ENABLED__,
-    "UNIX_SOCKET_PATH": "__REDIS_UNIX_SOCKET_PATH__"
+    "UNIX_SOCKET_PATH": "__REDIS_UNIX_SOCKET_PATH__",
+    "BATCH_QUERY_BASE_SIZE": __REDIS_BATCH_QUERY_BASE_SIZE__
   }
 }

@@ -54,6 +54,7 @@ __ELECTRUM_TLS_ENABLED__=${ELECTRUM_TLS_ENABLED:=false}
 # ESPLORA
 __ESPLORA_REST_API_URL__=${ESPLORA_REST_API_URL:=http://127.0.0.1:3000}
 __ESPLORA_UNIX_SOCKET_PATH__=${ESPLORA_UNIX_SOCKET_PATH:="null"}
+__ESPLORA_BATCH_QUERY_BASE_SIZE__=${ESPLORA_BATCH_QUERY_BASE_SIZE:=1000}
 __ESPLORA_RETRY_UNIX_SOCKET_AFTER__=${ESPLORA_RETRY_UNIX_SOCKET_AFTER:=30000}
 __ESPLORA_REQUEST_TIMEOUT__=${ESPLORA_REQUEST_TIMEOUT:=5000}
 __ESPLORA_FALLBACK_TIMEOUT__=${ESPLORA_FALLBACK_TIMEOUT:=5000}
@@ -148,6 +149,7 @@ __MEMPOOL_SERVICES_ACCELERATIONS__=${MEMPOOL_SERVICES_ACCELERATIONS:=false}
 # REDIS
 __REDIS_ENABLED__=${REDIS_ENABLED:=false}
 __REDIS_UNIX_SOCKET_PATH__=${REDIS_UNIX_SOCKET_PATH:=true}
+__REDIS_BATCH_QUERY_BASE_SIZE__=${REDIS_BATCH_QUERY_BASE_SIZE:=5000}

 mkdir -p "${__MEMPOOL_CACHE_DIR__}"

@@ -201,6 +203,7 @@ sed -i "s!__ELECTRUM_TLS_ENABLED__!${__ELECTRUM_TLS_ENABLED__}!g" mempool-config.json

 sed -i "s!__ESPLORA_REST_API_URL__!${__ESPLORA_REST_API_URL__}!g" mempool-config.json
 sed -i "s!__ESPLORA_UNIX_SOCKET_PATH__!${__ESPLORA_UNIX_SOCKET_PATH__}!g" mempool-config.json
+sed -i "s!__ESPLORA_BATCH_QUERY_BASE_SIZE__!${__ESPLORA_BATCH_QUERY_BASE_SIZE__}!g" mempool-config.json
 sed -i "s!__ESPLORA_RETRY_UNIX_SOCKET_AFTER__!${__ESPLORA_RETRY_UNIX_SOCKET_AFTER__}!g" mempool-config.json
 sed -i "s!__ESPLORA_REQUEST_TIMEOUT__!${__ESPLORA_REQUEST_TIMEOUT__}!g" mempool-config.json
 sed -i "s!__ESPLORA_FALLBACK_TIMEOUT__!${__ESPLORA_FALLBACK_TIMEOUT__}!g" mempool-config.json
@@ -288,5 +291,6 @@ sed -i "s!__MEMPOOL_SERVICES_ACCELERATIONS__!${__MEMPOOL_SERVICES_ACCELERATIONS_
 # REDIS
 sed -i "s!__REDIS_ENABLED__!${__REDIS_ENABLED__}!g" mempool-config.json
 sed -i "s!__REDIS_UNIX_SOCKET_PATH__!${__REDIS_UNIX_SOCKET_PATH__}!g" mempool-config.json
+sed -i "s!__REDIS_BATCH_QUERY_BASE_SIZE__!${__REDIS_BATCH_QUERY_BASE_SIZE__}!g" mempool-config.json

 node /backend/package/index.js

@@ -1,4 +1,5 @@
 <span class="truncate" [style.max-width]="maxWidth ? maxWidth + 'px' : null" [style.justify-content]="textAlign" [class.inline]="inline">
+  <div class="hidden">{{ text }}</div>
     <ng-container *ngIf="link">
       <a [routerLink]="link" class="truncate-link">
         <ng-container *ngIf="rtl; then rtlTruncated; else ltrTruncated;"></ng-container>
@@ -11,6 +12,7 @@
 </span>

 <ng-template #ltrTruncated>
+
   <span class="first">{{text.slice(0,-lastChars)}}</span><span class="last-four">{{text.slice(-lastChars)}}</span>
 </ng-template>

@@ -27,4 +27,17 @@
   &.inline {
     display: inline-flex;
   }
 }
+
+.hidden {
+  color: transparent;
+  position: absolute;
+  max-width: 300px;
+  overflow: hidden;
+}
+
+@media (max-width: 567px) {
+  .hidden {
+    max-width: 150px;
+  }
+}

@@ -394,7 +394,7 @@ FREEBSD_PKG=()
 FREEBSD_PKG+=(zsh sudo git git-lfs screen curl wget calc neovim)
 FREEBSD_PKG+=(openssh-portable py39-pip rust llvm10 jq base64 libzmq4)
 FREEBSD_PKG+=(boost-libs autoconf automake gmake gcc libevent libtool pkgconf)
-FREEBSD_PKG+=(nginx rsync py39-certbot-nginx mariadb105-server keybase)
+FREEBSD_PKG+=(nginx rsync py39-certbot-nginx mariadb1011-server keybase)
 FREEBSD_PKG+=(geoipupdate)

 FREEBSD_UNFURL_PKG=()

@@ -1,16 +1,19 @@
 #!/usr/bin/env zsh

-# kill "while true" loops
-killall sh
-
 # kill actual node backends
-killall node
+killall node 2>/dev/null
+
+# kill "while true" loops
+killall sh 2>/dev/null

 # kill unfurler chrome instances
-killall chrome
+killall chrome 2>/dev/null

 # kill xorg
-killall xinit
+killall xinit 2>/dev/null
+
+# kill dbus
+killall dbus-daemon 2>/dev/null

 # kill nginx cache warmer scripts
 for pid in `ps uaxww|grep warmer|grep zsh|awk '{print $2}'`;do