From 2c66f7bf30e3b24a2919bd9bed5a24c9b10e802e Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Tue, 27 May 2025 13:07:22 +0300 Subject: [PATCH 01/11] fix: scrolling performance optimizations for table --- .../PaginatedTable/PaginatedTable.tsx | 86 ++++++++++++++----- .../PaginatedTable/useScrollBasedChunks.ts | 2 +- 2 files changed, 67 insertions(+), 21 deletions(-) diff --git a/src/components/PaginatedTable/PaginatedTable.tsx b/src/components/PaginatedTable/PaginatedTable.tsx index 07e5c325a..795e57a8f 100644 --- a/src/components/PaginatedTable/PaginatedTable.tsx +++ b/src/components/PaginatedTable/PaginatedTable.tsx @@ -99,6 +99,17 @@ export const PaginatedTable = ({ [onDataFetched, setFoundEntities, setIsInitialLoad, setTotalEntities], ); + // Set will-change: transform on scroll container if not already set + React.useLayoutEffect(() => { + const scrollContainer = scrollContainerRef.current; + if (scrollContainer) { + const computedStyle = window.getComputedStyle(scrollContainer); + if (computedStyle.willChange !== 'transform') { + scrollContainer.style.willChange = 'transform'; + } + } + }, [scrollContainerRef.current]); + // Reset table on initialization and filters change React.useLayoutEffect(() => { const defaultTotal = initialEntitiesCount || 0; @@ -110,26 +121,61 @@ export const PaginatedTable = ({ }, [initialEntitiesCount, setTotalEntities, setFoundEntities, setIsInitialLoad]); const renderChunks = () => { - return activeChunks.map((isActive, index) => ( - - key={index} - id={index} - calculatedCount={index === activeChunks.length - 1 ? lastChunkSize : chunkSize} - chunkSize={chunkSize} - rowHeight={rowHeight} - columns={columns} - fetchData={fetchData} - filters={filters} - tableName={tableName} - sortParams={sortParams} - getRowClassName={getRowClassName} - renderErrorMessage={renderErrorMessage} - renderEmptyDataMessage={renderEmptyDataMessage} - onDataFetched={handleDataFetched} - isActive={isActive} - keepCache={keepCache} - /> - )); + const chunks: React.ReactElement[] = []; + let i = 0; + + while (i < activeChunks.length) { + const isActive = activeChunks[i]; + + if (isActive) { + // Render active chunk normally + chunks.push( + + key={i} + id={i} + calculatedCount={i === activeChunks.length - 1 ? lastChunkSize : chunkSize} + chunkSize={chunkSize} + rowHeight={rowHeight} + columns={columns} + fetchData={fetchData} + filters={filters} + tableName={tableName} + sortParams={sortParams} + getRowClassName={getRowClassName} + renderErrorMessage={renderErrorMessage} + renderEmptyDataMessage={renderEmptyDataMessage} + onDataFetched={handleDataFetched} + isActive={isActive} + keepCache={keepCache} + />, + ); + i++; + } else { + // Find consecutive inactive chunks and merge them + const startIndex = i; + let totalHeight = 0; + + while (i < activeChunks.length && !activeChunks[i]) { + const currentChunkSize = + i === activeChunks.length - 1 ? 
lastChunkSize : chunkSize; + totalHeight += currentChunkSize * rowHeight; + i++; + } + + // Render merged empty tbody for consecutive inactive chunks + chunks.push( + , + ); + } + } + + return chunks; }; const renderTable = () => ( diff --git a/src/components/PaginatedTable/useScrollBasedChunks.ts b/src/components/PaginatedTable/useScrollBasedChunks.ts index b05b5d458..18883af4c 100644 --- a/src/components/PaginatedTable/useScrollBasedChunks.ts +++ b/src/components/PaginatedTable/useScrollBasedChunks.ts @@ -11,7 +11,7 @@ interface UseScrollBasedChunksProps { overscanCount?: number; } -const DEFAULT_OVERSCAN_COUNT = 1; +const DEFAULT_OVERSCAN_COUNT = 2; export const useScrollBasedChunks = ({ scrollContainerRef, From d29e59f8fb8855882266bc6e5cf6c754f4e740fb Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Tue, 27 May 2025 15:14:53 +0300 Subject: [PATCH 02/11] fix: virtualized spacers --- .../PaginatedTable/PaginatedTable.tsx | 75 +++------- .../PaginatedTable/useVirtualizedTbodies.tsx | 135 ++++++++++++++++++ 2 files changed, 152 insertions(+), 58 deletions(-) create mode 100644 src/components/PaginatedTable/useVirtualizedTbodies.tsx diff --git a/src/components/PaginatedTable/PaginatedTable.tsx b/src/components/PaginatedTable/PaginatedTable.tsx index 795e57a8f..bad2c0a4e 100644 --- a/src/components/PaginatedTable/PaginatedTable.tsx +++ b/src/components/PaginatedTable/PaginatedTable.tsx @@ -1,7 +1,6 @@ import React from 'react'; import {usePaginatedTableState} from './PaginatedTableContext'; -import {TableChunk} from './TableChunk'; import {TableHead} from './TableHead'; import {DEFAULT_TABLE_ROW_HEIGHT} from './constants'; import {b} from './shared'; @@ -15,6 +14,7 @@ import type { RenderErrorMessage, } from './types'; import {useScrollBasedChunks} from './useScrollBasedChunks'; +import {useVirtualizedTbodies} from './useVirtualizedTbodies'; import './PaginatedTable.scss'; @@ -120,63 +120,22 @@ export const PaginatedTable = ({ setIsInitialLoad(true); }, [initialEntitiesCount, setTotalEntities, setFoundEntities, setIsInitialLoad]); - const renderChunks = () => { - const chunks: React.ReactElement[] = []; - let i = 0; - - while (i < activeChunks.length) { - const isActive = activeChunks[i]; - - if (isActive) { - // Render active chunk normally - chunks.push( - - key={i} - id={i} - calculatedCount={i === activeChunks.length - 1 ? lastChunkSize : chunkSize} - chunkSize={chunkSize} - rowHeight={rowHeight} - columns={columns} - fetchData={fetchData} - filters={filters} - tableName={tableName} - sortParams={sortParams} - getRowClassName={getRowClassName} - renderErrorMessage={renderErrorMessage} - renderEmptyDataMessage={renderEmptyDataMessage} - onDataFetched={handleDataFetched} - isActive={isActive} - keepCache={keepCache} - />, - ); - i++; - } else { - // Find consecutive inactive chunks and merge them - const startIndex = i; - let totalHeight = 0; - - while (i < activeChunks.length && !activeChunks[i]) { - const currentChunkSize = - i === activeChunks.length - 1 ? 
lastChunkSize : chunkSize; - totalHeight += currentChunkSize * rowHeight; - i++; - } - - // Render merged empty tbody for consecutive inactive chunks - chunks.push( - , - ); - } - } - - return chunks; - }; + const {renderChunks} = useVirtualizedTbodies({ + activeChunks, + chunkSize, + lastChunkSize, + rowHeight, + columns, + fetchData, + filters, + tableName, + sortParams, + getRowClassName, + renderErrorMessage, + renderEmptyDataMessage, + onDataFetched: handleDataFetched, + keepCache, + }); const renderTable = () => ( diff --git a/src/components/PaginatedTable/useVirtualizedTbodies.tsx b/src/components/PaginatedTable/useVirtualizedTbodies.tsx new file mode 100644 index 000000000..0eb24e0d4 --- /dev/null +++ b/src/components/PaginatedTable/useVirtualizedTbodies.tsx @@ -0,0 +1,135 @@ +import React from 'react'; + +import {TableChunk} from './TableChunk'; +import type { + Column, + FetchData, + GetRowClassName, + PaginatedTableData, + RenderEmptyDataMessage, + RenderErrorMessage, + SortParams, +} from './types'; + +interface UseVirtualizedTbodiesProps { + activeChunks: boolean[]; + chunkSize: number; + lastChunkSize: number; + rowHeight: number; + columns: Column[]; + fetchData: FetchData; + filters?: F; + tableName: string; + sortParams?: SortParams; + getRowClassName?: GetRowClassName; + renderErrorMessage?: RenderErrorMessage; + renderEmptyDataMessage?: RenderEmptyDataMessage; + onDataFetched: (data?: PaginatedTableData) => void; + keepCache?: boolean; +} + +export const useVirtualizedTbodies = ({ + activeChunks, + chunkSize, + lastChunkSize, + rowHeight, + columns, + fetchData, + filters, + tableName, + sortParams, + getRowClassName, + renderErrorMessage, + renderEmptyDataMessage, + onDataFetched, + keepCache = true, +}: UseVirtualizedTbodiesProps) => { + // Reusable spacer tbody elements (max 2: before and after active chunks) + const beforeSpacerRef = React.useRef(null); + const afterSpacerRef = React.useRef(null); + + const renderChunks = React.useCallback(() => { + const chunks: React.ReactElement[] = []; + + // Count empty start chunks + let startEmptyCount = 0; + while (startEmptyCount < activeChunks.length && !activeChunks[startEmptyCount]) { + startEmptyCount++; + } + + // Push start spacer if needed + if (startEmptyCount > 0) { + chunks.push( + , + ); + } + + // Collect and push active chunks + for (let i = startEmptyCount; i < activeChunks.length && activeChunks[i]; i++) { + chunks.push( + + key={i} + id={i} + calculatedCount={i === activeChunks.length - 1 ? 
lastChunkSize : chunkSize} + chunkSize={chunkSize} + rowHeight={rowHeight} + columns={columns} + fetchData={fetchData} + filters={filters} + tableName={tableName} + sortParams={sortParams} + getRowClassName={getRowClassName} + renderErrorMessage={renderErrorMessage} + renderEmptyDataMessage={renderEmptyDataMessage} + onDataFetched={onDataFetched} + isActive={true} + keepCache={keepCache} + />, + ); + startEmptyCount = i + 1; + } + + // Count empty end chunks + const endEmptyCount = activeChunks.length - startEmptyCount; + + // Push end spacer if needed + if (endEmptyCount > 0) { + chunks.push( + , + ); + } + + return chunks; + }, [ + activeChunks, + chunkSize, + lastChunkSize, + rowHeight, + columns, + fetchData, + filters, + tableName, + sortParams, + getRowClassName, + renderErrorMessage, + renderEmptyDataMessage, + onDataFetched, + keepCache, + ]); + + return {renderChunks}; +}; From eaa7d4fcb633588ee1563c293d98222efeb99013 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Tue, 27 May 2025 16:27:46 +0300 Subject: [PATCH 03/11] fix: remove isActive --- src/components/PaginatedTable/TableChunk.tsx | 21 ++++++------------- .../PaginatedTable/useVirtualizedTbodies.tsx | 1 - 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/src/components/PaginatedTable/TableChunk.tsx b/src/components/PaginatedTable/TableChunk.tsx index 0a4085a7e..5dc3d8030 100644 --- a/src/components/PaginatedTable/TableChunk.tsx +++ b/src/components/PaginatedTable/TableChunk.tsx @@ -29,7 +29,6 @@ interface TableChunkProps { columns: Column[]; filters?: F; sortParams?: SortParams; - isActive: boolean; tableName: string; fetchData: FetchData; @@ -56,7 +55,6 @@ export const TableChunk = typedMemo(function TableChunk({ renderErrorMessage, renderEmptyDataMessage, onDataFetched, - isActive, keepCache, }: TableChunkProps) { const [isTimeoutActive, setIsTimeoutActive] = React.useState(true); @@ -75,7 +73,7 @@ export const TableChunk = typedMemo(function TableChunk({ }; tableDataApi.useFetchTableChunkQuery(queryParams, { - skip: isTimeoutActive || !isActive, + skip: isTimeoutActive, pollingInterval: autoRefreshInterval, refetchOnMountOrArgChange: !keepCache, }); @@ -85,7 +83,7 @@ export const TableChunk = typedMemo(function TableChunk({ React.useEffect(() => { let timeout = 0; - if (isActive && isTimeoutActive) { + if (isTimeoutActive) { timeout = window.setTimeout(() => { setIsTimeoutActive(false); }, DEBOUNCE_TIMEOUT); @@ -94,10 +92,10 @@ export const TableChunk = typedMemo(function TableChunk({ return () => { window.clearTimeout(timeout); }; - }, [isActive, isTimeoutActive]); + }, [isTimeoutActive]); React.useEffect(() => { - if (currentData && isActive) { + if (currentData) { onDataFetched({ ...currentData, data: currentData.data as T[], @@ -105,15 +103,11 @@ export const TableChunk = typedMemo(function TableChunk({ total: currentData.total || 0, }); } - }, [currentData, isActive, onDataFetched]); + }, [currentData, onDataFetched]); const dataLength = currentData?.data?.length || calculatedCount; const renderContent = () => { - if (!isActive) { - return null; - } - if (!currentData) { if (error) { const errorData = error as IResponseError; @@ -158,10 +152,7 @@ export const TableChunk = typedMemo(function TableChunk({ id={id.toString()} style={{ height: `${dataLength * rowHeight}px`, - // Default display: table-row-group doesn't work in Safari and breaks the table - // display: block works in Safari, but disconnects thead and tbody cell grids - // Hack to make it work in all cases - display: isActive ? 
'table-row-group' : 'block', + display: 'table-row-group', }} > {renderContent()} diff --git a/src/components/PaginatedTable/useVirtualizedTbodies.tsx b/src/components/PaginatedTable/useVirtualizedTbodies.tsx index 0eb24e0d4..5d4dc344a 100644 --- a/src/components/PaginatedTable/useVirtualizedTbodies.tsx +++ b/src/components/PaginatedTable/useVirtualizedTbodies.tsx @@ -89,7 +89,6 @@ export const useVirtualizedTbodies = ({ renderErrorMessage={renderErrorMessage} renderEmptyDataMessage={renderEmptyDataMessage} onDataFetched={onDataFetched} - isActive={true} keepCache={keepCache} />, ); From 6a3449ac3dc7e62e41dfe77a65032b8d9ec979d4 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Tue, 27 May 2025 16:53:57 +0300 Subject: [PATCH 04/11] fix: merge all rows in one tbody --- src/components/PaginatedTable/TableChunk.tsx | 12 +-------- .../PaginatedTable/useVirtualizedTbodies.tsx | 27 ++++++++++++++++--- 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/src/components/PaginatedTable/TableChunk.tsx b/src/components/PaginatedTable/TableChunk.tsx index 5dc3d8030..de140405f 100644 --- a/src/components/PaginatedTable/TableChunk.tsx +++ b/src/components/PaginatedTable/TableChunk.tsx @@ -147,15 +147,5 @@ export const TableChunk = typedMemo(function TableChunk({ )); }; - return ( - - {renderContent()} - - ); + return {renderContent()}; }); diff --git a/src/components/PaginatedTable/useVirtualizedTbodies.tsx b/src/components/PaginatedTable/useVirtualizedTbodies.tsx index 5d4dc344a..4d877313d 100644 --- a/src/components/PaginatedTable/useVirtualizedTbodies.tsx +++ b/src/components/PaginatedTable/useVirtualizedTbodies.tsx @@ -71,13 +71,19 @@ export const useVirtualizedTbodies = ({ ); } - // Collect and push active chunks + // Collect active chunks and calculate total height + const activeChunkElements: React.ReactElement[] = []; + let totalActiveHeight = 0; + for (let i = startEmptyCount; i < activeChunks.length && activeChunks[i]; i++) { - chunks.push( + const chunkRowCount = i === activeChunks.length - 1 ? lastChunkSize : chunkSize; + totalActiveHeight += chunkRowCount * rowHeight; + + activeChunkElements.push( key={i} id={i} - calculatedCount={i === activeChunks.length - 1 ? 
lastChunkSize : chunkSize} + calculatedCount={chunkRowCount} chunkSize={chunkSize} rowHeight={rowHeight} columns={columns} @@ -95,6 +101,21 @@ export const useVirtualizedTbodies = ({ startEmptyCount = i + 1; } + // Wrap active chunks in a single tbody with calculated height + if (activeChunkElements.length > 0) { + chunks.push( + + {activeChunkElements} + , + ); + } + // Count empty end chunks const endEmptyCount = activeChunks.length - startEmptyCount; From e90b9e52a9ed239a524b39e29b2bae88459d60f5 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Tue, 27 May 2025 17:08:16 +0300 Subject: [PATCH 05/11] fix: dont actually need ref --- src/components/PaginatedTable/useVirtualizedTbodies.tsx | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/components/PaginatedTable/useVirtualizedTbodies.tsx b/src/components/PaginatedTable/useVirtualizedTbodies.tsx index 4d877313d..d009227d4 100644 --- a/src/components/PaginatedTable/useVirtualizedTbodies.tsx +++ b/src/components/PaginatedTable/useVirtualizedTbodies.tsx @@ -44,10 +44,6 @@ export const useVirtualizedTbodies = ({ onDataFetched, keepCache = true, }: UseVirtualizedTbodiesProps) => { - // Reusable spacer tbody elements (max 2: before and after active chunks) - const beforeSpacerRef = React.useRef(null); - const afterSpacerRef = React.useRef(null); - const renderChunks = React.useCallback(() => { const chunks: React.ReactElement[] = []; @@ -62,7 +58,6 @@ export const useVirtualizedTbodies = ({ chunks.push( ({ chunks.push( Date: Tue, 20 May 2025 15:14:38 +0300 Subject: [PATCH 06/11] Revert "fix: remove mocks" This reverts commit a9e4182f14a1394148f1d9a92219560f1d15fed7. --- .../PaginatedStorageNodesTable/getNodes.ts | 50 ++-- .../PaginatedStorageNodesTable/nodes.ts | 224 ++++++++++++++++++ 2 files changed, 257 insertions(+), 17 deletions(-) create mode 100644 src/containers/Storage/PaginatedStorageNodesTable/nodes.ts diff --git a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts index c8cc7d7f5..c8ed3d393 100644 --- a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts +++ b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts @@ -13,6 +13,8 @@ import {prepareSortValue} from '../../../utils/filters'; import {getUptimeParamValue} from '../../../utils/nodes'; import {getRequiredDataFields} from '../../../utils/tableUtils/getRequiredDataFields'; +import {generateNodes} from './nodes'; + export const getStorageNodes: FetchData< PreparedStorageNode, PreparedStorageNodeFilters, @@ -43,23 +45,37 @@ export const getStorageNodes: FetchData< const sort = sortField ? 
prepareSortValue(sortField, sortOrder) : undefined; const dataFieldsRequired = getRequiredDataFields(columnsIds, NODES_COLUMNS_TO_DATA_FIELDS); - - const response = await window.api.viewer.getNodes({ - type, - storage, - limit, - offset, - sort, - filter: searchValue, - uptime: getUptimeParamValue(nodesUptimeFilter), - with: visibleEntities, - database, - node_id: nodeId, - group_id: groupId, - filter_group: filterGroup, - filter_group_by: filterGroupBy, - fieldsRequired: dataFieldsRequired, - }); + let response; + const urlParams = new URLSearchParams(window.location.search); + if (urlParams.get('mocks')) { + // Get mock configuration from URL parameters or use defaults + const pdisks = parseInt(urlParams.get('pdisks') || '10', 10); + const vdisksPerPDisk = parseInt(urlParams.get('vdisksPerPDisk') || '2', 10); + const totalNodes = parseInt(urlParams.get('totalNodes') || '50', 10); + response = generateNodes(totalNodes, { + maxVdisksPerPDisk: vdisksPerPDisk, + maxPdisks: pdisks, + offset, + limit, + }); + } else { + response = await window.api.viewer.getNodes({ + type, + storage, + limit, + offset, + sort, + filter: searchValue, + uptime: getUptimeParamValue(nodesUptimeFilter), + with: visibleEntities, + database, + node_id: nodeId, + group_id: groupId, + filter_group: filterGroup, + filter_group_by: filterGroupBy, + fieldsRequired: dataFieldsRequired, + }); + } const preparedResponse = prepareStorageNodesResponse(response); return { data: preparedResponse.nodes || [], diff --git a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts new file mode 100644 index 000000000..1c4cfe187 --- /dev/null +++ b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts @@ -0,0 +1,224 @@ +import {EFlag} from '../../../types/api/enums'; +import type { + TEndpoint, + TNodeInfo, + TNodesInfo, + TPoolStats, + TSystemStateInfo, +} from '../../../types/api/nodes'; +import {TPDiskState} from '../../../types/api/pdisk'; +import {EVDiskState} from '../../../types/api/vdisk'; + +// Different disk sizes to simulate variety (in bytes) +const DISK_SIZES = [ + '68719476736', // 64 GB + '137438953472', // 128 GB + '274877906944', // 256 GB + '549755813888', // 512 GB + '1099511627776', // 1 TB +]; + +const getRandomDiskSize = () => DISK_SIZES[Math.floor(Math.random() * DISK_SIZES.length)]; + +const generatePoolStats = (count = 5): TPoolStats[] => { + const poolNames = ['System', 'User', 'Batch', 'IO', 'IC'] as const; + return poolNames.slice(0, count).map((Name) => ({ + Name, + Usage: Math.random() * 0.02, + Threads: Math.floor(Math.random() * 3) + 1, + })); +}; + +const generateEndpoints = (): TEndpoint[] => [ + {Name: 'ic', Address: ':19001'}, + {Name: 'http-mon', Address: ':8765'}, + {Name: 'grpcs', Address: ':2135'}, + {Name: 'grpc', Address: ':2136'}, +]; + +const generateSystemState = (nodeId: number): TSystemStateInfo => ({ + StartTime: '1734358137851', + ChangeTime: '1734358421375', + LoadAverage: [3.381347656, 2.489257813, 1.279296875], + NumberOfCpus: 8, + SystemState: EFlag.Green, + NodeId: nodeId, + Host: `localhost-${nodeId}`, + Version: 'main.95ce0df', + PoolStats: generatePoolStats(), + Endpoints: generateEndpoints(), + Roles: ['Bootstrapper', 'StateStorage', 'StateStorageBoard', 'SchemeBoard', 'Storage'], + MemoryLimit: '2147483648', + MaxDiskUsage: 0.002349853516, + Location: { + DataCenter: '1', + Rack: '1', + Unit: '1', + }, + TotalSessions: 0, + CoresUsed: 0.07583969556, + CoresTotal: 8, +}); + +const generatePDisk = (nodeId: 
number, pdiskId: number, totalSize = '68719476736') => ({ + PDiskId: pdiskId, + ChangeTime: '1734358142074', + Path: `/ydb_data/pdisk${pdiskId}l3ki78no.data`, + Guid: pdiskId.toString(), + Category: '0', + TotalSize: totalSize, + AvailableSize: (Number(totalSize) * 0.9).toString(), // 90% available by default + State: TPDiskState.Normal, + NodeId: nodeId, + Device: EFlag.Green, + Realtime: EFlag.Green, + SerialNumber: '', + SystemSize: '213909504', + LogUsedSize: '35651584', + LogTotalSize: '68486692864', + EnforcedDynamicSlotSize: '22817013760', +}); + +const generateVDisk = (nodeId: number, vdiskId: number, pdiskId: number) => ({ + VDiskId: { + GroupID: vdiskId, + GroupGeneration: 1, + Ring: 0, + Domain: 0, + VDisk: 0, + }, + ChangeTime: '1734358420919', + PDiskId: pdiskId, + VDiskSlotId: vdiskId, + Guid: '1', + Kind: '0', + NodeId: nodeId, + VDiskState: EVDiskState.OK, + DiskSpace: EFlag.Green, + SatisfactionRank: { + FreshRank: { + Flag: EFlag.Green, + }, + LevelRank: { + Flag: EFlag.Green, + }, + }, + Replicated: true, + ReplicationProgress: 1, + ReplicationSecondsRemaining: 0, + AllocatedSize: '0', + AvailableSize: '22817013760', + HasUnreadableBlobs: false, + IncarnationGuid: '11528832187803248876', + InstanceGuid: '14836434871903384493', + FrontQueues: EFlag.Green, + StoragePoolName: 'static', + ReadThroughput: '0', + WriteThroughput: '420', +}); + +interface NodeGeneratorOptions { + maxVdisksPerPDisk?: number; + maxPdisks?: number; +} + +const DEFAULT_OPTIONS: NodeGeneratorOptions = { + maxVdisksPerPDisk: 3, + maxPdisks: 4, +}; + +const generateNode = (nodeId: number, options: NodeGeneratorOptions = {}): TNodeInfo => { + const maxPdisks = options.maxPdisks ?? DEFAULT_OPTIONS.maxPdisks!; + const maxVdisksPerPDisk = options.maxVdisksPerPDisk ?? DEFAULT_OPTIONS.maxVdisksPerPDisk!; + + // Generate a random number of pdisks up to maxPdisks + const pdisksCount = Math.floor(Math.random() * maxPdisks) + 1; + + // For each pdisk, generate a random number of vdisks up to maxVdisksPerPDisk + const pdiskVdisksCounts = Array.from({length: pdisksCount}, () => + Math.floor(Math.random() * maxVdisksPerPDisk), + ); + const totalVdisks = pdiskVdisksCounts.reduce((sum: number, count: number) => sum + count, 0); + + return { + NodeId: nodeId, + UptimeSeconds: 284, + CpuUsage: 0.00947996, + DiskSpaceUsage: 0.234985, + SystemState: generateSystemState(nodeId), + PDisks: Array.from({length: pdisksCount}, (_, i) => + generatePDisk(nodeId, i + 1, getRandomDiskSize()), + ), + VDisks: Array.from({length: totalVdisks}, (_, i) => { + // Find which pdisk this vdisk belongs to based on the distribution + let pdiskIndex = 0; + let vdiskCount = pdiskVdisksCounts[0]; + while (i >= vdiskCount && pdiskIndex < pdisksCount - 1) { + pdiskIndex++; + vdiskCount += pdiskVdisksCounts[pdiskIndex]; + } + return generateVDisk(nodeId, i, pdiskIndex + 1); + }), + }; +}; + +interface GenerateNodesOptions extends NodeGeneratorOptions { + offset?: number; + limit?: number; +} + +// Keep a cache of generated nodes to maintain consistency between paginated requests +let cachedNodes: TNodeInfo[] | null = null; +let currentTotalNodes = 50; // Default number of nodes + +export const generateNodes = (count?: number, options: GenerateNodesOptions = {}): TNodesInfo => { + const totalNodes = count ?? 
currentTotalNodes; + const {offset = 0, limit = totalNodes, maxVdisksPerPDisk, maxPdisks} = options; + + // Reset cache if total nodes count changes + if (totalNodes !== currentTotalNodes) { + cachedNodes = null; + currentTotalNodes = totalNodes; + } + + // Generate or use cached nodes + if (!cachedNodes) { + cachedNodes = Array.from({length: totalNodes}, (_, i) => + generateNode(i + 1, {maxVdisksPerPDisk, maxPdisks}), + ); + } + + // Calculate MaximumSlotsPerDisk and MaximumDisksPerNode across all nodes + let maxSlotsPerDisk = 0; + let maxDisksPerNode = 0; + + cachedNodes.forEach((node) => { + // Count pdisks per node + if (node.PDisks) { + maxDisksPerNode = Math.max(maxDisksPerNode, node.PDisks.length); + } + + // Count vdisks per pdisk + if (node.VDisks) { + const pdiskVdiskCounts = new Map(); + node.VDisks.forEach((vdisk) => { + if (typeof vdisk.PDiskId === 'number') { + const count = (pdiskVdiskCounts.get(vdisk.PDiskId) || 0) + 1; + pdiskVdiskCounts.set(vdisk.PDiskId, count); + maxSlotsPerDisk = Math.max(maxSlotsPerDisk, count); + } + }); + } + }); + + // Get the requested slice of nodes + const paginatedNodes = cachedNodes.slice(offset, offset + limit); + + return { + TotalNodes: totalNodes.toString(), + FoundNodes: totalNodes.toString(), + Nodes: paginatedNodes, + MaximumSlotsPerDisk: maxSlotsPerDisk.toString(), + MaximumDisksPerNode: maxDisksPerNode.toString(), + }; +}; From 6308914b2899626f58c1b67f87b604cbe901ebaf Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Tue, 27 May 2025 19:27:16 +0300 Subject: [PATCH 07/11] fix: virtualized rows --- .../PaginatedTable/PaginatedTable.tsx | 17 ++--- src/components/PaginatedTable/TableChunk.tsx | 64 +++++++++++++------ .../PaginatedTable/useScrollBasedChunks.ts | 54 ++++++++++------ .../PaginatedTable/useVirtualizedTbodies.tsx | 58 +++++++++-------- 4 files changed, 112 insertions(+), 81 deletions(-) diff --git a/src/components/PaginatedTable/PaginatedTable.tsx b/src/components/PaginatedTable/PaginatedTable.tsx index bad2c0a4e..aae742d58 100644 --- a/src/components/PaginatedTable/PaginatedTable.tsx +++ b/src/components/PaginatedTable/PaginatedTable.tsx @@ -36,7 +36,7 @@ export interface PaginatedTableProps { keepCache?: boolean; } -const DEFAULT_PAGINATION_LIMIT = 20; +const DEFAULT_PAGINATION_LIMIT = 50; export const PaginatedTable = ({ limit: chunkSize = DEFAULT_PAGINATION_LIMIT, @@ -63,7 +63,7 @@ export const PaginatedTable = ({ const tableRef = React.useRef(null); - const activeChunks = useScrollBasedChunks({ + const {visibleRowRange, totalItems} = useScrollBasedChunks({ scrollContainerRef, tableRef, totalItems: foundEntities, @@ -78,15 +78,6 @@ export const PaginatedTable = ({ setFilters(rawFilters); }, [rawFilters]); - const lastChunkSize = React.useMemo(() => { - // If foundEntities = 0, there will only first chunk - // Display it with 1 row, to display empty data message - if (!foundEntities) { - return 1; - } - return foundEntities % chunkSize || chunkSize; - }, [foundEntities, chunkSize]); - const handleDataFetched = React.useCallback( (data?: PaginatedTableData) => { if (data) { @@ -121,9 +112,9 @@ export const PaginatedTable = ({ }, [initialEntitiesCount, setTotalEntities, setFoundEntities, setIsInitialLoad]); const {renderChunks} = useVirtualizedTbodies({ - activeChunks, + visibleRowRange, + totalItems, chunkSize, - lastChunkSize, rowHeight, columns, fetchData, diff --git a/src/components/PaginatedTable/TableChunk.tsx b/src/components/PaginatedTable/TableChunk.tsx index de140405f..29870d280 100644 --- 
a/src/components/PaginatedTable/TableChunk.tsx +++ b/src/components/PaginatedTable/TableChunk.tsx @@ -30,6 +30,8 @@ interface TableChunkProps { filters?: F; sortParams?: SortParams; tableName: string; + startRow: number; + endRow: number; fetchData: FetchData; getRowClassName?: GetRowClassName; @@ -56,6 +58,8 @@ export const TableChunk = typedMemo(function TableChunk({ renderEmptyDataMessage, onDataFetched, keepCache, + startRow, + endRow, }: TableChunkProps) { const [isTimeoutActive, setIsTimeoutActive] = React.useState(true); const [autoRefreshInterval] = useAutoRefreshInterval(); @@ -111,41 +115,59 @@ export const TableChunk = typedMemo(function TableChunk({ if (!currentData) { if (error) { const errorData = error as IResponseError; - return ( - + return [ + {renderErrorMessage ? ( renderErrorMessage(errorData) ) : ( )} - - ); + , + ]; } else { - return getArray(dataLength).map((value) => ( - - )); + return getArray(dataLength) + .map((value, index) => { + const globalRowIndex = id * chunkSize + index; + + if (globalRowIndex < startRow || globalRowIndex > endRow) { + return null; + } + + return ; + }) + .filter(Boolean); } } // Data is loaded, but there are no entities in the chunk if (!currentData.data?.length) { - return ( - + return [ + {renderEmptyDataMessage ? renderEmptyDataMessage() : i18n('empty')} - - ); + , + ]; } - return currentData.data.map((rowData, index) => ( - - )); + return currentData.data + .map((rowData, index) => { + const globalRowIndex = id * chunkSize + index; + + if (globalRowIndex < startRow || globalRowIndex > endRow) { + return null; + } + + return ( + + ); + }) + .filter(Boolean); }; - return {renderContent()}; + return renderContent(); }); diff --git a/src/components/PaginatedTable/useScrollBasedChunks.ts b/src/components/PaginatedTable/useScrollBasedChunks.ts index 18883af4c..6184f19b9 100644 --- a/src/components/PaginatedTable/useScrollBasedChunks.ts +++ b/src/components/PaginatedTable/useScrollBasedChunks.ts @@ -11,7 +11,7 @@ interface UseScrollBasedChunksProps { overscanCount?: number; } -const DEFAULT_OVERSCAN_COUNT = 2; +const DEFAULT_OVERSCAN_COUNT = 15; export const useScrollBasedChunks = ({ scrollContainerRef, @@ -20,15 +20,18 @@ export const useScrollBasedChunks = ({ rowHeight, chunkSize, overscanCount = DEFAULT_OVERSCAN_COUNT, -}: UseScrollBasedChunksProps): boolean[] => { +}: UseScrollBasedChunksProps): { + visibleRowRange: {start: number; end: number}; + totalItems: number; +} => { const chunksCount = React.useMemo( () => Math.ceil(totalItems / chunkSize), [chunkSize, totalItems], ); - const [startChunk, setStartChunk] = React.useState(0); - const [endChunk, setEndChunk] = React.useState( - Math.min(overscanCount, Math.max(chunksCount - 1, 0)), + const [startRow, setStartRow] = React.useState(0); + const [endRow, setEndRow] = React.useState( + Math.min(overscanCount, Math.max(totalItems - 1, 0)), ); const calculateVisibleRange = React.useCallback(() => { @@ -43,19 +46,30 @@ export const useScrollBasedChunks = ({ const visibleStart = Math.max(containerScroll - tableOffset, 0); const visibleEnd = visibleStart + container.clientHeight; - const start = Math.max(Math.floor(visibleStart / rowHeight / chunkSize) - overscanCount, 0); - const end = Math.min( - Math.floor(visibleEnd / rowHeight / chunkSize) + overscanCount, - Math.max(chunksCount - 1, 0), - ); - return {start, end}; - }, [scrollContainerRef, tableRef, rowHeight, chunkSize, overscanCount, chunksCount]); + // Calculate row range first + const rowStart = Math.max(Math.floor(visibleStart / 
rowHeight) - overscanCount, 0); + const rowEnd = Math.min(Math.floor(visibleEnd / rowHeight) + overscanCount, totalItems - 1); + + // Calculate chunk range from row range + const start = Math.max(Math.floor(rowStart / chunkSize), 0); + const end = Math.min(Math.floor(rowEnd / chunkSize), Math.max(chunksCount - 1, 0)); + + return {start, end, rowStart, rowEnd}; + }, [ + scrollContainerRef, + tableRef, + rowHeight, + chunkSize, + overscanCount, + chunksCount, + totalItems, + ]); const updateVisibleChunks = React.useCallback(() => { const newRange = calculateVisibleRange(); if (newRange) { - setStartChunk(newRange.start); - setEndChunk(newRange.end); + setStartRow(newRange.rowStart); + setEndRow(newRange.rowEnd); } }, [calculateVisibleRange]); @@ -94,11 +108,9 @@ export const useScrollBasedChunks = ({ }, [handleScroll, scrollContainerRef]); return React.useMemo(() => { - // boolean array that represents active chunks - const activeChunks = Array(chunksCount).fill(false); - for (let i = startChunk; i <= endChunk; i++) { - activeChunks[i] = true; - } - return activeChunks; - }, [chunksCount, startChunk, endChunk]); + return { + visibleRowRange: {start: startRow, end: endRow}, + totalItems, + }; + }, [startRow, endRow, totalItems]); }; diff --git a/src/components/PaginatedTable/useVirtualizedTbodies.tsx b/src/components/PaginatedTable/useVirtualizedTbodies.tsx index d009227d4..0948868ef 100644 --- a/src/components/PaginatedTable/useVirtualizedTbodies.tsx +++ b/src/components/PaginatedTable/useVirtualizedTbodies.tsx @@ -12,9 +12,9 @@ import type { } from './types'; interface UseVirtualizedTbodiesProps { - activeChunks: boolean[]; + visibleRowRange: {start: number; end: number}; + totalItems: number; chunkSize: number; - lastChunkSize: number; rowHeight: number; columns: Column[]; fetchData: FetchData; @@ -29,9 +29,9 @@ interface UseVirtualizedTbodiesProps { } export const useVirtualizedTbodies = ({ - activeChunks, + visibleRowRange, + totalItems, chunkSize, - lastChunkSize, rowHeight, columns, fetchData, @@ -44,35 +44,36 @@ export const useVirtualizedTbodies = ({ onDataFetched, keepCache = true, }: UseVirtualizedTbodiesProps) => { + const startRow = visibleRowRange.start; + const endRow = visibleRowRange.end; + const renderChunks = React.useCallback(() => { const chunks: React.ReactElement[] = []; - // Count empty start chunks - let startEmptyCount = 0; - while (startEmptyCount < activeChunks.length && !activeChunks[startEmptyCount]) { - startEmptyCount++; - } + // Calculate which chunks contain visible rows + const totalChunks = Math.ceil(totalItems / chunkSize); + const startChunk = Math.max(0, Math.floor(startRow / chunkSize)); + const endChunk = Math.min(totalChunks - 1, Math.floor(endRow / chunkSize)); - // Push start spacer if needed - if (startEmptyCount > 0) { + // Push start spacer for rows before visible range + const startSpacerHeight = startRow * rowHeight; + if (startSpacerHeight > 0) { chunks.push( , ); } - // Collect active chunks and calculate total height + // Collect active chunks and calculate height for visible rows only const activeChunkElements: React.ReactElement[] = []; - let totalActiveHeight = 0; - for (let i = startEmptyCount; i < activeChunks.length && activeChunks[i]; i++) { - const chunkRowCount = i === activeChunks.length - 1 ? lastChunkSize : chunkSize; - totalActiveHeight += chunkRowCount * rowHeight; + for (let i = startChunk; i <= endChunk; i++) { + const chunkRowCount = i === totalChunks - 1 ? 
totalItems - i * chunkSize : chunkSize; activeChunkElements.push( @@ -91,13 +92,18 @@ export const useVirtualizedTbodies = ({ renderEmptyDataMessage={renderEmptyDataMessage} onDataFetched={onDataFetched} keepCache={keepCache} + startRow={startRow} + endRow={endRow} />, ); - startEmptyCount = i + 1; } - // Wrap active chunks in a single tbody with calculated height + // Wrap active chunks in a single tbody if (activeChunkElements.length > 0) { + // Calculate height based on visible rows only + const visibleRowCount = endRow - startRow + 1; + const totalActiveHeight = visibleRowCount * rowHeight; + chunks.push( ({ ); } - // Count empty end chunks - const endEmptyCount = activeChunks.length - startEmptyCount; + // Add end spacer for rows after visible range + const endSpacerHeight = Math.max(0, (totalItems - startRow - 1) * rowHeight); - // Push end spacer if needed - if (endEmptyCount > 0) { + if (endSpacerHeight > 0) { chunks.push( , @@ -129,9 +134,10 @@ export const useVirtualizedTbodies = ({ return chunks; }, [ - activeChunks, + startRow, + endRow, + totalItems, chunkSize, - lastChunkSize, rowHeight, columns, fetchData, From f5f9df940b9328d83ae7484b868190e856d3c981 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Tue, 27 May 2025 20:16:26 +0300 Subject: [PATCH 08/11] Revert "Revert "fix: remove mocks"" This reverts commit eb21487867ffbaf1a977239c84ef287a82d55eb7. --- .../PaginatedStorageNodesTable/getNodes.ts | 50 ++-- .../PaginatedStorageNodesTable/nodes.ts | 224 ------------------ 2 files changed, 17 insertions(+), 257 deletions(-) delete mode 100644 src/containers/Storage/PaginatedStorageNodesTable/nodes.ts diff --git a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts index c8ed3d393..c8cc7d7f5 100644 --- a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts +++ b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts @@ -13,8 +13,6 @@ import {prepareSortValue} from '../../../utils/filters'; import {getUptimeParamValue} from '../../../utils/nodes'; import {getRequiredDataFields} from '../../../utils/tableUtils/getRequiredDataFields'; -import {generateNodes} from './nodes'; - export const getStorageNodes: FetchData< PreparedStorageNode, PreparedStorageNodeFilters, @@ -45,37 +43,23 @@ export const getStorageNodes: FetchData< const sort = sortField ? 
prepareSortValue(sortField, sortOrder) : undefined; const dataFieldsRequired = getRequiredDataFields(columnsIds, NODES_COLUMNS_TO_DATA_FIELDS); - let response; - const urlParams = new URLSearchParams(window.location.search); - if (urlParams.get('mocks')) { - // Get mock configuration from URL parameters or use defaults - const pdisks = parseInt(urlParams.get('pdisks') || '10', 10); - const vdisksPerPDisk = parseInt(urlParams.get('vdisksPerPDisk') || '2', 10); - const totalNodes = parseInt(urlParams.get('totalNodes') || '50', 10); - response = generateNodes(totalNodes, { - maxVdisksPerPDisk: vdisksPerPDisk, - maxPdisks: pdisks, - offset, - limit, - }); - } else { - response = await window.api.viewer.getNodes({ - type, - storage, - limit, - offset, - sort, - filter: searchValue, - uptime: getUptimeParamValue(nodesUptimeFilter), - with: visibleEntities, - database, - node_id: nodeId, - group_id: groupId, - filter_group: filterGroup, - filter_group_by: filterGroupBy, - fieldsRequired: dataFieldsRequired, - }); - } + + const response = await window.api.viewer.getNodes({ + type, + storage, + limit, + offset, + sort, + filter: searchValue, + uptime: getUptimeParamValue(nodesUptimeFilter), + with: visibleEntities, + database, + node_id: nodeId, + group_id: groupId, + filter_group: filterGroup, + filter_group_by: filterGroupBy, + fieldsRequired: dataFieldsRequired, + }); const preparedResponse = prepareStorageNodesResponse(response); return { data: preparedResponse.nodes || [], diff --git a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts deleted file mode 100644 index 1c4cfe187..000000000 --- a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts +++ /dev/null @@ -1,224 +0,0 @@ -import {EFlag} from '../../../types/api/enums'; -import type { - TEndpoint, - TNodeInfo, - TNodesInfo, - TPoolStats, - TSystemStateInfo, -} from '../../../types/api/nodes'; -import {TPDiskState} from '../../../types/api/pdisk'; -import {EVDiskState} from '../../../types/api/vdisk'; - -// Different disk sizes to simulate variety (in bytes) -const DISK_SIZES = [ - '68719476736', // 64 GB - '137438953472', // 128 GB - '274877906944', // 256 GB - '549755813888', // 512 GB - '1099511627776', // 1 TB -]; - -const getRandomDiskSize = () => DISK_SIZES[Math.floor(Math.random() * DISK_SIZES.length)]; - -const generatePoolStats = (count = 5): TPoolStats[] => { - const poolNames = ['System', 'User', 'Batch', 'IO', 'IC'] as const; - return poolNames.slice(0, count).map((Name) => ({ - Name, - Usage: Math.random() * 0.02, - Threads: Math.floor(Math.random() * 3) + 1, - })); -}; - -const generateEndpoints = (): TEndpoint[] => [ - {Name: 'ic', Address: ':19001'}, - {Name: 'http-mon', Address: ':8765'}, - {Name: 'grpcs', Address: ':2135'}, - {Name: 'grpc', Address: ':2136'}, -]; - -const generateSystemState = (nodeId: number): TSystemStateInfo => ({ - StartTime: '1734358137851', - ChangeTime: '1734358421375', - LoadAverage: [3.381347656, 2.489257813, 1.279296875], - NumberOfCpus: 8, - SystemState: EFlag.Green, - NodeId: nodeId, - Host: `localhost-${nodeId}`, - Version: 'main.95ce0df', - PoolStats: generatePoolStats(), - Endpoints: generateEndpoints(), - Roles: ['Bootstrapper', 'StateStorage', 'StateStorageBoard', 'SchemeBoard', 'Storage'], - MemoryLimit: '2147483648', - MaxDiskUsage: 0.002349853516, - Location: { - DataCenter: '1', - Rack: '1', - Unit: '1', - }, - TotalSessions: 0, - CoresUsed: 0.07583969556, - CoresTotal: 8, -}); - -const generatePDisk = (nodeId: 
number, pdiskId: number, totalSize = '68719476736') => ({ - PDiskId: pdiskId, - ChangeTime: '1734358142074', - Path: `/ydb_data/pdisk${pdiskId}l3ki78no.data`, - Guid: pdiskId.toString(), - Category: '0', - TotalSize: totalSize, - AvailableSize: (Number(totalSize) * 0.9).toString(), // 90% available by default - State: TPDiskState.Normal, - NodeId: nodeId, - Device: EFlag.Green, - Realtime: EFlag.Green, - SerialNumber: '', - SystemSize: '213909504', - LogUsedSize: '35651584', - LogTotalSize: '68486692864', - EnforcedDynamicSlotSize: '22817013760', -}); - -const generateVDisk = (nodeId: number, vdiskId: number, pdiskId: number) => ({ - VDiskId: { - GroupID: vdiskId, - GroupGeneration: 1, - Ring: 0, - Domain: 0, - VDisk: 0, - }, - ChangeTime: '1734358420919', - PDiskId: pdiskId, - VDiskSlotId: vdiskId, - Guid: '1', - Kind: '0', - NodeId: nodeId, - VDiskState: EVDiskState.OK, - DiskSpace: EFlag.Green, - SatisfactionRank: { - FreshRank: { - Flag: EFlag.Green, - }, - LevelRank: { - Flag: EFlag.Green, - }, - }, - Replicated: true, - ReplicationProgress: 1, - ReplicationSecondsRemaining: 0, - AllocatedSize: '0', - AvailableSize: '22817013760', - HasUnreadableBlobs: false, - IncarnationGuid: '11528832187803248876', - InstanceGuid: '14836434871903384493', - FrontQueues: EFlag.Green, - StoragePoolName: 'static', - ReadThroughput: '0', - WriteThroughput: '420', -}); - -interface NodeGeneratorOptions { - maxVdisksPerPDisk?: number; - maxPdisks?: number; -} - -const DEFAULT_OPTIONS: NodeGeneratorOptions = { - maxVdisksPerPDisk: 3, - maxPdisks: 4, -}; - -const generateNode = (nodeId: number, options: NodeGeneratorOptions = {}): TNodeInfo => { - const maxPdisks = options.maxPdisks ?? DEFAULT_OPTIONS.maxPdisks!; - const maxVdisksPerPDisk = options.maxVdisksPerPDisk ?? DEFAULT_OPTIONS.maxVdisksPerPDisk!; - - // Generate a random number of pdisks up to maxPdisks - const pdisksCount = Math.floor(Math.random() * maxPdisks) + 1; - - // For each pdisk, generate a random number of vdisks up to maxVdisksPerPDisk - const pdiskVdisksCounts = Array.from({length: pdisksCount}, () => - Math.floor(Math.random() * maxVdisksPerPDisk), - ); - const totalVdisks = pdiskVdisksCounts.reduce((sum: number, count: number) => sum + count, 0); - - return { - NodeId: nodeId, - UptimeSeconds: 284, - CpuUsage: 0.00947996, - DiskSpaceUsage: 0.234985, - SystemState: generateSystemState(nodeId), - PDisks: Array.from({length: pdisksCount}, (_, i) => - generatePDisk(nodeId, i + 1, getRandomDiskSize()), - ), - VDisks: Array.from({length: totalVdisks}, (_, i) => { - // Find which pdisk this vdisk belongs to based on the distribution - let pdiskIndex = 0; - let vdiskCount = pdiskVdisksCounts[0]; - while (i >= vdiskCount && pdiskIndex < pdisksCount - 1) { - pdiskIndex++; - vdiskCount += pdiskVdisksCounts[pdiskIndex]; - } - return generateVDisk(nodeId, i, pdiskIndex + 1); - }), - }; -}; - -interface GenerateNodesOptions extends NodeGeneratorOptions { - offset?: number; - limit?: number; -} - -// Keep a cache of generated nodes to maintain consistency between paginated requests -let cachedNodes: TNodeInfo[] | null = null; -let currentTotalNodes = 50; // Default number of nodes - -export const generateNodes = (count?: number, options: GenerateNodesOptions = {}): TNodesInfo => { - const totalNodes = count ?? 
currentTotalNodes; - const {offset = 0, limit = totalNodes, maxVdisksPerPDisk, maxPdisks} = options; - - // Reset cache if total nodes count changes - if (totalNodes !== currentTotalNodes) { - cachedNodes = null; - currentTotalNodes = totalNodes; - } - - // Generate or use cached nodes - if (!cachedNodes) { - cachedNodes = Array.from({length: totalNodes}, (_, i) => - generateNode(i + 1, {maxVdisksPerPDisk, maxPdisks}), - ); - } - - // Calculate MaximumSlotsPerDisk and MaximumDisksPerNode across all nodes - let maxSlotsPerDisk = 0; - let maxDisksPerNode = 0; - - cachedNodes.forEach((node) => { - // Count pdisks per node - if (node.PDisks) { - maxDisksPerNode = Math.max(maxDisksPerNode, node.PDisks.length); - } - - // Count vdisks per pdisk - if (node.VDisks) { - const pdiskVdiskCounts = new Map(); - node.VDisks.forEach((vdisk) => { - if (typeof vdisk.PDiskId === 'number') { - const count = (pdiskVdiskCounts.get(vdisk.PDiskId) || 0) + 1; - pdiskVdiskCounts.set(vdisk.PDiskId, count); - maxSlotsPerDisk = Math.max(maxSlotsPerDisk, count); - } - }); - } - }); - - // Get the requested slice of nodes - const paginatedNodes = cachedNodes.slice(offset, offset + limit); - - return { - TotalNodes: totalNodes.toString(), - FoundNodes: totalNodes.toString(), - Nodes: paginatedNodes, - MaximumSlotsPerDisk: maxSlotsPerDisk.toString(), - MaximumDisksPerNode: maxDisksPerNode.toString(), - }; -}; From 892214318d6b72f4318f7c11a8a61f08be32ae3e Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Tue, 27 May 2025 20:16:38 +0300 Subject: [PATCH 09/11] Revert "Revert "Revert "fix: remove mocks""" This reverts commit f5f9df940b9328d83ae7484b868190e856d3c981. --- .../PaginatedStorageNodesTable/getNodes.ts | 50 ++-- .../PaginatedStorageNodesTable/nodes.ts | 224 ++++++++++++++++++ 2 files changed, 257 insertions(+), 17 deletions(-) create mode 100644 src/containers/Storage/PaginatedStorageNodesTable/nodes.ts diff --git a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts index c8cc7d7f5..c8ed3d393 100644 --- a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts +++ b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts @@ -13,6 +13,8 @@ import {prepareSortValue} from '../../../utils/filters'; import {getUptimeParamValue} from '../../../utils/nodes'; import {getRequiredDataFields} from '../../../utils/tableUtils/getRequiredDataFields'; +import {generateNodes} from './nodes'; + export const getStorageNodes: FetchData< PreparedStorageNode, PreparedStorageNodeFilters, @@ -43,23 +45,37 @@ export const getStorageNodes: FetchData< const sort = sortField ? 
prepareSortValue(sortField, sortOrder) : undefined; const dataFieldsRequired = getRequiredDataFields(columnsIds, NODES_COLUMNS_TO_DATA_FIELDS); - - const response = await window.api.viewer.getNodes({ - type, - storage, - limit, - offset, - sort, - filter: searchValue, - uptime: getUptimeParamValue(nodesUptimeFilter), - with: visibleEntities, - database, - node_id: nodeId, - group_id: groupId, - filter_group: filterGroup, - filter_group_by: filterGroupBy, - fieldsRequired: dataFieldsRequired, - }); + let response; + const urlParams = new URLSearchParams(window.location.search); + if (urlParams.get('mocks')) { + // Get mock configuration from URL parameters or use defaults + const pdisks = parseInt(urlParams.get('pdisks') || '10', 10); + const vdisksPerPDisk = parseInt(urlParams.get('vdisksPerPDisk') || '2', 10); + const totalNodes = parseInt(urlParams.get('totalNodes') || '50', 10); + response = generateNodes(totalNodes, { + maxVdisksPerPDisk: vdisksPerPDisk, + maxPdisks: pdisks, + offset, + limit, + }); + } else { + response = await window.api.viewer.getNodes({ + type, + storage, + limit, + offset, + sort, + filter: searchValue, + uptime: getUptimeParamValue(nodesUptimeFilter), + with: visibleEntities, + database, + node_id: nodeId, + group_id: groupId, + filter_group: filterGroup, + filter_group_by: filterGroupBy, + fieldsRequired: dataFieldsRequired, + }); + } const preparedResponse = prepareStorageNodesResponse(response); return { data: preparedResponse.nodes || [], diff --git a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts new file mode 100644 index 000000000..1c4cfe187 --- /dev/null +++ b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts @@ -0,0 +1,224 @@ +import {EFlag} from '../../../types/api/enums'; +import type { + TEndpoint, + TNodeInfo, + TNodesInfo, + TPoolStats, + TSystemStateInfo, +} from '../../../types/api/nodes'; +import {TPDiskState} from '../../../types/api/pdisk'; +import {EVDiskState} from '../../../types/api/vdisk'; + +// Different disk sizes to simulate variety (in bytes) +const DISK_SIZES = [ + '68719476736', // 64 GB + '137438953472', // 128 GB + '274877906944', // 256 GB + '549755813888', // 512 GB + '1099511627776', // 1 TB +]; + +const getRandomDiskSize = () => DISK_SIZES[Math.floor(Math.random() * DISK_SIZES.length)]; + +const generatePoolStats = (count = 5): TPoolStats[] => { + const poolNames = ['System', 'User', 'Batch', 'IO', 'IC'] as const; + return poolNames.slice(0, count).map((Name) => ({ + Name, + Usage: Math.random() * 0.02, + Threads: Math.floor(Math.random() * 3) + 1, + })); +}; + +const generateEndpoints = (): TEndpoint[] => [ + {Name: 'ic', Address: ':19001'}, + {Name: 'http-mon', Address: ':8765'}, + {Name: 'grpcs', Address: ':2135'}, + {Name: 'grpc', Address: ':2136'}, +]; + +const generateSystemState = (nodeId: number): TSystemStateInfo => ({ + StartTime: '1734358137851', + ChangeTime: '1734358421375', + LoadAverage: [3.381347656, 2.489257813, 1.279296875], + NumberOfCpus: 8, + SystemState: EFlag.Green, + NodeId: nodeId, + Host: `localhost-${nodeId}`, + Version: 'main.95ce0df', + PoolStats: generatePoolStats(), + Endpoints: generateEndpoints(), + Roles: ['Bootstrapper', 'StateStorage', 'StateStorageBoard', 'SchemeBoard', 'Storage'], + MemoryLimit: '2147483648', + MaxDiskUsage: 0.002349853516, + Location: { + DataCenter: '1', + Rack: '1', + Unit: '1', + }, + TotalSessions: 0, + CoresUsed: 0.07583969556, + CoresTotal: 8, +}); + +const generatePDisk = (nodeId: 
number, pdiskId: number, totalSize = '68719476736') => ({ + PDiskId: pdiskId, + ChangeTime: '1734358142074', + Path: `/ydb_data/pdisk${pdiskId}l3ki78no.data`, + Guid: pdiskId.toString(), + Category: '0', + TotalSize: totalSize, + AvailableSize: (Number(totalSize) * 0.9).toString(), // 90% available by default + State: TPDiskState.Normal, + NodeId: nodeId, + Device: EFlag.Green, + Realtime: EFlag.Green, + SerialNumber: '', + SystemSize: '213909504', + LogUsedSize: '35651584', + LogTotalSize: '68486692864', + EnforcedDynamicSlotSize: '22817013760', +}); + +const generateVDisk = (nodeId: number, vdiskId: number, pdiskId: number) => ({ + VDiskId: { + GroupID: vdiskId, + GroupGeneration: 1, + Ring: 0, + Domain: 0, + VDisk: 0, + }, + ChangeTime: '1734358420919', + PDiskId: pdiskId, + VDiskSlotId: vdiskId, + Guid: '1', + Kind: '0', + NodeId: nodeId, + VDiskState: EVDiskState.OK, + DiskSpace: EFlag.Green, + SatisfactionRank: { + FreshRank: { + Flag: EFlag.Green, + }, + LevelRank: { + Flag: EFlag.Green, + }, + }, + Replicated: true, + ReplicationProgress: 1, + ReplicationSecondsRemaining: 0, + AllocatedSize: '0', + AvailableSize: '22817013760', + HasUnreadableBlobs: false, + IncarnationGuid: '11528832187803248876', + InstanceGuid: '14836434871903384493', + FrontQueues: EFlag.Green, + StoragePoolName: 'static', + ReadThroughput: '0', + WriteThroughput: '420', +}); + +interface NodeGeneratorOptions { + maxVdisksPerPDisk?: number; + maxPdisks?: number; +} + +const DEFAULT_OPTIONS: NodeGeneratorOptions = { + maxVdisksPerPDisk: 3, + maxPdisks: 4, +}; + +const generateNode = (nodeId: number, options: NodeGeneratorOptions = {}): TNodeInfo => { + const maxPdisks = options.maxPdisks ?? DEFAULT_OPTIONS.maxPdisks!; + const maxVdisksPerPDisk = options.maxVdisksPerPDisk ?? DEFAULT_OPTIONS.maxVdisksPerPDisk!; + + // Generate a random number of pdisks up to maxPdisks + const pdisksCount = Math.floor(Math.random() * maxPdisks) + 1; + + // For each pdisk, generate a random number of vdisks up to maxVdisksPerPDisk + const pdiskVdisksCounts = Array.from({length: pdisksCount}, () => + Math.floor(Math.random() * maxVdisksPerPDisk), + ); + const totalVdisks = pdiskVdisksCounts.reduce((sum: number, count: number) => sum + count, 0); + + return { + NodeId: nodeId, + UptimeSeconds: 284, + CpuUsage: 0.00947996, + DiskSpaceUsage: 0.234985, + SystemState: generateSystemState(nodeId), + PDisks: Array.from({length: pdisksCount}, (_, i) => + generatePDisk(nodeId, i + 1, getRandomDiskSize()), + ), + VDisks: Array.from({length: totalVdisks}, (_, i) => { + // Find which pdisk this vdisk belongs to based on the distribution + let pdiskIndex = 0; + let vdiskCount = pdiskVdisksCounts[0]; + while (i >= vdiskCount && pdiskIndex < pdisksCount - 1) { + pdiskIndex++; + vdiskCount += pdiskVdisksCounts[pdiskIndex]; + } + return generateVDisk(nodeId, i, pdiskIndex + 1); + }), + }; +}; + +interface GenerateNodesOptions extends NodeGeneratorOptions { + offset?: number; + limit?: number; +} + +// Keep a cache of generated nodes to maintain consistency between paginated requests +let cachedNodes: TNodeInfo[] | null = null; +let currentTotalNodes = 50; // Default number of nodes + +export const generateNodes = (count?: number, options: GenerateNodesOptions = {}): TNodesInfo => { + const totalNodes = count ?? 
currentTotalNodes; + const {offset = 0, limit = totalNodes, maxVdisksPerPDisk, maxPdisks} = options; + + // Reset cache if total nodes count changes + if (totalNodes !== currentTotalNodes) { + cachedNodes = null; + currentTotalNodes = totalNodes; + } + + // Generate or use cached nodes + if (!cachedNodes) { + cachedNodes = Array.from({length: totalNodes}, (_, i) => + generateNode(i + 1, {maxVdisksPerPDisk, maxPdisks}), + ); + } + + // Calculate MaximumSlotsPerDisk and MaximumDisksPerNode across all nodes + let maxSlotsPerDisk = 0; + let maxDisksPerNode = 0; + + cachedNodes.forEach((node) => { + // Count pdisks per node + if (node.PDisks) { + maxDisksPerNode = Math.max(maxDisksPerNode, node.PDisks.length); + } + + // Count vdisks per pdisk + if (node.VDisks) { + const pdiskVdiskCounts = new Map(); + node.VDisks.forEach((vdisk) => { + if (typeof vdisk.PDiskId === 'number') { + const count = (pdiskVdiskCounts.get(vdisk.PDiskId) || 0) + 1; + pdiskVdiskCounts.set(vdisk.PDiskId, count); + maxSlotsPerDisk = Math.max(maxSlotsPerDisk, count); + } + }); + } + }); + + // Get the requested slice of nodes + const paginatedNodes = cachedNodes.slice(offset, offset + limit); + + return { + TotalNodes: totalNodes.toString(), + FoundNodes: totalNodes.toString(), + Nodes: paginatedNodes, + MaximumSlotsPerDisk: maxSlotsPerDisk.toString(), + MaximumDisksPerNode: maxDisksPerNode.toString(), + }; +}; From 672682092e1445370f523b92f2258ece82aa68a4 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Tue, 27 May 2025 21:16:07 +0300 Subject: [PATCH 10/11] fix: remove spacers - use absolute positioning --- .../PaginatedTable/PaginatedTable.tsx | 4 +- .../PaginatedTable/useVirtualizedTbodies.tsx | 64 ++++--------------- 2 files changed, 16 insertions(+), 52 deletions(-) diff --git a/src/components/PaginatedTable/PaginatedTable.tsx b/src/components/PaginatedTable/PaginatedTable.tsx index aae742d58..72fd89959 100644 --- a/src/components/PaginatedTable/PaginatedTable.tsx +++ b/src/components/PaginatedTable/PaginatedTable.tsx @@ -131,7 +131,9 @@ export const PaginatedTable = ({ const renderTable = () => (
-            {renderChunks()}
+            
+                {renderChunks()}
+            
); diff --git a/src/components/PaginatedTable/useVirtualizedTbodies.tsx b/src/components/PaginatedTable/useVirtualizedTbodies.tsx index 0948868ef..0e9b24530 100644 --- a/src/components/PaginatedTable/useVirtualizedTbodies.tsx +++ b/src/components/PaginatedTable/useVirtualizedTbodies.tsx @@ -48,28 +48,12 @@ export const useVirtualizedTbodies = ({ const endRow = visibleRowRange.end; const renderChunks = React.useCallback(() => { - const chunks: React.ReactElement[] = []; - // Calculate which chunks contain visible rows const totalChunks = Math.ceil(totalItems / chunkSize); const startChunk = Math.max(0, Math.floor(startRow / chunkSize)); const endChunk = Math.min(totalChunks - 1, Math.floor(endRow / chunkSize)); - // Push start spacer for rows before visible range - const startSpacerHeight = startRow * rowHeight; - if (startSpacerHeight > 0) { - chunks.push( - , - ); - } - - // Collect active chunks and calculate height for visible rows only + // Collect active chunks const activeChunkElements: React.ReactElement[] = []; for (let i = startChunk; i <= endChunk; i++) { @@ -98,41 +82,19 @@ export const useVirtualizedTbodies = ({ ); } - // Wrap active chunks in a single tbody - if (activeChunkElements.length > 0) { - // Calculate height based on visible rows only - const visibleRowCount = endRow - startRow + 1; - const totalActiveHeight = visibleRowCount * rowHeight; - - chunks.push( - - {activeChunkElements} - , - ); - } - - // Add end spacer for rows after visible range - const endSpacerHeight = Math.max(0, (totalItems - startRow - 1) * rowHeight); - - if (endSpacerHeight > 0) { - chunks.push( - , - ); - } + const activeChunksTopOffset = startRow * rowHeight; - return chunks; + return ( + + {activeChunkElements} + + ); }, [ startRow, endRow, From 5e388371596e7f8d8855dbafa7720fc3fa0cb029 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Tue, 27 May 2025 21:16:23 +0300 Subject: [PATCH 11/11] Revert "Revert "Revert "Revert "fix: remove mocks"""" This reverts commit 892214318d6b72f4318f7c11a8a61f08be32ae3e. --- .../PaginatedStorageNodesTable/getNodes.ts | 50 ++-- .../PaginatedStorageNodesTable/nodes.ts | 224 ------------------ 2 files changed, 17 insertions(+), 257 deletions(-) delete mode 100644 src/containers/Storage/PaginatedStorageNodesTable/nodes.ts diff --git a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts index c8ed3d393..c8cc7d7f5 100644 --- a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts +++ b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts @@ -13,8 +13,6 @@ import {prepareSortValue} from '../../../utils/filters'; import {getUptimeParamValue} from '../../../utils/nodes'; import {getRequiredDataFields} from '../../../utils/tableUtils/getRequiredDataFields'; -import {generateNodes} from './nodes'; - export const getStorageNodes: FetchData< PreparedStorageNode, PreparedStorageNodeFilters, @@ -45,37 +43,23 @@ export const getStorageNodes: FetchData< const sort = sortField ? 
prepareSortValue(sortField, sortOrder) : undefined; const dataFieldsRequired = getRequiredDataFields(columnsIds, NODES_COLUMNS_TO_DATA_FIELDS); - let response; - const urlParams = new URLSearchParams(window.location.search); - if (urlParams.get('mocks')) { - // Get mock configuration from URL parameters or use defaults - const pdisks = parseInt(urlParams.get('pdisks') || '10', 10); - const vdisksPerPDisk = parseInt(urlParams.get('vdisksPerPDisk') || '2', 10); - const totalNodes = parseInt(urlParams.get('totalNodes') || '50', 10); - response = generateNodes(totalNodes, { - maxVdisksPerPDisk: vdisksPerPDisk, - maxPdisks: pdisks, - offset, - limit, - }); - } else { - response = await window.api.viewer.getNodes({ - type, - storage, - limit, - offset, - sort, - filter: searchValue, - uptime: getUptimeParamValue(nodesUptimeFilter), - with: visibleEntities, - database, - node_id: nodeId, - group_id: groupId, - filter_group: filterGroup, - filter_group_by: filterGroupBy, - fieldsRequired: dataFieldsRequired, - }); - } + + const response = await window.api.viewer.getNodes({ + type, + storage, + limit, + offset, + sort, + filter: searchValue, + uptime: getUptimeParamValue(nodesUptimeFilter), + with: visibleEntities, + database, + node_id: nodeId, + group_id: groupId, + filter_group: filterGroup, + filter_group_by: filterGroupBy, + fieldsRequired: dataFieldsRequired, + }); const preparedResponse = prepareStorageNodesResponse(response); return { data: preparedResponse.nodes || [], diff --git a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts deleted file mode 100644 index 1c4cfe187..000000000 --- a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts +++ /dev/null @@ -1,224 +0,0 @@ -import {EFlag} from '../../../types/api/enums'; -import type { - TEndpoint, - TNodeInfo, - TNodesInfo, - TPoolStats, - TSystemStateInfo, -} from '../../../types/api/nodes'; -import {TPDiskState} from '../../../types/api/pdisk'; -import {EVDiskState} from '../../../types/api/vdisk'; - -// Different disk sizes to simulate variety (in bytes) -const DISK_SIZES = [ - '68719476736', // 64 GB - '137438953472', // 128 GB - '274877906944', // 256 GB - '549755813888', // 512 GB - '1099511627776', // 1 TB -]; - -const getRandomDiskSize = () => DISK_SIZES[Math.floor(Math.random() * DISK_SIZES.length)]; - -const generatePoolStats = (count = 5): TPoolStats[] => { - const poolNames = ['System', 'User', 'Batch', 'IO', 'IC'] as const; - return poolNames.slice(0, count).map((Name) => ({ - Name, - Usage: Math.random() * 0.02, - Threads: Math.floor(Math.random() * 3) + 1, - })); -}; - -const generateEndpoints = (): TEndpoint[] => [ - {Name: 'ic', Address: ':19001'}, - {Name: 'http-mon', Address: ':8765'}, - {Name: 'grpcs', Address: ':2135'}, - {Name: 'grpc', Address: ':2136'}, -]; - -const generateSystemState = (nodeId: number): TSystemStateInfo => ({ - StartTime: '1734358137851', - ChangeTime: '1734358421375', - LoadAverage: [3.381347656, 2.489257813, 1.279296875], - NumberOfCpus: 8, - SystemState: EFlag.Green, - NodeId: nodeId, - Host: `localhost-${nodeId}`, - Version: 'main.95ce0df', - PoolStats: generatePoolStats(), - Endpoints: generateEndpoints(), - Roles: ['Bootstrapper', 'StateStorage', 'StateStorageBoard', 'SchemeBoard', 'Storage'], - MemoryLimit: '2147483648', - MaxDiskUsage: 0.002349853516, - Location: { - DataCenter: '1', - Rack: '1', - Unit: '1', - }, - TotalSessions: 0, - CoresUsed: 0.07583969556, - CoresTotal: 8, -}); - -const generatePDisk = (nodeId: 
number, pdiskId: number, totalSize = '68719476736') => ({ - PDiskId: pdiskId, - ChangeTime: '1734358142074', - Path: `/ydb_data/pdisk${pdiskId}l3ki78no.data`, - Guid: pdiskId.toString(), - Category: '0', - TotalSize: totalSize, - AvailableSize: (Number(totalSize) * 0.9).toString(), // 90% available by default - State: TPDiskState.Normal, - NodeId: nodeId, - Device: EFlag.Green, - Realtime: EFlag.Green, - SerialNumber: '', - SystemSize: '213909504', - LogUsedSize: '35651584', - LogTotalSize: '68486692864', - EnforcedDynamicSlotSize: '22817013760', -}); - -const generateVDisk = (nodeId: number, vdiskId: number, pdiskId: number) => ({ - VDiskId: { - GroupID: vdiskId, - GroupGeneration: 1, - Ring: 0, - Domain: 0, - VDisk: 0, - }, - ChangeTime: '1734358420919', - PDiskId: pdiskId, - VDiskSlotId: vdiskId, - Guid: '1', - Kind: '0', - NodeId: nodeId, - VDiskState: EVDiskState.OK, - DiskSpace: EFlag.Green, - SatisfactionRank: { - FreshRank: { - Flag: EFlag.Green, - }, - LevelRank: { - Flag: EFlag.Green, - }, - }, - Replicated: true, - ReplicationProgress: 1, - ReplicationSecondsRemaining: 0, - AllocatedSize: '0', - AvailableSize: '22817013760', - HasUnreadableBlobs: false, - IncarnationGuid: '11528832187803248876', - InstanceGuid: '14836434871903384493', - FrontQueues: EFlag.Green, - StoragePoolName: 'static', - ReadThroughput: '0', - WriteThroughput: '420', -}); - -interface NodeGeneratorOptions { - maxVdisksPerPDisk?: number; - maxPdisks?: number; -} - -const DEFAULT_OPTIONS: NodeGeneratorOptions = { - maxVdisksPerPDisk: 3, - maxPdisks: 4, -}; - -const generateNode = (nodeId: number, options: NodeGeneratorOptions = {}): TNodeInfo => { - const maxPdisks = options.maxPdisks ?? DEFAULT_OPTIONS.maxPdisks!; - const maxVdisksPerPDisk = options.maxVdisksPerPDisk ?? DEFAULT_OPTIONS.maxVdisksPerPDisk!; - - // Generate a random number of pdisks up to maxPdisks - const pdisksCount = Math.floor(Math.random() * maxPdisks) + 1; - - // For each pdisk, generate a random number of vdisks up to maxVdisksPerPDisk - const pdiskVdisksCounts = Array.from({length: pdisksCount}, () => - Math.floor(Math.random() * maxVdisksPerPDisk), - ); - const totalVdisks = pdiskVdisksCounts.reduce((sum: number, count: number) => sum + count, 0); - - return { - NodeId: nodeId, - UptimeSeconds: 284, - CpuUsage: 0.00947996, - DiskSpaceUsage: 0.234985, - SystemState: generateSystemState(nodeId), - PDisks: Array.from({length: pdisksCount}, (_, i) => - generatePDisk(nodeId, i + 1, getRandomDiskSize()), - ), - VDisks: Array.from({length: totalVdisks}, (_, i) => { - // Find which pdisk this vdisk belongs to based on the distribution - let pdiskIndex = 0; - let vdiskCount = pdiskVdisksCounts[0]; - while (i >= vdiskCount && pdiskIndex < pdisksCount - 1) { - pdiskIndex++; - vdiskCount += pdiskVdisksCounts[pdiskIndex]; - } - return generateVDisk(nodeId, i, pdiskIndex + 1); - }), - }; -}; - -interface GenerateNodesOptions extends NodeGeneratorOptions { - offset?: number; - limit?: number; -} - -// Keep a cache of generated nodes to maintain consistency between paginated requests -let cachedNodes: TNodeInfo[] | null = null; -let currentTotalNodes = 50; // Default number of nodes - -export const generateNodes = (count?: number, options: GenerateNodesOptions = {}): TNodesInfo => { - const totalNodes = count ?? 
currentTotalNodes; - const {offset = 0, limit = totalNodes, maxVdisksPerPDisk, maxPdisks} = options; - - // Reset cache if total nodes count changes - if (totalNodes !== currentTotalNodes) { - cachedNodes = null; - currentTotalNodes = totalNodes; - } - - // Generate or use cached nodes - if (!cachedNodes) { - cachedNodes = Array.from({length: totalNodes}, (_, i) => - generateNode(i + 1, {maxVdisksPerPDisk, maxPdisks}), - ); - } - - // Calculate MaximumSlotsPerDisk and MaximumDisksPerNode across all nodes - let maxSlotsPerDisk = 0; - let maxDisksPerNode = 0; - - cachedNodes.forEach((node) => { - // Count pdisks per node - if (node.PDisks) { - maxDisksPerNode = Math.max(maxDisksPerNode, node.PDisks.length); - } - - // Count vdisks per pdisk - if (node.VDisks) { - const pdiskVdiskCounts = new Map(); - node.VDisks.forEach((vdisk) => { - if (typeof vdisk.PDiskId === 'number') { - const count = (pdiskVdiskCounts.get(vdisk.PDiskId) || 0) + 1; - pdiskVdiskCounts.set(vdisk.PDiskId, count); - maxSlotsPerDisk = Math.max(maxSlotsPerDisk, count); - } - }); - } - }); - - // Get the requested slice of nodes - const paginatedNodes = cachedNodes.slice(offset, offset + limit); - - return { - TotalNodes: totalNodes.toString(), - FoundNodes: totalNodes.toString(), - Nodes: paginatedNodes, - MaximumSlotsPerDisk: maxSlotsPerDisk.toString(), - MaximumDisksPerNode: maxDisksPerNode.toString(), - }; -};