From 2c66f7bf30e3b24a2919bd9bed5a24c9b10e802e Mon Sep 17 00:00:00 2001
From: Anton Standrik
Date: Tue, 27 May 2025 13:07:22 +0300
Subject: [PATCH 01/21] fix: scrolling performance optimizations for table

---
 .../PaginatedTable/PaginatedTable.tsx         | 86 ++++++++++++++-----
 .../PaginatedTable/useScrollBasedChunks.ts    |  2 +-
 2 files changed, 67 insertions(+), 21 deletions(-)

diff --git a/src/components/PaginatedTable/PaginatedTable.tsx b/src/components/PaginatedTable/PaginatedTable.tsx
index 07e5c325a..795e57a8f 100644
--- a/src/components/PaginatedTable/PaginatedTable.tsx
+++ b/src/components/PaginatedTable/PaginatedTable.tsx
@@ -99,6 +99,17 @@ export const PaginatedTable = <T, F>({
         [onDataFetched, setFoundEntities, setIsInitialLoad, setTotalEntities],
     );
 
+    // Set will-change: transform on scroll container if not already set
+    React.useLayoutEffect(() => {
+        const scrollContainer = scrollContainerRef.current;
+        if (scrollContainer) {
+            const computedStyle = window.getComputedStyle(scrollContainer);
+            if (computedStyle.willChange !== 'transform') {
+                scrollContainer.style.willChange = 'transform';
+            }
+        }
+    }, [scrollContainerRef.current]);
+
     // Reset table on initialization and filters change
     React.useLayoutEffect(() => {
         const defaultTotal = initialEntitiesCount || 0;
@@ -110,26 +121,61 @@ export const PaginatedTable = <T, F>({
     }, [initialEntitiesCount, setTotalEntities, setFoundEntities, setIsInitialLoad]);
 
     const renderChunks = () => {
-        return activeChunks.map((isActive, index) => (
-            <TableChunk<T, F>
-                key={index}
-                id={index}
-                calculatedCount={index === activeChunks.length - 1 ? lastChunkSize : chunkSize}
-                chunkSize={chunkSize}
-                rowHeight={rowHeight}
-                columns={columns}
-                fetchData={fetchData}
-                filters={filters}
-                tableName={tableName}
-                sortParams={sortParams}
-                getRowClassName={getRowClassName}
-                renderErrorMessage={renderErrorMessage}
-                renderEmptyDataMessage={renderEmptyDataMessage}
-                onDataFetched={handleDataFetched}
-                isActive={isActive}
-                keepCache={keepCache}
-            />
-        ));
+        const chunks: React.ReactElement[] = [];
+        let i = 0;
+
+        while (i < activeChunks.length) {
+            const isActive = activeChunks[i];
+
+            if (isActive) {
+                // Render active chunk normally
+                chunks.push(
+                    <TableChunk<T, F>
+                        key={i}
+                        id={i}
+                        calculatedCount={i === activeChunks.length - 1 ? lastChunkSize : chunkSize}
+                        chunkSize={chunkSize}
+                        rowHeight={rowHeight}
+                        columns={columns}
+                        fetchData={fetchData}
+                        filters={filters}
+                        tableName={tableName}
+                        sortParams={sortParams}
+                        getRowClassName={getRowClassName}
+                        renderErrorMessage={renderErrorMessage}
+                        renderEmptyDataMessage={renderEmptyDataMessage}
+                        onDataFetched={handleDataFetched}
+                        isActive={isActive}
+                        keepCache={keepCache}
+                    />,
+                );
+                i++;
+            } else {
+                // Find consecutive inactive chunks and merge them
+                const startIndex = i;
+                let totalHeight = 0;
+
+                while (i < activeChunks.length && !activeChunks[i]) {
+                    const currentChunkSize =
+                        i === activeChunks.length - 1 ? lastChunkSize : chunkSize;
+                    totalHeight += currentChunkSize * rowHeight;
+                    i++;
+                }
+
+                // Render merged empty tbody for consecutive inactive chunks
+                chunks.push(
+                    <tbody
+                        key={`empty-${startIndex}`}
+                        style={{height: `${totalHeight}px`, display: 'block'}}
+                    />,
+                );
+            }
+        }
+
+        return chunks;
     };
 
     const renderTable = () => (
diff --git a/src/components/PaginatedTable/useScrollBasedChunks.ts b/src/components/PaginatedTable/useScrollBasedChunks.ts
index b05b5d458..18883af4c 100644
--- a/src/components/PaginatedTable/useScrollBasedChunks.ts
+++ b/src/components/PaginatedTable/useScrollBasedChunks.ts
@@ -11,7 +11,7 @@ interface UseScrollBasedChunksProps {
     overscanCount?: number;
 }
 
-const DEFAULT_OVERSCAN_COUNT = 1;
+const DEFAULT_OVERSCAN_COUNT = 2;
 
 export const useScrollBasedChunks = ({
     scrollContainerRef,
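
For illustration, the grouping arithmetic behind the renderChunks rewrite in patch 01 can be sketched standalone: every run of inactive chunks collapses into one fixed-height placeholder, so a long table keeps roughly O(visible) DOM nodes while scroll geometry stays intact. The `Segment` type and `buildSegments` name below are illustrative only, not code from the patch.

```ts
type Segment =
    | {kind: 'chunk'; index: number}
    | {kind: 'gap'; startIndex: number; height: number};

// Group consecutive inactive chunks into single fixed-height gaps,
// mirroring the while-loop in renderChunks above.
function buildSegments(
    activeChunks: boolean[],
    chunkSize: number,
    lastChunkSize: number,
    rowHeight: number,
): Segment[] {
    const segments: Segment[] = [];
    let i = 0;
    while (i < activeChunks.length) {
        if (activeChunks[i]) {
            segments.push({kind: 'chunk', index: i});
            i++;
        } else {
            const startIndex = i;
            let height = 0;
            while (i < activeChunks.length && !activeChunks[i]) {
                // The last chunk may be shorter than the others.
                const rows = i === activeChunks.length - 1 ? lastChunkSize : chunkSize;
                height += rows * rowHeight;
                i++;
            }
            segments.push({kind: 'gap', startIndex, height});
        }
    }
    return segments;
}

// buildSegments([true, true, false, false, false], 10, 10, 50) yields two
// rendered chunks plus a single 1500px gap covering chunks 2 through 4.
```
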
From eeb6189c80a40bfa9eea650ddbf20910cfdeb23b Mon Sep 17 00:00:00 2001
From: Anton Standrik
Date: Tue, 20 May 2025 15:14:38 +0300
Subject: [PATCH 02/21] Revert "fix: remove mocks"

This reverts commit a9e4182f14a1394148f1d9a92219560f1d15fed7.
---
 .../PaginatedStorageNodesTable/getNodes.ts    |  50 ++--
 .../PaginatedStorageNodesTable/nodes.ts       | 224 ++++++++++++++++++
 2 files changed, 257 insertions(+), 17 deletions(-)
 create mode 100644 src/containers/Storage/PaginatedStorageNodesTable/nodes.ts

diff --git a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts
index c8cc7d7f5..c8ed3d393 100644
--- a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts
+++ b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts
@@ -13,6 +13,8 @@ import {prepareSortValue} from '../../../utils/filters';
 import {getUptimeParamValue} from '../../../utils/nodes';
 import {getRequiredDataFields} from '../../../utils/tableUtils/getRequiredDataFields';
 
+import {generateNodes} from './nodes';
+
 export const getStorageNodes: FetchData<
     PreparedStorageNode,
     PreparedStorageNodeFilters,
@@ -43,23 +45,37 @@ export const getStorageNodes: FetchData<
     const sort = sortField ? 
prepareSortValue(sortField, sortOrder) : undefined; const dataFieldsRequired = getRequiredDataFields(columnsIds, NODES_COLUMNS_TO_DATA_FIELDS); - - const response = await window.api.viewer.getNodes({ - type, - storage, - limit, - offset, - sort, - filter: searchValue, - uptime: getUptimeParamValue(nodesUptimeFilter), - with: visibleEntities, - database, - node_id: nodeId, - group_id: groupId, - filter_group: filterGroup, - filter_group_by: filterGroupBy, - fieldsRequired: dataFieldsRequired, - }); + let response; + const urlParams = new URLSearchParams(window.location.search); + if (urlParams.get('mocks')) { + // Get mock configuration from URL parameters or use defaults + const pdisks = parseInt(urlParams.get('pdisks') || '10', 10); + const vdisksPerPDisk = parseInt(urlParams.get('vdisksPerPDisk') || '2', 10); + const totalNodes = parseInt(urlParams.get('totalNodes') || '50', 10); + response = generateNodes(totalNodes, { + maxVdisksPerPDisk: vdisksPerPDisk, + maxPdisks: pdisks, + offset, + limit, + }); + } else { + response = await window.api.viewer.getNodes({ + type, + storage, + limit, + offset, + sort, + filter: searchValue, + uptime: getUptimeParamValue(nodesUptimeFilter), + with: visibleEntities, + database, + node_id: nodeId, + group_id: groupId, + filter_group: filterGroup, + filter_group_by: filterGroupBy, + fieldsRequired: dataFieldsRequired, + }); + } const preparedResponse = prepareStorageNodesResponse(response); return { data: preparedResponse.nodes || [], diff --git a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts new file mode 100644 index 000000000..1c4cfe187 --- /dev/null +++ b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts @@ -0,0 +1,224 @@ +import {EFlag} from '../../../types/api/enums'; +import type { + TEndpoint, + TNodeInfo, + TNodesInfo, + TPoolStats, + TSystemStateInfo, +} from '../../../types/api/nodes'; +import {TPDiskState} from '../../../types/api/pdisk'; +import {EVDiskState} from '../../../types/api/vdisk'; + +// Different disk sizes to simulate variety (in bytes) +const DISK_SIZES = [ + '68719476736', // 64 GB + '137438953472', // 128 GB + '274877906944', // 256 GB + '549755813888', // 512 GB + '1099511627776', // 1 TB +]; + +const getRandomDiskSize = () => DISK_SIZES[Math.floor(Math.random() * DISK_SIZES.length)]; + +const generatePoolStats = (count = 5): TPoolStats[] => { + const poolNames = ['System', 'User', 'Batch', 'IO', 'IC'] as const; + return poolNames.slice(0, count).map((Name) => ({ + Name, + Usage: Math.random() * 0.02, + Threads: Math.floor(Math.random() * 3) + 1, + })); +}; + +const generateEndpoints = (): TEndpoint[] => [ + {Name: 'ic', Address: ':19001'}, + {Name: 'http-mon', Address: ':8765'}, + {Name: 'grpcs', Address: ':2135'}, + {Name: 'grpc', Address: ':2136'}, +]; + +const generateSystemState = (nodeId: number): TSystemStateInfo => ({ + StartTime: '1734358137851', + ChangeTime: '1734358421375', + LoadAverage: [3.381347656, 2.489257813, 1.279296875], + NumberOfCpus: 8, + SystemState: EFlag.Green, + NodeId: nodeId, + Host: `localhost-${nodeId}`, + Version: 'main.95ce0df', + PoolStats: generatePoolStats(), + Endpoints: generateEndpoints(), + Roles: ['Bootstrapper', 'StateStorage', 'StateStorageBoard', 'SchemeBoard', 'Storage'], + MemoryLimit: '2147483648', + MaxDiskUsage: 0.002349853516, + Location: { + DataCenter: '1', + Rack: '1', + Unit: '1', + }, + TotalSessions: 0, + CoresUsed: 0.07583969556, + CoresTotal: 8, +}); + +const generatePDisk = (nodeId: 
number, pdiskId: number, totalSize = '68719476736') => ({ + PDiskId: pdiskId, + ChangeTime: '1734358142074', + Path: `/ydb_data/pdisk${pdiskId}l3ki78no.data`, + Guid: pdiskId.toString(), + Category: '0', + TotalSize: totalSize, + AvailableSize: (Number(totalSize) * 0.9).toString(), // 90% available by default + State: TPDiskState.Normal, + NodeId: nodeId, + Device: EFlag.Green, + Realtime: EFlag.Green, + SerialNumber: '', + SystemSize: '213909504', + LogUsedSize: '35651584', + LogTotalSize: '68486692864', + EnforcedDynamicSlotSize: '22817013760', +}); + +const generateVDisk = (nodeId: number, vdiskId: number, pdiskId: number) => ({ + VDiskId: { + GroupID: vdiskId, + GroupGeneration: 1, + Ring: 0, + Domain: 0, + VDisk: 0, + }, + ChangeTime: '1734358420919', + PDiskId: pdiskId, + VDiskSlotId: vdiskId, + Guid: '1', + Kind: '0', + NodeId: nodeId, + VDiskState: EVDiskState.OK, + DiskSpace: EFlag.Green, + SatisfactionRank: { + FreshRank: { + Flag: EFlag.Green, + }, + LevelRank: { + Flag: EFlag.Green, + }, + }, + Replicated: true, + ReplicationProgress: 1, + ReplicationSecondsRemaining: 0, + AllocatedSize: '0', + AvailableSize: '22817013760', + HasUnreadableBlobs: false, + IncarnationGuid: '11528832187803248876', + InstanceGuid: '14836434871903384493', + FrontQueues: EFlag.Green, + StoragePoolName: 'static', + ReadThroughput: '0', + WriteThroughput: '420', +}); + +interface NodeGeneratorOptions { + maxVdisksPerPDisk?: number; + maxPdisks?: number; +} + +const DEFAULT_OPTIONS: NodeGeneratorOptions = { + maxVdisksPerPDisk: 3, + maxPdisks: 4, +}; + +const generateNode = (nodeId: number, options: NodeGeneratorOptions = {}): TNodeInfo => { + const maxPdisks = options.maxPdisks ?? DEFAULT_OPTIONS.maxPdisks!; + const maxVdisksPerPDisk = options.maxVdisksPerPDisk ?? DEFAULT_OPTIONS.maxVdisksPerPDisk!; + + // Generate a random number of pdisks up to maxPdisks + const pdisksCount = Math.floor(Math.random() * maxPdisks) + 1; + + // For each pdisk, generate a random number of vdisks up to maxVdisksPerPDisk + const pdiskVdisksCounts = Array.from({length: pdisksCount}, () => + Math.floor(Math.random() * maxVdisksPerPDisk), + ); + const totalVdisks = pdiskVdisksCounts.reduce((sum: number, count: number) => sum + count, 0); + + return { + NodeId: nodeId, + UptimeSeconds: 284, + CpuUsage: 0.00947996, + DiskSpaceUsage: 0.234985, + SystemState: generateSystemState(nodeId), + PDisks: Array.from({length: pdisksCount}, (_, i) => + generatePDisk(nodeId, i + 1, getRandomDiskSize()), + ), + VDisks: Array.from({length: totalVdisks}, (_, i) => { + // Find which pdisk this vdisk belongs to based on the distribution + let pdiskIndex = 0; + let vdiskCount = pdiskVdisksCounts[0]; + while (i >= vdiskCount && pdiskIndex < pdisksCount - 1) { + pdiskIndex++; + vdiskCount += pdiskVdisksCounts[pdiskIndex]; + } + return generateVDisk(nodeId, i, pdiskIndex + 1); + }), + }; +}; + +interface GenerateNodesOptions extends NodeGeneratorOptions { + offset?: number; + limit?: number; +} + +// Keep a cache of generated nodes to maintain consistency between paginated requests +let cachedNodes: TNodeInfo[] | null = null; +let currentTotalNodes = 50; // Default number of nodes + +export const generateNodes = (count?: number, options: GenerateNodesOptions = {}): TNodesInfo => { + const totalNodes = count ?? 
currentTotalNodes;
+    const {offset = 0, limit = totalNodes, maxVdisksPerPDisk, maxPdisks} = options;
+
+    // Reset cache if total nodes count changes
+    if (totalNodes !== currentTotalNodes) {
+        cachedNodes = null;
+        currentTotalNodes = totalNodes;
+    }
+
+    // Generate or use cached nodes
+    if (!cachedNodes) {
+        cachedNodes = Array.from({length: totalNodes}, (_, i) =>
+            generateNode(i + 1, {maxVdisksPerPDisk, maxPdisks}),
+        );
+    }
+
+    // Calculate MaximumSlotsPerDisk and MaximumDisksPerNode across all nodes
+    let maxSlotsPerDisk = 0;
+    let maxDisksPerNode = 0;
+
+    cachedNodes.forEach((node) => {
+        // Count pdisks per node
+        if (node.PDisks) {
+            maxDisksPerNode = Math.max(maxDisksPerNode, node.PDisks.length);
+        }
+
+        // Count vdisks per pdisk
+        if (node.VDisks) {
+            const pdiskVdiskCounts = new Map<number, number>();
+            node.VDisks.forEach((vdisk) => {
+                if (typeof vdisk.PDiskId === 'number') {
+                    const count = (pdiskVdiskCounts.get(vdisk.PDiskId) || 0) + 1;
+                    pdiskVdiskCounts.set(vdisk.PDiskId, count);
+                    maxSlotsPerDisk = Math.max(maxSlotsPerDisk, count);
+                }
+            });
+        }
+    });
+
+    // Get the requested slice of nodes
+    const paginatedNodes = cachedNodes.slice(offset, offset + limit);
+
+    return {
+        TotalNodes: totalNodes.toString(),
+        FoundNodes: totalNodes.toString(),
+        Nodes: paginatedNodes,
+        MaximumSlotsPerDisk: maxSlotsPerDisk.toString(),
+        MaximumDisksPerNode: maxDisksPerNode.toString(),
+    };
+};
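
The mock branch that patch 02 restores in getNodes.ts is driven entirely by URL query parameters. As a sketch, that inline parsing could be pulled into a helper like the one below; `MockConfig` and `readMockConfig` are illustrative names, not code from the patch, and note that the patch enables mocks on any truthy `mocks` value.

```ts
interface MockConfig {
    enabled: boolean;
    pdisks: number;
    vdisksPerPDisk: number;
    totalNodes: number;
}

// Parse the same parameters the patch reads inline, with the same defaults.
function readMockConfig(search = window.location.search): MockConfig {
    const params = new URLSearchParams(search);
    return {
        enabled: Boolean(params.get('mocks')),
        pdisks: parseInt(params.get('pdisks') || '10', 10),
        vdisksPerPDisk: parseInt(params.get('vdisksPerPDisk') || '2', 10),
        totalNodes: parseInt(params.get('totalNodes') || '50', 10),
    };
}

// Example: opening the page with ?mocks=1&totalNodes=1000 pages
// generateNodes() output for a 1000-node table instead of the real API.
```
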
From ab412527489cb8691e03e536542786bfd251b8ba Mon Sep 17 00:00:00 2001
From: Anton Standrik
Date: Tue, 27 May 2025 23:01:35 +0300
Subject: [PATCH 03/21] fix: split render and fetch overscans

---
 .../PaginatedTable/PaginatedTable.tsx         | 33 +++++++--
 src/components/PaginatedTable/TableChunk.tsx  | 20 +++---
 .../PaginatedTable/useScrollBasedChunks.ts    | 71 +++++++++++++------
 3 files changed, 86 insertions(+), 38 deletions(-)

diff --git a/src/components/PaginatedTable/PaginatedTable.tsx b/src/components/PaginatedTable/PaginatedTable.tsx
index 795e57a8f..5e9abd6e3 100644
--- a/src/components/PaginatedTable/PaginatedTable.tsx
+++ b/src/components/PaginatedTable/PaginatedTable.tsx
@@ -15,6 +15,7 @@ import type {
     RenderErrorMessage,
 } from './types';
 import {useScrollBasedChunks} from './useScrollBasedChunks';
+import {calculateElementOffsetTop} from './utils';
 
 import './PaginatedTable.scss';
 
@@ -62,13 +63,15 @@ export const PaginatedTable = <T, F>({
     const {sortParams, foundEntities} = tableState;
 
     const tableRef = React.useRef<HTMLTableElement>(null);
+    const [tableOffset, setTableOffset] = React.useState(0);
 
-    const activeChunks = useScrollBasedChunks({
+    const chunkStates = useScrollBasedChunks({
         scrollContainerRef,
         tableRef,
         totalItems: foundEntities,
         rowHeight,
         chunkSize,
+        tableOffset,
     });
 
     // this prevents a situation when filters are new but active chunks are not yet recalculated (recalculation happens on the next render, so we apply the filters change on the next render too)
@@ -99,6 +102,14 @@ export const PaginatedTable = <T, F>({
         [onDataFetched, setFoundEntities, setIsInitialLoad, setTotalEntities],
     );
 
+    React.useLayoutEffect(() => {
+        const scrollContainer = scrollContainerRef.current;
+        const table = tableRef.current;
+        if (table && scrollContainer) {
+            setTableOffset(calculateElementOffsetTop(table, scrollContainer));
+        }
+    }, [scrollContainerRef.current, tableRef.current, foundEntities]);
+
     // Set will-change: transform on scroll container if not already set
     React.useLayoutEffect(() => {
         const scrollContainer = scrollContainerRef.current;
@@ -123,41 +134,50 @@ export const PaginatedTable = <T, F>({
     const renderChunks = () => {
         const chunks: React.ReactElement[] = [];
         let i = 0;
 
-        while (i < activeChunks.length) {
-            const isActive = activeChunks[i];
+        while (i < chunkStates.length) {
+            const chunkState = chunkStates[i];
+            const shouldRender = chunkState.shouldRender;
+            const shouldFetch = chunkState.shouldFetch;
+            const isActive = shouldRender || shouldFetch;
 
             if (isActive) {
                 // Render active chunk normally
                 chunks.push(
                     <TableChunk<T, F>
                         key={i}
                         id={i}
-                        calculatedCount={i === activeChunks.length - 1 ? lastChunkSize : chunkSize}
+                        calculatedCount={i === chunkStates.length - 1 ? lastChunkSize : chunkSize}
                         chunkSize={chunkSize}
                         rowHeight={rowHeight}
                         columns={columns}
                         fetchData={fetchData}
                         filters={filters}
                         tableName={tableName}
                         sortParams={sortParams}
                         getRowClassName={getRowClassName}
                         renderErrorMessage={renderErrorMessage}
                         renderEmptyDataMessage={renderEmptyDataMessage}
                         onDataFetched={handleDataFetched}
-                        isActive={isActive}
+                        shouldFetch={chunkState.shouldFetch}
+                        shouldRender={chunkState.shouldRender}
                         keepCache={keepCache}
                     />,
                 );
                 i++;
             } else {
                 // Find consecutive inactive chunks and merge them
                 const startIndex = i;
                 let totalHeight = 0;
 
-                while (i < activeChunks.length && !activeChunks[i]) {
+                while (
+                    i < chunkStates.length &&
+                    !chunkStates[i].shouldRender &&
+                    !chunkStates[i].shouldFetch
+                ) {
                     const currentChunkSize =
-                        i === activeChunks.length - 1 ? lastChunkSize : chunkSize;
+                        i === chunkStates.length - 1 ? lastChunkSize : chunkSize;
                     totalHeight += currentChunkSize * rowHeight;
                     i++;
                 }
diff --git a/src/components/PaginatedTable/TableChunk.tsx b/src/components/PaginatedTable/TableChunk.tsx
index 0a4085a7e..f0d42d01c 100644
--- a/src/components/PaginatedTable/TableChunk.tsx
+++ b/src/components/PaginatedTable/TableChunk.tsx
@@ -29,7 +29,8 @@ interface TableChunkProps<T, F> {
     columns: Column<T>[];
     filters?: F;
     sortParams?: SortParams;
-    isActive: boolean;
+    shouldFetch: boolean;
+    shouldRender: boolean;
 
     tableName: string;
     fetchData: FetchData<T, F>;
@@ -56,7 +57,8 @@ export const TableChunk = typedMemo(function TableChunk<T, F>({
     renderErrorMessage,
     renderEmptyDataMessage,
     onDataFetched,
-    isActive,
+    shouldFetch,
+    shouldRender,
     keepCache,
 }: TableChunkProps<T, F>) {
     const [isTimeoutActive, setIsTimeoutActive] = React.useState(true);
@@ -75,7 +77,7 @@ export const TableChunk = typedMemo(function TableChunk<T, F>({
     };
 
     tableDataApi.useFetchTableChunkQuery(queryParams, {
-        skip: isTimeoutActive || !isActive,
+        skip: isTimeoutActive || !shouldFetch,
         pollingInterval: autoRefreshInterval,
         refetchOnMountOrArgChange: !keepCache,
     });
@@ -85,7 +87,7 @@ export const TableChunk = typedMemo(function TableChunk<T, F>({
     React.useEffect(() => {
         let timeout = 0;
 
-        if (isActive && isTimeoutActive) {
+        if (shouldFetch && isTimeoutActive) {
             timeout = window.setTimeout(() => {
                 setIsTimeoutActive(false);
             }, DEBOUNCE_TIMEOUT);
@@ -94,10 +96,10 @@ export const TableChunk = typedMemo(function TableChunk<T, F>({
         return () => {
             window.clearTimeout(timeout);
         };
-    }, [isActive, isTimeoutActive]);
+    }, [shouldFetch, isTimeoutActive]);
 
     React.useEffect(() => {
-        if (currentData && isActive) {
+        if (currentData) {
             onDataFetched({
                 ...currentData,
                 data: currentData.data as T[],
@@ -105,12 +107,12 @@ export const TableChunk = typedMemo(function TableChunk<T, F>({
                 total: currentData.total || 0,
             });
         }
-    }, [currentData, isActive, onDataFetched]);
+    }, [currentData, onDataFetched]);
 
     const dataLength = currentData?.data?.length || calculatedCount;
 
     const renderContent = () => {
-        if (!isActive) {
+        if (!shouldRender) {
             return null;
         }
 
@@ -161,7 +163,7 @@ export const TableChunk = typedMemo(function TableChunk<T, F>({
                 // Default display: table-row-group doesn't work in Safari and breaks the table
                 // display: block works in 
Safari, but disconnects thead and tbody cell grids // Hack to make it work in all cases - display: isActive ? 'table-row-group' : 'block', + display: shouldRender ? 'table-row-group' : 'block', }} > {renderContent()} diff --git a/src/components/PaginatedTable/useScrollBasedChunks.ts b/src/components/PaginatedTable/useScrollBasedChunks.ts index 18883af4c..5bfc467fe 100644 --- a/src/components/PaginatedTable/useScrollBasedChunks.ts +++ b/src/components/PaginatedTable/useScrollBasedChunks.ts @@ -1,6 +1,6 @@ import React from 'react'; -import {calculateElementOffsetTop, rafThrottle} from './utils'; +import {rafThrottle} from './utils'; interface UseScrollBasedChunksProps { scrollContainerRef: React.RefObject; @@ -8,10 +8,21 @@ interface UseScrollBasedChunksProps { totalItems: number; rowHeight: number; chunkSize: number; - overscanCount?: number; + renderOverscan?: number; + fetchOverscan?: number; + tableOffset: number; } -const DEFAULT_OVERSCAN_COUNT = 2; +interface ChunkState { + shouldRender: boolean; + shouldFetch: boolean; +} + +const isSafari = /^((?!chrome|android).)*safari/i.test(navigator.userAgent); + +// Bad performance in Safari - reduce overscan counts +const DEFAULT_RENDER_OVERSCAN = isSafari ? 1 : 2; +const DEFAULT_FETCH_OVERSCAN = isSafari ? 2 : 4; export const useScrollBasedChunks = ({ scrollContainerRef, @@ -19,17 +30,17 @@ export const useScrollBasedChunks = ({ totalItems, rowHeight, chunkSize, - overscanCount = DEFAULT_OVERSCAN_COUNT, -}: UseScrollBasedChunksProps): boolean[] => { + tableOffset, + renderOverscan = DEFAULT_RENDER_OVERSCAN, + fetchOverscan = DEFAULT_FETCH_OVERSCAN, +}: UseScrollBasedChunksProps): ChunkState[] => { const chunksCount = React.useMemo( () => Math.ceil(totalItems / chunkSize), [chunkSize, totalItems], ); - const [startChunk, setStartChunk] = React.useState(0); - const [endChunk, setEndChunk] = React.useState( - Math.min(overscanCount, Math.max(chunksCount - 1, 0)), - ); + const [visibleStartChunk, setVisibleStartChunk] = React.useState(0); + const [visibleEndChunk, setVisibleEndChunk] = React.useState(0); const calculateVisibleRange = React.useCallback(() => { const container = scrollContainerRef?.current; @@ -38,24 +49,23 @@ export const useScrollBasedChunks = ({ return null; } - const tableOffset = calculateElementOffsetTop(table, container); const containerScroll = container.scrollTop; const visibleStart = Math.max(containerScroll - tableOffset, 0); const visibleEnd = visibleStart + container.clientHeight; - const start = Math.max(Math.floor(visibleStart / rowHeight / chunkSize) - overscanCount, 0); + const start = Math.max(Math.floor(visibleStart / rowHeight / chunkSize), 0); const end = Math.min( - Math.floor(visibleEnd / rowHeight / chunkSize) + overscanCount, + Math.floor(visibleEnd / rowHeight / chunkSize), Math.max(chunksCount - 1, 0), ); return {start, end}; - }, [scrollContainerRef, tableRef, rowHeight, chunkSize, overscanCount, chunksCount]); + }, [scrollContainerRef, tableRef, tableOffset, rowHeight, chunkSize, chunksCount]); const updateVisibleChunks = React.useCallback(() => { const newRange = calculateVisibleRange(); if (newRange) { - setStartChunk(newRange.start); - setEndChunk(newRange.end); + setVisibleStartChunk(newRange.start); + setVisibleEndChunk(newRange.end); } }, [calculateVisibleRange]); @@ -94,11 +104,28 @@ export const useScrollBasedChunks = ({ }, [handleScroll, scrollContainerRef]); return React.useMemo(() => { - // boolean array that represents active chunks - const activeChunks = Array(chunksCount).fill(false); - 
for (let i = startChunk; i <= endChunk; i++) { - activeChunks[i] = true; - } - return activeChunks; - }, [chunksCount, startChunk, endChunk]); + // Calculate render range (visible + render overscan) + const renderStartChunk = Math.max(visibleStartChunk - renderOverscan, 0); + const renderEndChunk = Math.min( + visibleEndChunk + renderOverscan, + Math.max(chunksCount - 1, 0), + ); + + // Calculate fetch range (visible + fetch overscan) + const fetchStartChunk = Math.max(visibleStartChunk - fetchOverscan, 0); + const fetchEndChunk = Math.min( + visibleEndChunk + fetchOverscan, + Math.max(chunksCount - 1, 0), + ); + + // Create chunk states array + const chunkStates: ChunkState[] = Array(chunksCount) + .fill(null) + .map((_, index) => ({ + shouldRender: index >= renderStartChunk && index <= renderEndChunk, + shouldFetch: index >= fetchStartChunk && index <= fetchEndChunk, + })); + + return chunkStates; + }, [chunksCount, visibleStartChunk, visibleEndChunk, renderOverscan, fetchOverscan]); }; From 4a3a487a2a71091e263615e3df5bd531e2ca1592 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Tue, 27 May 2025 23:01:48 +0300 Subject: [PATCH 04/21] Revert "Revert "fix: remove mocks"" This reverts commit eeb6189c80a40bfa9eea650ddbf20910cfdeb23b. --- .../PaginatedStorageNodesTable/getNodes.ts | 50 ++-- .../PaginatedStorageNodesTable/nodes.ts | 224 ------------------ 2 files changed, 17 insertions(+), 257 deletions(-) delete mode 100644 src/containers/Storage/PaginatedStorageNodesTable/nodes.ts diff --git a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts index c8ed3d393..c8cc7d7f5 100644 --- a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts +++ b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts @@ -13,8 +13,6 @@ import {prepareSortValue} from '../../../utils/filters'; import {getUptimeParamValue} from '../../../utils/nodes'; import {getRequiredDataFields} from '../../../utils/tableUtils/getRequiredDataFields'; -import {generateNodes} from './nodes'; - export const getStorageNodes: FetchData< PreparedStorageNode, PreparedStorageNodeFilters, @@ -45,37 +43,23 @@ export const getStorageNodes: FetchData< const sort = sortField ? 
prepareSortValue(sortField, sortOrder) : undefined; const dataFieldsRequired = getRequiredDataFields(columnsIds, NODES_COLUMNS_TO_DATA_FIELDS); - let response; - const urlParams = new URLSearchParams(window.location.search); - if (urlParams.get('mocks')) { - // Get mock configuration from URL parameters or use defaults - const pdisks = parseInt(urlParams.get('pdisks') || '10', 10); - const vdisksPerPDisk = parseInt(urlParams.get('vdisksPerPDisk') || '2', 10); - const totalNodes = parseInt(urlParams.get('totalNodes') || '50', 10); - response = generateNodes(totalNodes, { - maxVdisksPerPDisk: vdisksPerPDisk, - maxPdisks: pdisks, - offset, - limit, - }); - } else { - response = await window.api.viewer.getNodes({ - type, - storage, - limit, - offset, - sort, - filter: searchValue, - uptime: getUptimeParamValue(nodesUptimeFilter), - with: visibleEntities, - database, - node_id: nodeId, - group_id: groupId, - filter_group: filterGroup, - filter_group_by: filterGroupBy, - fieldsRequired: dataFieldsRequired, - }); - } + + const response = await window.api.viewer.getNodes({ + type, + storage, + limit, + offset, + sort, + filter: searchValue, + uptime: getUptimeParamValue(nodesUptimeFilter), + with: visibleEntities, + database, + node_id: nodeId, + group_id: groupId, + filter_group: filterGroup, + filter_group_by: filterGroupBy, + fieldsRequired: dataFieldsRequired, + }); const preparedResponse = prepareStorageNodesResponse(response); return { data: preparedResponse.nodes || [], diff --git a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts deleted file mode 100644 index 1c4cfe187..000000000 --- a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts +++ /dev/null @@ -1,224 +0,0 @@ -import {EFlag} from '../../../types/api/enums'; -import type { - TEndpoint, - TNodeInfo, - TNodesInfo, - TPoolStats, - TSystemStateInfo, -} from '../../../types/api/nodes'; -import {TPDiskState} from '../../../types/api/pdisk'; -import {EVDiskState} from '../../../types/api/vdisk'; - -// Different disk sizes to simulate variety (in bytes) -const DISK_SIZES = [ - '68719476736', // 64 GB - '137438953472', // 128 GB - '274877906944', // 256 GB - '549755813888', // 512 GB - '1099511627776', // 1 TB -]; - -const getRandomDiskSize = () => DISK_SIZES[Math.floor(Math.random() * DISK_SIZES.length)]; - -const generatePoolStats = (count = 5): TPoolStats[] => { - const poolNames = ['System', 'User', 'Batch', 'IO', 'IC'] as const; - return poolNames.slice(0, count).map((Name) => ({ - Name, - Usage: Math.random() * 0.02, - Threads: Math.floor(Math.random() * 3) + 1, - })); -}; - -const generateEndpoints = (): TEndpoint[] => [ - {Name: 'ic', Address: ':19001'}, - {Name: 'http-mon', Address: ':8765'}, - {Name: 'grpcs', Address: ':2135'}, - {Name: 'grpc', Address: ':2136'}, -]; - -const generateSystemState = (nodeId: number): TSystemStateInfo => ({ - StartTime: '1734358137851', - ChangeTime: '1734358421375', - LoadAverage: [3.381347656, 2.489257813, 1.279296875], - NumberOfCpus: 8, - SystemState: EFlag.Green, - NodeId: nodeId, - Host: `localhost-${nodeId}`, - Version: 'main.95ce0df', - PoolStats: generatePoolStats(), - Endpoints: generateEndpoints(), - Roles: ['Bootstrapper', 'StateStorage', 'StateStorageBoard', 'SchemeBoard', 'Storage'], - MemoryLimit: '2147483648', - MaxDiskUsage: 0.002349853516, - Location: { - DataCenter: '1', - Rack: '1', - Unit: '1', - }, - TotalSessions: 0, - CoresUsed: 0.07583969556, - CoresTotal: 8, -}); - -const generatePDisk = (nodeId: 
number, pdiskId: number, totalSize = '68719476736') => ({ - PDiskId: pdiskId, - ChangeTime: '1734358142074', - Path: `/ydb_data/pdisk${pdiskId}l3ki78no.data`, - Guid: pdiskId.toString(), - Category: '0', - TotalSize: totalSize, - AvailableSize: (Number(totalSize) * 0.9).toString(), // 90% available by default - State: TPDiskState.Normal, - NodeId: nodeId, - Device: EFlag.Green, - Realtime: EFlag.Green, - SerialNumber: '', - SystemSize: '213909504', - LogUsedSize: '35651584', - LogTotalSize: '68486692864', - EnforcedDynamicSlotSize: '22817013760', -}); - -const generateVDisk = (nodeId: number, vdiskId: number, pdiskId: number) => ({ - VDiskId: { - GroupID: vdiskId, - GroupGeneration: 1, - Ring: 0, - Domain: 0, - VDisk: 0, - }, - ChangeTime: '1734358420919', - PDiskId: pdiskId, - VDiskSlotId: vdiskId, - Guid: '1', - Kind: '0', - NodeId: nodeId, - VDiskState: EVDiskState.OK, - DiskSpace: EFlag.Green, - SatisfactionRank: { - FreshRank: { - Flag: EFlag.Green, - }, - LevelRank: { - Flag: EFlag.Green, - }, - }, - Replicated: true, - ReplicationProgress: 1, - ReplicationSecondsRemaining: 0, - AllocatedSize: '0', - AvailableSize: '22817013760', - HasUnreadableBlobs: false, - IncarnationGuid: '11528832187803248876', - InstanceGuid: '14836434871903384493', - FrontQueues: EFlag.Green, - StoragePoolName: 'static', - ReadThroughput: '0', - WriteThroughput: '420', -}); - -interface NodeGeneratorOptions { - maxVdisksPerPDisk?: number; - maxPdisks?: number; -} - -const DEFAULT_OPTIONS: NodeGeneratorOptions = { - maxVdisksPerPDisk: 3, - maxPdisks: 4, -}; - -const generateNode = (nodeId: number, options: NodeGeneratorOptions = {}): TNodeInfo => { - const maxPdisks = options.maxPdisks ?? DEFAULT_OPTIONS.maxPdisks!; - const maxVdisksPerPDisk = options.maxVdisksPerPDisk ?? DEFAULT_OPTIONS.maxVdisksPerPDisk!; - - // Generate a random number of pdisks up to maxPdisks - const pdisksCount = Math.floor(Math.random() * maxPdisks) + 1; - - // For each pdisk, generate a random number of vdisks up to maxVdisksPerPDisk - const pdiskVdisksCounts = Array.from({length: pdisksCount}, () => - Math.floor(Math.random() * maxVdisksPerPDisk), - ); - const totalVdisks = pdiskVdisksCounts.reduce((sum: number, count: number) => sum + count, 0); - - return { - NodeId: nodeId, - UptimeSeconds: 284, - CpuUsage: 0.00947996, - DiskSpaceUsage: 0.234985, - SystemState: generateSystemState(nodeId), - PDisks: Array.from({length: pdisksCount}, (_, i) => - generatePDisk(nodeId, i + 1, getRandomDiskSize()), - ), - VDisks: Array.from({length: totalVdisks}, (_, i) => { - // Find which pdisk this vdisk belongs to based on the distribution - let pdiskIndex = 0; - let vdiskCount = pdiskVdisksCounts[0]; - while (i >= vdiskCount && pdiskIndex < pdisksCount - 1) { - pdiskIndex++; - vdiskCount += pdiskVdisksCounts[pdiskIndex]; - } - return generateVDisk(nodeId, i, pdiskIndex + 1); - }), - }; -}; - -interface GenerateNodesOptions extends NodeGeneratorOptions { - offset?: number; - limit?: number; -} - -// Keep a cache of generated nodes to maintain consistency between paginated requests -let cachedNodes: TNodeInfo[] | null = null; -let currentTotalNodes = 50; // Default number of nodes - -export const generateNodes = (count?: number, options: GenerateNodesOptions = {}): TNodesInfo => { - const totalNodes = count ?? 
currentTotalNodes;
-    const {offset = 0, limit = totalNodes, maxVdisksPerPDisk, maxPdisks} = options;
-
-    // Reset cache if total nodes count changes
-    if (totalNodes !== currentTotalNodes) {
-        cachedNodes = null;
-        currentTotalNodes = totalNodes;
-    }
-
-    // Generate or use cached nodes
-    if (!cachedNodes) {
-        cachedNodes = Array.from({length: totalNodes}, (_, i) =>
-            generateNode(i + 1, {maxVdisksPerPDisk, maxPdisks}),
-        );
-    }
-
-    // Calculate MaximumSlotsPerDisk and MaximumDisksPerNode across all nodes
-    let maxSlotsPerDisk = 0;
-    let maxDisksPerNode = 0;
-
-    cachedNodes.forEach((node) => {
-        // Count pdisks per node
-        if (node.PDisks) {
-            maxDisksPerNode = Math.max(maxDisksPerNode, node.PDisks.length);
-        }
-
-        // Count vdisks per pdisk
-        if (node.VDisks) {
-            const pdiskVdiskCounts = new Map<number, number>();
-            node.VDisks.forEach((vdisk) => {
-                if (typeof vdisk.PDiskId === 'number') {
-                    const count = (pdiskVdiskCounts.get(vdisk.PDiskId) || 0) + 1;
-                    pdiskVdiskCounts.set(vdisk.PDiskId, count);
-                    maxSlotsPerDisk = Math.max(maxSlotsPerDisk, count);
-                }
-            });
-        }
-    });
-
-    // Get the requested slice of nodes
-    const paginatedNodes = cachedNodes.slice(offset, offset + limit);
-
-    return {
-        TotalNodes: totalNodes.toString(),
-        FoundNodes: totalNodes.toString(),
-        Nodes: paginatedNodes,
-        MaximumSlotsPerDisk: maxSlotsPerDisk.toString(),
-        MaximumDisksPerNode: maxDisksPerNode.toString(),
-    };
-};
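
Patch 03 above split the single overscan window into independent render and fetch windows around the visible chunk range, and patch 05 below widens the fetch window so the request batcher has consecutive chunks to coalesce. A self-contained sketch of that range arithmetic follows; `getChunkStates` is an illustrative name, not the hook itself.

```ts
interface ChunkState {
    shouldRender: boolean;
    shouldFetch: boolean;
}

// Visible chunks get both flags; each overscan widens its own window,
// clamped to [0, chunksCount - 1].
function getChunkStates(
    visibleStartChunk: number,
    visibleEndChunk: number,
    chunksCount: number,
    renderOverscan: number,
    fetchOverscan: number,
): ChunkState[] {
    const clamp = (chunk: number) =>
        Math.min(Math.max(chunk, 0), Math.max(chunksCount - 1, 0));
    const renderStart = clamp(visibleStartChunk - renderOverscan);
    const renderEnd = clamp(visibleEndChunk + renderOverscan);
    const fetchStart = clamp(visibleStartChunk - fetchOverscan);
    const fetchEnd = clamp(visibleEndChunk + fetchOverscan);

    return Array.from({length: chunksCount}, (_, i) => ({
        shouldRender: i >= renderStart && i <= renderEnd,
        shouldFetch: i >= fetchStart && i <= fetchEnd,
    }));
}

// With chunks 10-11 visible, renderOverscan 2 and fetchOverscan 4:
// chunks 8-13 are rendered, while chunks 6-15 are fetched ahead of the scroll.
```
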
From af0471e47363ca4bb2d843cf7265def68ddc651e Mon Sep 17 00:00:00 2001
From: Anton Standrik
Date: Wed, 28 May 2025 13:10:45 +0300
Subject: [PATCH 05/21] feat: request batching

---
 .../PaginatedTable/requestBatcher.ts          | 197 ++++++++++++++++++
 .../PaginatedTable/useScrollBasedChunks.ts    |   2 +-
 src/store/reducers/tableData.ts               |  29 ++-
 3 files changed, 218 insertions(+), 10 deletions(-)
 create mode 100644 src/components/PaginatedTable/requestBatcher.ts

diff --git a/src/components/PaginatedTable/requestBatcher.ts b/src/components/PaginatedTable/requestBatcher.ts
new file mode 100644
index 000000000..ad50bdc8f
--- /dev/null
+++ b/src/components/PaginatedTable/requestBatcher.ts
@@ -0,0 +1,197 @@
+import type {BaseQueryFn} from '@reduxjs/toolkit/query';
+
+import type {FetchData, PaginatedTableData, SortParams} from './types';
+
+interface PaginatedTableParams<T, F> {
+    offset: number;
+    fetchData: FetchData<T, F>;
+    filters: F;
+    limit: number;
+    sortParams?: SortParams;
+    columnsIds: string[];
+    tableName: string;
+}
+
+interface QueuedRequest<T, F> {
+    params: PaginatedTableParams<T, F>;
+    resolve: (result: {data: PaginatedTableData<T>} | {error: unknown}) => void;
+    reject: (error: unknown) => void;
+    signal?: AbortSignal;
+}
+
+interface BatchGroup<T, F> {
+    requests: QueuedRequest<T, F>[];
+    batchKey: string;
+    minOffset: number;
+    maxOffset: number;
+    totalLimit: number;
+}
+
+class RequestBatcher {
+    private requestQueue = new Map<string, QueuedRequest<any, any>[]>();
+    private batchTimeout: NodeJS.Timeout | null = null;
+    private readonly BATCH_DELAY = 50; // ms
+
+    queueRequest<T, F>(
+        params: PaginatedTableParams<T, F>,
+        signal?: AbortSignal,
+    ): Promise<{data: PaginatedTableData<T>} | {error: unknown}> {
+        return new Promise((resolve, reject) => {
+            const batchKey = this.createBatchKey(params);
+
+            if (!this.requestQueue.has(batchKey)) {
+                this.requestQueue.set(batchKey, []);
+            }
+
+            this.requestQueue.get(batchKey)!.push({
+                params,
+                resolve,
+                reject,
+                signal,
+            });
+
+            // Reset the batch timeout
+            if (this.batchTimeout) {
+                clearTimeout(this.batchTimeout);
+            }
+
+            this.batchTimeout = setTimeout(() => {
+                this.processBatch();
+            }, this.BATCH_DELAY);
+        });
+    }
+
+    private createBatchKey<T, F>(params: PaginatedTableParams<T, F>): string {
+        return JSON.stringify({
+            tableName: params.tableName,
+            filters: params.filters,
+            sortParams: params.sortParams,
+            columnsIds: params.columnsIds,
+            limit: params.limit,
+        });
+    }
+
+    private groupConsecutiveRequests<T, F>(requests: QueuedRequest<T, F>[]): BatchGroup<T, F>[] {
+        if (requests.length === 0) {
+            return [];
+        }
+
+        const sorted = requests.sort((a, b) => a.params.offset - b.params.offset);
+        const groups: BatchGroup<T, F>[] = [];
+        let currentGroup: QueuedRequest<T, F>[] = [sorted[0]];
+
+        const limit = sorted[0].params.limit;
+
+        for (let i = 1; i < sorted.length; i++) {
+            const expectedOffset = currentGroup[currentGroup.length - 1].params.offset + limit;
+
+            if (sorted[i].params.offset === expectedOffset) {
+                // Consecutive request
+                currentGroup.push(sorted[i]);
+            } else {
+                // Non-consecutive, create a new group
+                groups.push(this.createBatchGroup(currentGroup));
+                currentGroup = [sorted[i]];
+            }
+        }
+
+        // Add the last group
+        groups.push(this.createBatchGroup(currentGroup));
+
+        return groups;
+    }
+
+    private createBatchGroup<T, F>(requests: QueuedRequest<T, F>[]): BatchGroup<T, F> {
+        const minOffset = Math.min(...requests.map((r) => r.params.offset));
+        const maxOffset = Math.max(...requests.map((r) => r.params.offset));
+        const limit = requests[0].params.limit;
+        const totalLimit = requests.length * limit;
+
+        return {
+            requests,
+            batchKey: this.createBatchKey(requests[0].params),
+            minOffset,
+            maxOffset,
+            totalLimit,
+        };
+    }
+
+    private async executeBatch<T, F>(group: BatchGroup<T, F>): Promise<void> {
+        const firstRequest = group.requests[0];
+        const batchParams = {
+            ...firstRequest.params,
+            offset: group.minOffset,
+            limit: group.totalLimit,
+        };
+
+        try {
+            const response = await firstRequest.params.fetchData({
+                limit: batchParams.limit,
+                offset: batchParams.offset,
+                filters: batchParams.filters,
+                sortParams: batchParams.sortParams,
+                columnsIds: batchParams.columnsIds,
+                signal: firstRequest.signal,
+            });
+
+            // Split the response data among individual requests
+            this.splitAndDistributeResponse(group, response);
+        } catch (error) {
+            // If batch fails, reject all requests in the group
+            group.requests.forEach((request) => {
+                request.resolve({error});
+            });
+        }
+    }
+
+    private splitAndDistributeResponse<T, F>(
+        group: BatchGroup<T, F>,
+        batchResponse: PaginatedTableData<T>,
+    ): void {
+        const limit = group.requests[0].params.limit;
+
+        group.requests.forEach((request, index) => {
+            const startIndex = index * limit;
+            const endIndex = startIndex + limit;
+            const chunkData = batchResponse.data.slice(startIndex, endIndex);
+
+            const chunkResponse: PaginatedTableData<T> = {
+                data: chunkData,
+                total: batchResponse.total,
+                found: batchResponse.found,
+            };
+
+            request.resolve({data: chunkResponse});
+        });
+    }
+
+    private async processBatch(): Promise<void> {
+        const allQueues = Array.from(this.requestQueue.entries());
+        this.requestQueue.clear();
+        this.batchTimeout = null;
+
+        for (const [_batchKey, requests] of allQueues) {
+            const groups = this.groupConsecutiveRequests(requests);
+
+            // Execute each group (consecutive chunks) as a separate batch
+            await Promise.all(groups.map((group) => this.executeBatch(group)));
+        }
+    }
+}
+
+// Singleton instance
+export const requestBatcher = new RequestBatcher();
+
+// Enhanced base query that uses batching
+export const createBatchedBaseQuery = (originalBaseQuery: BaseQueryFn): BaseQueryFn => {
+    return async (args, api, extraOptions) => {
+        // Check if this is a fetchTableChunk request
+        if (typeof args === 'object' && args && 'fetchData' in args) {
+            const params = args as PaginatedTableParams<any, any>;
+            return await requestBatcher.queueRequest(params);
+        }
+
+        // For non-batchable requests, use original base query
+        return originalBaseQuery(args, api, extraOptions);
+    };
+};
diff --git a/src/components/PaginatedTable/useScrollBasedChunks.ts b/src/components/PaginatedTable/useScrollBasedChunks.ts
index 5bfc467fe..b6a7c0637 100644
--- a/src/components/PaginatedTable/useScrollBasedChunks.ts
+++ b/src/components/PaginatedTable/useScrollBasedChunks.ts
@@ -22,7 +22,7 @@ const isSafari = /^((?!chrome|android).)*safari/i.test(navigator.userAgent);
 
 // Bad performance in Safari - reduce overscan counts
 const DEFAULT_RENDER_OVERSCAN = isSafari ? 1 : 2;
-const DEFAULT_FETCH_OVERSCAN = isSafari ? 2 : 4;
+const DEFAULT_FETCH_OVERSCAN = 4;
 
 export const useScrollBasedChunks = ({
     scrollContainerRef,
diff --git a/src/store/reducers/tableData.ts b/src/store/reducers/tableData.ts
index a22379f32..50517f9cc 100644
--- a/src/store/reducers/tableData.ts
+++ b/src/store/reducers/tableData.ts
@@ -1,6 +1,7 @@
 import type {BaseQueryFn, EndpointBuilder} from '@reduxjs/toolkit/query';
 
 import type {FetchData, PaginatedTableData, SortParams} from '../../components/PaginatedTable';
+import {requestBatcher} from '../../components/PaginatedTable/requestBatcher';
 
 import {api} from './api';
 
@@ -18,19 +19,29 @@ function endpoints<T, F>(build: EndpointBuilder<BaseQueryFn, string, string>) {
     return {
         fetchTableChunk: build.query<PaginatedTableData<T>, PaginatedTableParams<T, F>>({
             queryFn: async (
-                {offset, limit, sortParams, filters, columnsIds, fetchData},
+                {offset, limit, sortParams, filters, columnsIds, fetchData, tableName},
                 {signal},
             ) => {
                 try {
-                    const response = await fetchData({
-                        limit,
-                        offset,
-                        filters,
-                        sortParams,
-                        columnsIds,
+                    // Use the request batcher for potential merging
+                    const result = await requestBatcher.queueRequest(
+                        {
+                            offset,
+                            limit,
+                            sortParams,
+                            filters,
+                            columnsIds,
+                            fetchData,
+                            tableName,
+                        },
                         signal,
-                    });
-                    return {data: response};
+                    );
+
+                    if ('error' in result) {
+                        return {error: result.error};
+                    }
+
+                    return result;
                 } catch (error) {
                     return {error: error};
                 }
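
The net effect of patch 05: chunk queries that land within the 50 ms BATCH_DELAY window and ask for consecutive offsets collapse into one upstream fetchData call, whose result is then sliced back out per caller. A rough usage sketch under stated assumptions; the toy fetchData below is invented for illustration and ignores filters, sorting, and abort signals.

```ts
import {requestBatcher} from './requestBatcher';

// A toy fetchData for this illustration only: serves rows from a fake 1000-row dataset.
const fetchData = async ({offset, limit}: {offset: number; limit: number}) => ({
    data: Array.from({length: limit}, (_, i) => ({id: offset + i})),
    total: 1000,
    found: 1000,
});

const common = {
    fetchData,
    filters: undefined,
    limit: 50,
    sortParams: undefined,
    columnsIds: ['id'],
    tableName: 'demo',
};

// Queued within one BATCH_DELAY window with consecutive offsets (100, 150),
// these two calls share a single batched request of offset 100 / limit 100,
// and each caller resolves with its own 50-row slice.
void Promise.all([
    requestBatcher.queueRequest({...common, offset: 100}),
    requestBatcher.queueRequest({...common, offset: 150}),
]);
```
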
From 71d7a128e30c327cb8038f45aafca578d8a32008 Mon Sep 17 00:00:00 2001
From: Anton Standrik
Date: Wed, 28 May 2025 13:45:39 +0300
Subject: [PATCH 06/21] fix: column widths

---
 src/components/PaginatedTable/requestBatcher.ts | 17 +----------------
 1 file changed, 1 insertion(+), 16 deletions(-)

diff --git a/src/components/PaginatedTable/requestBatcher.ts b/src/components/PaginatedTable/requestBatcher.ts
index ad50bdc8f..60cdadfc1 100644
--- a/src/components/PaginatedTable/requestBatcher.ts
+++ b/src/components/PaginatedTable/requestBatcher.ts
@@ -1,5 +1,3 @@
-import type {BaseQueryFn} from '@reduxjs/toolkit/query';
-
 import type {FetchData, PaginatedTableData, SortParams} from './types';
 
 interface PaginatedTableParams<T, F> {
@@ -156,6 +154,7 @@ class RequestBatcher {
         const chunkData = batchResponse.data.slice(startIndex, endIndex);
 
         const chunkResponse: PaginatedTableData<T> = {
+            ...batchResponse,
             data: chunkData,
             total: batchResponse.total,
             found: batchResponse.found,
@@ -181,17 +180,3 @@ class RequestBatcher {
 
 // Singleton instance
 export const requestBatcher = new RequestBatcher();
-
-// Enhanced base query that uses batching
-export const createBatchedBaseQuery = (originalBaseQuery: BaseQueryFn): BaseQueryFn => {
-    return async (args, api, extraOptions) => {
-        // Check if this is a fetchTableChunk request
-        if (typeof args === 'object' && args && 'fetchData' in args) {
-            const params = args as PaginatedTableParams<any, any>;
-            return await 
requestBatcher.queueRequest(params); - } - - // For non-batchable requests, use original base query - return originalBaseQuery(args, api, extraOptions); - }; -}; From 8ef43cd05443c3566e62c6f01fd231d53199675b Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Wed, 28 May 2025 15:22:27 +0300 Subject: [PATCH 07/21] fix: mark scroll as passive --- src/components/PaginatedTable/useScrollBasedChunks.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/components/PaginatedTable/useScrollBasedChunks.ts b/src/components/PaginatedTable/useScrollBasedChunks.ts index b6a7c0637..d6aa1c55d 100644 --- a/src/components/PaginatedTable/useScrollBasedChunks.ts +++ b/src/components/PaginatedTable/useScrollBasedChunks.ts @@ -97,7 +97,7 @@ export const useScrollBasedChunks = ({ const throttledHandleScroll = rafThrottle(handleScroll); - container.addEventListener('scroll', throttledHandleScroll); + container.addEventListener('scroll', throttledHandleScroll, {passive: true}); return () => { container.removeEventListener('scroll', throttledHandleScroll); }; From 4b78861237a269a76fe062d757ea704a2ab18495 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Wed, 28 May 2025 16:11:25 +0300 Subject: [PATCH 08/21] Revert "Revert "Revert "fix: remove mocks""" This reverts commit 4a3a487a2a71091e263615e3df5bd531e2ca1592. --- .../PaginatedStorageNodesTable/getNodes.ts | 50 ++-- .../PaginatedStorageNodesTable/nodes.ts | 224 ++++++++++++++++++ 2 files changed, 257 insertions(+), 17 deletions(-) create mode 100644 src/containers/Storage/PaginatedStorageNodesTable/nodes.ts diff --git a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts index c8cc7d7f5..c8ed3d393 100644 --- a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts +++ b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts @@ -13,6 +13,8 @@ import {prepareSortValue} from '../../../utils/filters'; import {getUptimeParamValue} from '../../../utils/nodes'; import {getRequiredDataFields} from '../../../utils/tableUtils/getRequiredDataFields'; +import {generateNodes} from './nodes'; + export const getStorageNodes: FetchData< PreparedStorageNode, PreparedStorageNodeFilters, @@ -43,23 +45,37 @@ export const getStorageNodes: FetchData< const sort = sortField ? 
prepareSortValue(sortField, sortOrder) : undefined; const dataFieldsRequired = getRequiredDataFields(columnsIds, NODES_COLUMNS_TO_DATA_FIELDS); - - const response = await window.api.viewer.getNodes({ - type, - storage, - limit, - offset, - sort, - filter: searchValue, - uptime: getUptimeParamValue(nodesUptimeFilter), - with: visibleEntities, - database, - node_id: nodeId, - group_id: groupId, - filter_group: filterGroup, - filter_group_by: filterGroupBy, - fieldsRequired: dataFieldsRequired, - }); + let response; + const urlParams = new URLSearchParams(window.location.search); + if (urlParams.get('mocks')) { + // Get mock configuration from URL parameters or use defaults + const pdisks = parseInt(urlParams.get('pdisks') || '10', 10); + const vdisksPerPDisk = parseInt(urlParams.get('vdisksPerPDisk') || '2', 10); + const totalNodes = parseInt(urlParams.get('totalNodes') || '50', 10); + response = generateNodes(totalNodes, { + maxVdisksPerPDisk: vdisksPerPDisk, + maxPdisks: pdisks, + offset, + limit, + }); + } else { + response = await window.api.viewer.getNodes({ + type, + storage, + limit, + offset, + sort, + filter: searchValue, + uptime: getUptimeParamValue(nodesUptimeFilter), + with: visibleEntities, + database, + node_id: nodeId, + group_id: groupId, + filter_group: filterGroup, + filter_group_by: filterGroupBy, + fieldsRequired: dataFieldsRequired, + }); + } const preparedResponse = prepareStorageNodesResponse(response); return { data: preparedResponse.nodes || [], diff --git a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts new file mode 100644 index 000000000..1c4cfe187 --- /dev/null +++ b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts @@ -0,0 +1,224 @@ +import {EFlag} from '../../../types/api/enums'; +import type { + TEndpoint, + TNodeInfo, + TNodesInfo, + TPoolStats, + TSystemStateInfo, +} from '../../../types/api/nodes'; +import {TPDiskState} from '../../../types/api/pdisk'; +import {EVDiskState} from '../../../types/api/vdisk'; + +// Different disk sizes to simulate variety (in bytes) +const DISK_SIZES = [ + '68719476736', // 64 GB + '137438953472', // 128 GB + '274877906944', // 256 GB + '549755813888', // 512 GB + '1099511627776', // 1 TB +]; + +const getRandomDiskSize = () => DISK_SIZES[Math.floor(Math.random() * DISK_SIZES.length)]; + +const generatePoolStats = (count = 5): TPoolStats[] => { + const poolNames = ['System', 'User', 'Batch', 'IO', 'IC'] as const; + return poolNames.slice(0, count).map((Name) => ({ + Name, + Usage: Math.random() * 0.02, + Threads: Math.floor(Math.random() * 3) + 1, + })); +}; + +const generateEndpoints = (): TEndpoint[] => [ + {Name: 'ic', Address: ':19001'}, + {Name: 'http-mon', Address: ':8765'}, + {Name: 'grpcs', Address: ':2135'}, + {Name: 'grpc', Address: ':2136'}, +]; + +const generateSystemState = (nodeId: number): TSystemStateInfo => ({ + StartTime: '1734358137851', + ChangeTime: '1734358421375', + LoadAverage: [3.381347656, 2.489257813, 1.279296875], + NumberOfCpus: 8, + SystemState: EFlag.Green, + NodeId: nodeId, + Host: `localhost-${nodeId}`, + Version: 'main.95ce0df', + PoolStats: generatePoolStats(), + Endpoints: generateEndpoints(), + Roles: ['Bootstrapper', 'StateStorage', 'StateStorageBoard', 'SchemeBoard', 'Storage'], + MemoryLimit: '2147483648', + MaxDiskUsage: 0.002349853516, + Location: { + DataCenter: '1', + Rack: '1', + Unit: '1', + }, + TotalSessions: 0, + CoresUsed: 0.07583969556, + CoresTotal: 8, +}); + +const generatePDisk = (nodeId: 
number, pdiskId: number, totalSize = '68719476736') => ({ + PDiskId: pdiskId, + ChangeTime: '1734358142074', + Path: `/ydb_data/pdisk${pdiskId}l3ki78no.data`, + Guid: pdiskId.toString(), + Category: '0', + TotalSize: totalSize, + AvailableSize: (Number(totalSize) * 0.9).toString(), // 90% available by default + State: TPDiskState.Normal, + NodeId: nodeId, + Device: EFlag.Green, + Realtime: EFlag.Green, + SerialNumber: '', + SystemSize: '213909504', + LogUsedSize: '35651584', + LogTotalSize: '68486692864', + EnforcedDynamicSlotSize: '22817013760', +}); + +const generateVDisk = (nodeId: number, vdiskId: number, pdiskId: number) => ({ + VDiskId: { + GroupID: vdiskId, + GroupGeneration: 1, + Ring: 0, + Domain: 0, + VDisk: 0, + }, + ChangeTime: '1734358420919', + PDiskId: pdiskId, + VDiskSlotId: vdiskId, + Guid: '1', + Kind: '0', + NodeId: nodeId, + VDiskState: EVDiskState.OK, + DiskSpace: EFlag.Green, + SatisfactionRank: { + FreshRank: { + Flag: EFlag.Green, + }, + LevelRank: { + Flag: EFlag.Green, + }, + }, + Replicated: true, + ReplicationProgress: 1, + ReplicationSecondsRemaining: 0, + AllocatedSize: '0', + AvailableSize: '22817013760', + HasUnreadableBlobs: false, + IncarnationGuid: '11528832187803248876', + InstanceGuid: '14836434871903384493', + FrontQueues: EFlag.Green, + StoragePoolName: 'static', + ReadThroughput: '0', + WriteThroughput: '420', +}); + +interface NodeGeneratorOptions { + maxVdisksPerPDisk?: number; + maxPdisks?: number; +} + +const DEFAULT_OPTIONS: NodeGeneratorOptions = { + maxVdisksPerPDisk: 3, + maxPdisks: 4, +}; + +const generateNode = (nodeId: number, options: NodeGeneratorOptions = {}): TNodeInfo => { + const maxPdisks = options.maxPdisks ?? DEFAULT_OPTIONS.maxPdisks!; + const maxVdisksPerPDisk = options.maxVdisksPerPDisk ?? DEFAULT_OPTIONS.maxVdisksPerPDisk!; + + // Generate a random number of pdisks up to maxPdisks + const pdisksCount = Math.floor(Math.random() * maxPdisks) + 1; + + // For each pdisk, generate a random number of vdisks up to maxVdisksPerPDisk + const pdiskVdisksCounts = Array.from({length: pdisksCount}, () => + Math.floor(Math.random() * maxVdisksPerPDisk), + ); + const totalVdisks = pdiskVdisksCounts.reduce((sum: number, count: number) => sum + count, 0); + + return { + NodeId: nodeId, + UptimeSeconds: 284, + CpuUsage: 0.00947996, + DiskSpaceUsage: 0.234985, + SystemState: generateSystemState(nodeId), + PDisks: Array.from({length: pdisksCount}, (_, i) => + generatePDisk(nodeId, i + 1, getRandomDiskSize()), + ), + VDisks: Array.from({length: totalVdisks}, (_, i) => { + // Find which pdisk this vdisk belongs to based on the distribution + let pdiskIndex = 0; + let vdiskCount = pdiskVdisksCounts[0]; + while (i >= vdiskCount && pdiskIndex < pdisksCount - 1) { + pdiskIndex++; + vdiskCount += pdiskVdisksCounts[pdiskIndex]; + } + return generateVDisk(nodeId, i, pdiskIndex + 1); + }), + }; +}; + +interface GenerateNodesOptions extends NodeGeneratorOptions { + offset?: number; + limit?: number; +} + +// Keep a cache of generated nodes to maintain consistency between paginated requests +let cachedNodes: TNodeInfo[] | null = null; +let currentTotalNodes = 50; // Default number of nodes + +export const generateNodes = (count?: number, options: GenerateNodesOptions = {}): TNodesInfo => { + const totalNodes = count ?? 
currentTotalNodes; + const {offset = 0, limit = totalNodes, maxVdisksPerPDisk, maxPdisks} = options; + + // Reset cache if total nodes count changes + if (totalNodes !== currentTotalNodes) { + cachedNodes = null; + currentTotalNodes = totalNodes; + } + + // Generate or use cached nodes + if (!cachedNodes) { + cachedNodes = Array.from({length: totalNodes}, (_, i) => + generateNode(i + 1, {maxVdisksPerPDisk, maxPdisks}), + ); + } + + // Calculate MaximumSlotsPerDisk and MaximumDisksPerNode across all nodes + let maxSlotsPerDisk = 0; + let maxDisksPerNode = 0; + + cachedNodes.forEach((node) => { + // Count pdisks per node + if (node.PDisks) { + maxDisksPerNode = Math.max(maxDisksPerNode, node.PDisks.length); + } + + // Count vdisks per pdisk + if (node.VDisks) { + const pdiskVdiskCounts = new Map(); + node.VDisks.forEach((vdisk) => { + if (typeof vdisk.PDiskId === 'number') { + const count = (pdiskVdiskCounts.get(vdisk.PDiskId) || 0) + 1; + pdiskVdiskCounts.set(vdisk.PDiskId, count); + maxSlotsPerDisk = Math.max(maxSlotsPerDisk, count); + } + }); + } + }); + + // Get the requested slice of nodes + const paginatedNodes = cachedNodes.slice(offset, offset + limit); + + return { + TotalNodes: totalNodes.toString(), + FoundNodes: totalNodes.toString(), + Nodes: paginatedNodes, + MaximumSlotsPerDisk: maxSlotsPerDisk.toString(), + MaximumDisksPerNode: maxDisksPerNode.toString(), + }; +}; From 4db4b3edffe325abeebfa065f7d49ec6900957e7 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Wed, 28 May 2025 17:11:34 +0300 Subject: [PATCH 09/21] Revert "Revert "Revert "Revert "fix: remove mocks"""" This reverts commit 4b78861237a269a76fe062d757ea704a2ab18495. --- .../PaginatedStorageNodesTable/getNodes.ts | 50 ++-- .../PaginatedStorageNodesTable/nodes.ts | 224 ------------------ 2 files changed, 17 insertions(+), 257 deletions(-) delete mode 100644 src/containers/Storage/PaginatedStorageNodesTable/nodes.ts diff --git a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts index c8ed3d393..c8cc7d7f5 100644 --- a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts +++ b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts @@ -13,8 +13,6 @@ import {prepareSortValue} from '../../../utils/filters'; import {getUptimeParamValue} from '../../../utils/nodes'; import {getRequiredDataFields} from '../../../utils/tableUtils/getRequiredDataFields'; -import {generateNodes} from './nodes'; - export const getStorageNodes: FetchData< PreparedStorageNode, PreparedStorageNodeFilters, @@ -45,37 +43,23 @@ export const getStorageNodes: FetchData< const sort = sortField ? 
prepareSortValue(sortField, sortOrder) : undefined; const dataFieldsRequired = getRequiredDataFields(columnsIds, NODES_COLUMNS_TO_DATA_FIELDS); - let response; - const urlParams = new URLSearchParams(window.location.search); - if (urlParams.get('mocks')) { - // Get mock configuration from URL parameters or use defaults - const pdisks = parseInt(urlParams.get('pdisks') || '10', 10); - const vdisksPerPDisk = parseInt(urlParams.get('vdisksPerPDisk') || '2', 10); - const totalNodes = parseInt(urlParams.get('totalNodes') || '50', 10); - response = generateNodes(totalNodes, { - maxVdisksPerPDisk: vdisksPerPDisk, - maxPdisks: pdisks, - offset, - limit, - }); - } else { - response = await window.api.viewer.getNodes({ - type, - storage, - limit, - offset, - sort, - filter: searchValue, - uptime: getUptimeParamValue(nodesUptimeFilter), - with: visibleEntities, - database, - node_id: nodeId, - group_id: groupId, - filter_group: filterGroup, - filter_group_by: filterGroupBy, - fieldsRequired: dataFieldsRequired, - }); - } + + const response = await window.api.viewer.getNodes({ + type, + storage, + limit, + offset, + sort, + filter: searchValue, + uptime: getUptimeParamValue(nodesUptimeFilter), + with: visibleEntities, + database, + node_id: nodeId, + group_id: groupId, + filter_group: filterGroup, + filter_group_by: filterGroupBy, + fieldsRequired: dataFieldsRequired, + }); const preparedResponse = prepareStorageNodesResponse(response); return { data: preparedResponse.nodes || [], diff --git a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts deleted file mode 100644 index 1c4cfe187..000000000 --- a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts +++ /dev/null @@ -1,224 +0,0 @@ -import {EFlag} from '../../../types/api/enums'; -import type { - TEndpoint, - TNodeInfo, - TNodesInfo, - TPoolStats, - TSystemStateInfo, -} from '../../../types/api/nodes'; -import {TPDiskState} from '../../../types/api/pdisk'; -import {EVDiskState} from '../../../types/api/vdisk'; - -// Different disk sizes to simulate variety (in bytes) -const DISK_SIZES = [ - '68719476736', // 64 GB - '137438953472', // 128 GB - '274877906944', // 256 GB - '549755813888', // 512 GB - '1099511627776', // 1 TB -]; - -const getRandomDiskSize = () => DISK_SIZES[Math.floor(Math.random() * DISK_SIZES.length)]; - -const generatePoolStats = (count = 5): TPoolStats[] => { - const poolNames = ['System', 'User', 'Batch', 'IO', 'IC'] as const; - return poolNames.slice(0, count).map((Name) => ({ - Name, - Usage: Math.random() * 0.02, - Threads: Math.floor(Math.random() * 3) + 1, - })); -}; - -const generateEndpoints = (): TEndpoint[] => [ - {Name: 'ic', Address: ':19001'}, - {Name: 'http-mon', Address: ':8765'}, - {Name: 'grpcs', Address: ':2135'}, - {Name: 'grpc', Address: ':2136'}, -]; - -const generateSystemState = (nodeId: number): TSystemStateInfo => ({ - StartTime: '1734358137851', - ChangeTime: '1734358421375', - LoadAverage: [3.381347656, 2.489257813, 1.279296875], - NumberOfCpus: 8, - SystemState: EFlag.Green, - NodeId: nodeId, - Host: `localhost-${nodeId}`, - Version: 'main.95ce0df', - PoolStats: generatePoolStats(), - Endpoints: generateEndpoints(), - Roles: ['Bootstrapper', 'StateStorage', 'StateStorageBoard', 'SchemeBoard', 'Storage'], - MemoryLimit: '2147483648', - MaxDiskUsage: 0.002349853516, - Location: { - DataCenter: '1', - Rack: '1', - Unit: '1', - }, - TotalSessions: 0, - CoresUsed: 0.07583969556, - CoresTotal: 8, -}); - -const generatePDisk = (nodeId: 
number, pdiskId: number, totalSize = '68719476736') => ({ - PDiskId: pdiskId, - ChangeTime: '1734358142074', - Path: `/ydb_data/pdisk${pdiskId}l3ki78no.data`, - Guid: pdiskId.toString(), - Category: '0', - TotalSize: totalSize, - AvailableSize: (Number(totalSize) * 0.9).toString(), // 90% available by default - State: TPDiskState.Normal, - NodeId: nodeId, - Device: EFlag.Green, - Realtime: EFlag.Green, - SerialNumber: '', - SystemSize: '213909504', - LogUsedSize: '35651584', - LogTotalSize: '68486692864', - EnforcedDynamicSlotSize: '22817013760', -}); - -const generateVDisk = (nodeId: number, vdiskId: number, pdiskId: number) => ({ - VDiskId: { - GroupID: vdiskId, - GroupGeneration: 1, - Ring: 0, - Domain: 0, - VDisk: 0, - }, - ChangeTime: '1734358420919', - PDiskId: pdiskId, - VDiskSlotId: vdiskId, - Guid: '1', - Kind: '0', - NodeId: nodeId, - VDiskState: EVDiskState.OK, - DiskSpace: EFlag.Green, - SatisfactionRank: { - FreshRank: { - Flag: EFlag.Green, - }, - LevelRank: { - Flag: EFlag.Green, - }, - }, - Replicated: true, - ReplicationProgress: 1, - ReplicationSecondsRemaining: 0, - AllocatedSize: '0', - AvailableSize: '22817013760', - HasUnreadableBlobs: false, - IncarnationGuid: '11528832187803248876', - InstanceGuid: '14836434871903384493', - FrontQueues: EFlag.Green, - StoragePoolName: 'static', - ReadThroughput: '0', - WriteThroughput: '420', -}); - -interface NodeGeneratorOptions { - maxVdisksPerPDisk?: number; - maxPdisks?: number; -} - -const DEFAULT_OPTIONS: NodeGeneratorOptions = { - maxVdisksPerPDisk: 3, - maxPdisks: 4, -}; - -const generateNode = (nodeId: number, options: NodeGeneratorOptions = {}): TNodeInfo => { - const maxPdisks = options.maxPdisks ?? DEFAULT_OPTIONS.maxPdisks!; - const maxVdisksPerPDisk = options.maxVdisksPerPDisk ?? DEFAULT_OPTIONS.maxVdisksPerPDisk!; - - // Generate a random number of pdisks up to maxPdisks - const pdisksCount = Math.floor(Math.random() * maxPdisks) + 1; - - // For each pdisk, generate a random number of vdisks up to maxVdisksPerPDisk - const pdiskVdisksCounts = Array.from({length: pdisksCount}, () => - Math.floor(Math.random() * maxVdisksPerPDisk), - ); - const totalVdisks = pdiskVdisksCounts.reduce((sum: number, count: number) => sum + count, 0); - - return { - NodeId: nodeId, - UptimeSeconds: 284, - CpuUsage: 0.00947996, - DiskSpaceUsage: 0.234985, - SystemState: generateSystemState(nodeId), - PDisks: Array.from({length: pdisksCount}, (_, i) => - generatePDisk(nodeId, i + 1, getRandomDiskSize()), - ), - VDisks: Array.from({length: totalVdisks}, (_, i) => { - // Find which pdisk this vdisk belongs to based on the distribution - let pdiskIndex = 0; - let vdiskCount = pdiskVdisksCounts[0]; - while (i >= vdiskCount && pdiskIndex < pdisksCount - 1) { - pdiskIndex++; - vdiskCount += pdiskVdisksCounts[pdiskIndex]; - } - return generateVDisk(nodeId, i, pdiskIndex + 1); - }), - }; -}; - -interface GenerateNodesOptions extends NodeGeneratorOptions { - offset?: number; - limit?: number; -} - -// Keep a cache of generated nodes to maintain consistency between paginated requests -let cachedNodes: TNodeInfo[] | null = null; -let currentTotalNodes = 50; // Default number of nodes - -export const generateNodes = (count?: number, options: GenerateNodesOptions = {}): TNodesInfo => { - const totalNodes = count ?? 
currentTotalNodes; - const {offset = 0, limit = totalNodes, maxVdisksPerPDisk, maxPdisks} = options; - - // Reset cache if total nodes count changes - if (totalNodes !== currentTotalNodes) { - cachedNodes = null; - currentTotalNodes = totalNodes; - } - - // Generate or use cached nodes - if (!cachedNodes) { - cachedNodes = Array.from({length: totalNodes}, (_, i) => - generateNode(i + 1, {maxVdisksPerPDisk, maxPdisks}), - ); - } - - // Calculate MaximumSlotsPerDisk and MaximumDisksPerNode across all nodes - let maxSlotsPerDisk = 0; - let maxDisksPerNode = 0; - - cachedNodes.forEach((node) => { - // Count pdisks per node - if (node.PDisks) { - maxDisksPerNode = Math.max(maxDisksPerNode, node.PDisks.length); - } - - // Count vdisks per pdisk - if (node.VDisks) { - const pdiskVdiskCounts = new Map(); - node.VDisks.forEach((vdisk) => { - if (typeof vdisk.PDiskId === 'number') { - const count = (pdiskVdiskCounts.get(vdisk.PDiskId) || 0) + 1; - pdiskVdiskCounts.set(vdisk.PDiskId, count); - maxSlotsPerDisk = Math.max(maxSlotsPerDisk, count); - } - }); - } - }); - - // Get the requested slice of nodes - const paginatedNodes = cachedNodes.slice(offset, offset + limit); - - return { - TotalNodes: totalNodes.toString(), - FoundNodes: totalNodes.toString(), - Nodes: paginatedNodes, - MaximumSlotsPerDisk: maxSlotsPerDisk.toString(), - MaximumDisksPerNode: maxDisksPerNode.toString(), - }; -}; From 843ec953bb0679d5762260425604008cdce984f6 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Wed, 28 May 2025 17:50:51 +0300 Subject: [PATCH 10/21] Revert "Revert "Revert "Revert "Revert "fix: remove mocks""""" This reverts commit 4db4b3edffe325abeebfa065f7d49ec6900957e7. --- .../PaginatedStorageNodesTable/getNodes.ts | 50 ++-- .../PaginatedStorageNodesTable/nodes.ts | 224 ++++++++++++++++++ 2 files changed, 257 insertions(+), 17 deletions(-) create mode 100644 src/containers/Storage/PaginatedStorageNodesTable/nodes.ts diff --git a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts index c8cc7d7f5..c8ed3d393 100644 --- a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts +++ b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts @@ -13,6 +13,8 @@ import {prepareSortValue} from '../../../utils/filters'; import {getUptimeParamValue} from '../../../utils/nodes'; import {getRequiredDataFields} from '../../../utils/tableUtils/getRequiredDataFields'; +import {generateNodes} from './nodes'; + export const getStorageNodes: FetchData< PreparedStorageNode, PreparedStorageNodeFilters, @@ -43,23 +45,37 @@ export const getStorageNodes: FetchData< const sort = sortField ? 
prepareSortValue(sortField, sortOrder) : undefined; const dataFieldsRequired = getRequiredDataFields(columnsIds, NODES_COLUMNS_TO_DATA_FIELDS); - - const response = await window.api.viewer.getNodes({ - type, - storage, - limit, - offset, - sort, - filter: searchValue, - uptime: getUptimeParamValue(nodesUptimeFilter), - with: visibleEntities, - database, - node_id: nodeId, - group_id: groupId, - filter_group: filterGroup, - filter_group_by: filterGroupBy, - fieldsRequired: dataFieldsRequired, - }); + let response; + const urlParams = new URLSearchParams(window.location.search); + if (urlParams.get('mocks')) { + // Get mock configuration from URL parameters or use defaults + const pdisks = parseInt(urlParams.get('pdisks') || '10', 10); + const vdisksPerPDisk = parseInt(urlParams.get('vdisksPerPDisk') || '2', 10); + const totalNodes = parseInt(urlParams.get('totalNodes') || '50', 10); + response = generateNodes(totalNodes, { + maxVdisksPerPDisk: vdisksPerPDisk, + maxPdisks: pdisks, + offset, + limit, + }); + } else { + response = await window.api.viewer.getNodes({ + type, + storage, + limit, + offset, + sort, + filter: searchValue, + uptime: getUptimeParamValue(nodesUptimeFilter), + with: visibleEntities, + database, + node_id: nodeId, + group_id: groupId, + filter_group: filterGroup, + filter_group_by: filterGroupBy, + fieldsRequired: dataFieldsRequired, + }); + } const preparedResponse = prepareStorageNodesResponse(response); return { data: preparedResponse.nodes || [], diff --git a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts new file mode 100644 index 000000000..1c4cfe187 --- /dev/null +++ b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts @@ -0,0 +1,224 @@ +import {EFlag} from '../../../types/api/enums'; +import type { + TEndpoint, + TNodeInfo, + TNodesInfo, + TPoolStats, + TSystemStateInfo, +} from '../../../types/api/nodes'; +import {TPDiskState} from '../../../types/api/pdisk'; +import {EVDiskState} from '../../../types/api/vdisk'; + +// Different disk sizes to simulate variety (in bytes) +const DISK_SIZES = [ + '68719476736', // 64 GB + '137438953472', // 128 GB + '274877906944', // 256 GB + '549755813888', // 512 GB + '1099511627776', // 1 TB +]; + +const getRandomDiskSize = () => DISK_SIZES[Math.floor(Math.random() * DISK_SIZES.length)]; + +const generatePoolStats = (count = 5): TPoolStats[] => { + const poolNames = ['System', 'User', 'Batch', 'IO', 'IC'] as const; + return poolNames.slice(0, count).map((Name) => ({ + Name, + Usage: Math.random() * 0.02, + Threads: Math.floor(Math.random() * 3) + 1, + })); +}; + +const generateEndpoints = (): TEndpoint[] => [ + {Name: 'ic', Address: ':19001'}, + {Name: 'http-mon', Address: ':8765'}, + {Name: 'grpcs', Address: ':2135'}, + {Name: 'grpc', Address: ':2136'}, +]; + +const generateSystemState = (nodeId: number): TSystemStateInfo => ({ + StartTime: '1734358137851', + ChangeTime: '1734358421375', + LoadAverage: [3.381347656, 2.489257813, 1.279296875], + NumberOfCpus: 8, + SystemState: EFlag.Green, + NodeId: nodeId, + Host: `localhost-${nodeId}`, + Version: 'main.95ce0df', + PoolStats: generatePoolStats(), + Endpoints: generateEndpoints(), + Roles: ['Bootstrapper', 'StateStorage', 'StateStorageBoard', 'SchemeBoard', 'Storage'], + MemoryLimit: '2147483648', + MaxDiskUsage: 0.002349853516, + Location: { + DataCenter: '1', + Rack: '1', + Unit: '1', + }, + TotalSessions: 0, + CoresUsed: 0.07583969556, + CoresTotal: 8, +}); + +const generatePDisk = (nodeId: 
number, pdiskId: number, totalSize = '68719476736') => ({ + PDiskId: pdiskId, + ChangeTime: '1734358142074', + Path: `/ydb_data/pdisk${pdiskId}l3ki78no.data`, + Guid: pdiskId.toString(), + Category: '0', + TotalSize: totalSize, + AvailableSize: (Number(totalSize) * 0.9).toString(), // 90% available by default + State: TPDiskState.Normal, + NodeId: nodeId, + Device: EFlag.Green, + Realtime: EFlag.Green, + SerialNumber: '', + SystemSize: '213909504', + LogUsedSize: '35651584', + LogTotalSize: '68486692864', + EnforcedDynamicSlotSize: '22817013760', +}); + +const generateVDisk = (nodeId: number, vdiskId: number, pdiskId: number) => ({ + VDiskId: { + GroupID: vdiskId, + GroupGeneration: 1, + Ring: 0, + Domain: 0, + VDisk: 0, + }, + ChangeTime: '1734358420919', + PDiskId: pdiskId, + VDiskSlotId: vdiskId, + Guid: '1', + Kind: '0', + NodeId: nodeId, + VDiskState: EVDiskState.OK, + DiskSpace: EFlag.Green, + SatisfactionRank: { + FreshRank: { + Flag: EFlag.Green, + }, + LevelRank: { + Flag: EFlag.Green, + }, + }, + Replicated: true, + ReplicationProgress: 1, + ReplicationSecondsRemaining: 0, + AllocatedSize: '0', + AvailableSize: '22817013760', + HasUnreadableBlobs: false, + IncarnationGuid: '11528832187803248876', + InstanceGuid: '14836434871903384493', + FrontQueues: EFlag.Green, + StoragePoolName: 'static', + ReadThroughput: '0', + WriteThroughput: '420', +}); + +interface NodeGeneratorOptions { + maxVdisksPerPDisk?: number; + maxPdisks?: number; +} + +const DEFAULT_OPTIONS: NodeGeneratorOptions = { + maxVdisksPerPDisk: 3, + maxPdisks: 4, +}; + +const generateNode = (nodeId: number, options: NodeGeneratorOptions = {}): TNodeInfo => { + const maxPdisks = options.maxPdisks ?? DEFAULT_OPTIONS.maxPdisks!; + const maxVdisksPerPDisk = options.maxVdisksPerPDisk ?? DEFAULT_OPTIONS.maxVdisksPerPDisk!; + + // Generate a random number of pdisks up to maxPdisks + const pdisksCount = Math.floor(Math.random() * maxPdisks) + 1; + + // For each pdisk, generate a random number of vdisks up to maxVdisksPerPDisk + const pdiskVdisksCounts = Array.from({length: pdisksCount}, () => + Math.floor(Math.random() * maxVdisksPerPDisk), + ); + const totalVdisks = pdiskVdisksCounts.reduce((sum: number, count: number) => sum + count, 0); + + return { + NodeId: nodeId, + UptimeSeconds: 284, + CpuUsage: 0.00947996, + DiskSpaceUsage: 0.234985, + SystemState: generateSystemState(nodeId), + PDisks: Array.from({length: pdisksCount}, (_, i) => + generatePDisk(nodeId, i + 1, getRandomDiskSize()), + ), + VDisks: Array.from({length: totalVdisks}, (_, i) => { + // Find which pdisk this vdisk belongs to based on the distribution + let pdiskIndex = 0; + let vdiskCount = pdiskVdisksCounts[0]; + while (i >= vdiskCount && pdiskIndex < pdisksCount - 1) { + pdiskIndex++; + vdiskCount += pdiskVdisksCounts[pdiskIndex]; + } + return generateVDisk(nodeId, i, pdiskIndex + 1); + }), + }; +}; + +interface GenerateNodesOptions extends NodeGeneratorOptions { + offset?: number; + limit?: number; +} + +// Keep a cache of generated nodes to maintain consistency between paginated requests +let cachedNodes: TNodeInfo[] | null = null; +let currentTotalNodes = 50; // Default number of nodes + +export const generateNodes = (count?: number, options: GenerateNodesOptions = {}): TNodesInfo => { + const totalNodes = count ?? 
currentTotalNodes; + const {offset = 0, limit = totalNodes, maxVdisksPerPDisk, maxPdisks} = options; + + // Reset cache if total nodes count changes + if (totalNodes !== currentTotalNodes) { + cachedNodes = null; + currentTotalNodes = totalNodes; + } + + // Generate or use cached nodes + if (!cachedNodes) { + cachedNodes = Array.from({length: totalNodes}, (_, i) => + generateNode(i + 1, {maxVdisksPerPDisk, maxPdisks}), + ); + } + + // Calculate MaximumSlotsPerDisk and MaximumDisksPerNode across all nodes + let maxSlotsPerDisk = 0; + let maxDisksPerNode = 0; + + cachedNodes.forEach((node) => { + // Count pdisks per node + if (node.PDisks) { + maxDisksPerNode = Math.max(maxDisksPerNode, node.PDisks.length); + } + + // Count vdisks per pdisk + if (node.VDisks) { + const pdiskVdiskCounts = new Map(); + node.VDisks.forEach((vdisk) => { + if (typeof vdisk.PDiskId === 'number') { + const count = (pdiskVdiskCounts.get(vdisk.PDiskId) || 0) + 1; + pdiskVdiskCounts.set(vdisk.PDiskId, count); + maxSlotsPerDisk = Math.max(maxSlotsPerDisk, count); + } + }); + } + }); + + // Get the requested slice of nodes + const paginatedNodes = cachedNodes.slice(offset, offset + limit); + + return { + TotalNodes: totalNodes.toString(), + FoundNodes: totalNodes.toString(), + Nodes: paginatedNodes, + MaximumSlotsPerDisk: maxSlotsPerDisk.toString(), + MaximumDisksPerNode: maxDisksPerNode.toString(), + }; +}; From dfd810abe6beed330fbd814876e6267be670efb1 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Wed, 28 May 2025 18:45:10 +0300 Subject: [PATCH 11/21] Revert "Revert "Revert "Revert "Revert "Revert "fix: remove mocks"""""" This reverts commit 843ec953bb0679d5762260425604008cdce984f6. --- .../PaginatedStorageNodesTable/getNodes.ts | 50 ++-- .../PaginatedStorageNodesTable/nodes.ts | 224 ------------------ 2 files changed, 17 insertions(+), 257 deletions(-) delete mode 100644 src/containers/Storage/PaginatedStorageNodesTable/nodes.ts diff --git a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts index c8ed3d393..c8cc7d7f5 100644 --- a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts +++ b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts @@ -13,8 +13,6 @@ import {prepareSortValue} from '../../../utils/filters'; import {getUptimeParamValue} from '../../../utils/nodes'; import {getRequiredDataFields} from '../../../utils/tableUtils/getRequiredDataFields'; -import {generateNodes} from './nodes'; - export const getStorageNodes: FetchData< PreparedStorageNode, PreparedStorageNodeFilters, @@ -45,37 +43,23 @@ export const getStorageNodes: FetchData< const sort = sortField ? 
prepareSortValue(sortField, sortOrder) : undefined; const dataFieldsRequired = getRequiredDataFields(columnsIds, NODES_COLUMNS_TO_DATA_FIELDS); - let response; - const urlParams = new URLSearchParams(window.location.search); - if (urlParams.get('mocks')) { - // Get mock configuration from URL parameters or use defaults - const pdisks = parseInt(urlParams.get('pdisks') || '10', 10); - const vdisksPerPDisk = parseInt(urlParams.get('vdisksPerPDisk') || '2', 10); - const totalNodes = parseInt(urlParams.get('totalNodes') || '50', 10); - response = generateNodes(totalNodes, { - maxVdisksPerPDisk: vdisksPerPDisk, - maxPdisks: pdisks, - offset, - limit, - }); - } else { - response = await window.api.viewer.getNodes({ - type, - storage, - limit, - offset, - sort, - filter: searchValue, - uptime: getUptimeParamValue(nodesUptimeFilter), - with: visibleEntities, - database, - node_id: nodeId, - group_id: groupId, - filter_group: filterGroup, - filter_group_by: filterGroupBy, - fieldsRequired: dataFieldsRequired, - }); - } + + const response = await window.api.viewer.getNodes({ + type, + storage, + limit, + offset, + sort, + filter: searchValue, + uptime: getUptimeParamValue(nodesUptimeFilter), + with: visibleEntities, + database, + node_id: nodeId, + group_id: groupId, + filter_group: filterGroup, + filter_group_by: filterGroupBy, + fieldsRequired: dataFieldsRequired, + }); const preparedResponse = prepareStorageNodesResponse(response); return { data: preparedResponse.nodes || [], diff --git a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts deleted file mode 100644 index 1c4cfe187..000000000 --- a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts +++ /dev/null @@ -1,224 +0,0 @@ -import {EFlag} from '../../../types/api/enums'; -import type { - TEndpoint, - TNodeInfo, - TNodesInfo, - TPoolStats, - TSystemStateInfo, -} from '../../../types/api/nodes'; -import {TPDiskState} from '../../../types/api/pdisk'; -import {EVDiskState} from '../../../types/api/vdisk'; - -// Different disk sizes to simulate variety (in bytes) -const DISK_SIZES = [ - '68719476736', // 64 GB - '137438953472', // 128 GB - '274877906944', // 256 GB - '549755813888', // 512 GB - '1099511627776', // 1 TB -]; - -const getRandomDiskSize = () => DISK_SIZES[Math.floor(Math.random() * DISK_SIZES.length)]; - -const generatePoolStats = (count = 5): TPoolStats[] => { - const poolNames = ['System', 'User', 'Batch', 'IO', 'IC'] as const; - return poolNames.slice(0, count).map((Name) => ({ - Name, - Usage: Math.random() * 0.02, - Threads: Math.floor(Math.random() * 3) + 1, - })); -}; - -const generateEndpoints = (): TEndpoint[] => [ - {Name: 'ic', Address: ':19001'}, - {Name: 'http-mon', Address: ':8765'}, - {Name: 'grpcs', Address: ':2135'}, - {Name: 'grpc', Address: ':2136'}, -]; - -const generateSystemState = (nodeId: number): TSystemStateInfo => ({ - StartTime: '1734358137851', - ChangeTime: '1734358421375', - LoadAverage: [3.381347656, 2.489257813, 1.279296875], - NumberOfCpus: 8, - SystemState: EFlag.Green, - NodeId: nodeId, - Host: `localhost-${nodeId}`, - Version: 'main.95ce0df', - PoolStats: generatePoolStats(), - Endpoints: generateEndpoints(), - Roles: ['Bootstrapper', 'StateStorage', 'StateStorageBoard', 'SchemeBoard', 'Storage'], - MemoryLimit: '2147483648', - MaxDiskUsage: 0.002349853516, - Location: { - DataCenter: '1', - Rack: '1', - Unit: '1', - }, - TotalSessions: 0, - CoresUsed: 0.07583969556, - CoresTotal: 8, -}); - -const generatePDisk = (nodeId: 
number, pdiskId: number, totalSize = '68719476736') => ({ - PDiskId: pdiskId, - ChangeTime: '1734358142074', - Path: `/ydb_data/pdisk${pdiskId}l3ki78no.data`, - Guid: pdiskId.toString(), - Category: '0', - TotalSize: totalSize, - AvailableSize: (Number(totalSize) * 0.9).toString(), // 90% available by default - State: TPDiskState.Normal, - NodeId: nodeId, - Device: EFlag.Green, - Realtime: EFlag.Green, - SerialNumber: '', - SystemSize: '213909504', - LogUsedSize: '35651584', - LogTotalSize: '68486692864', - EnforcedDynamicSlotSize: '22817013760', -}); - -const generateVDisk = (nodeId: number, vdiskId: number, pdiskId: number) => ({ - VDiskId: { - GroupID: vdiskId, - GroupGeneration: 1, - Ring: 0, - Domain: 0, - VDisk: 0, - }, - ChangeTime: '1734358420919', - PDiskId: pdiskId, - VDiskSlotId: vdiskId, - Guid: '1', - Kind: '0', - NodeId: nodeId, - VDiskState: EVDiskState.OK, - DiskSpace: EFlag.Green, - SatisfactionRank: { - FreshRank: { - Flag: EFlag.Green, - }, - LevelRank: { - Flag: EFlag.Green, - }, - }, - Replicated: true, - ReplicationProgress: 1, - ReplicationSecondsRemaining: 0, - AllocatedSize: '0', - AvailableSize: '22817013760', - HasUnreadableBlobs: false, - IncarnationGuid: '11528832187803248876', - InstanceGuid: '14836434871903384493', - FrontQueues: EFlag.Green, - StoragePoolName: 'static', - ReadThroughput: '0', - WriteThroughput: '420', -}); - -interface NodeGeneratorOptions { - maxVdisksPerPDisk?: number; - maxPdisks?: number; -} - -const DEFAULT_OPTIONS: NodeGeneratorOptions = { - maxVdisksPerPDisk: 3, - maxPdisks: 4, -}; - -const generateNode = (nodeId: number, options: NodeGeneratorOptions = {}): TNodeInfo => { - const maxPdisks = options.maxPdisks ?? DEFAULT_OPTIONS.maxPdisks!; - const maxVdisksPerPDisk = options.maxVdisksPerPDisk ?? DEFAULT_OPTIONS.maxVdisksPerPDisk!; - - // Generate a random number of pdisks up to maxPdisks - const pdisksCount = Math.floor(Math.random() * maxPdisks) + 1; - - // For each pdisk, generate a random number of vdisks up to maxVdisksPerPDisk - const pdiskVdisksCounts = Array.from({length: pdisksCount}, () => - Math.floor(Math.random() * maxVdisksPerPDisk), - ); - const totalVdisks = pdiskVdisksCounts.reduce((sum: number, count: number) => sum + count, 0); - - return { - NodeId: nodeId, - UptimeSeconds: 284, - CpuUsage: 0.00947996, - DiskSpaceUsage: 0.234985, - SystemState: generateSystemState(nodeId), - PDisks: Array.from({length: pdisksCount}, (_, i) => - generatePDisk(nodeId, i + 1, getRandomDiskSize()), - ), - VDisks: Array.from({length: totalVdisks}, (_, i) => { - // Find which pdisk this vdisk belongs to based on the distribution - let pdiskIndex = 0; - let vdiskCount = pdiskVdisksCounts[0]; - while (i >= vdiskCount && pdiskIndex < pdisksCount - 1) { - pdiskIndex++; - vdiskCount += pdiskVdisksCounts[pdiskIndex]; - } - return generateVDisk(nodeId, i, pdiskIndex + 1); - }), - }; -}; - -interface GenerateNodesOptions extends NodeGeneratorOptions { - offset?: number; - limit?: number; -} - -// Keep a cache of generated nodes to maintain consistency between paginated requests -let cachedNodes: TNodeInfo[] | null = null; -let currentTotalNodes = 50; // Default number of nodes - -export const generateNodes = (count?: number, options: GenerateNodesOptions = {}): TNodesInfo => { - const totalNodes = count ?? 
currentTotalNodes; - const {offset = 0, limit = totalNodes, maxVdisksPerPDisk, maxPdisks} = options; - - // Reset cache if total nodes count changes - if (totalNodes !== currentTotalNodes) { - cachedNodes = null; - currentTotalNodes = totalNodes; - } - - // Generate or use cached nodes - if (!cachedNodes) { - cachedNodes = Array.from({length: totalNodes}, (_, i) => - generateNode(i + 1, {maxVdisksPerPDisk, maxPdisks}), - ); - } - - // Calculate MaximumSlotsPerDisk and MaximumDisksPerNode across all nodes - let maxSlotsPerDisk = 0; - let maxDisksPerNode = 0; - - cachedNodes.forEach((node) => { - // Count pdisks per node - if (node.PDisks) { - maxDisksPerNode = Math.max(maxDisksPerNode, node.PDisks.length); - } - - // Count vdisks per pdisk - if (node.VDisks) { - const pdiskVdiskCounts = new Map(); - node.VDisks.forEach((vdisk) => { - if (typeof vdisk.PDiskId === 'number') { - const count = (pdiskVdiskCounts.get(vdisk.PDiskId) || 0) + 1; - pdiskVdiskCounts.set(vdisk.PDiskId, count); - maxSlotsPerDisk = Math.max(maxSlotsPerDisk, count); - } - }); - } - }); - - // Get the requested slice of nodes - const paginatedNodes = cachedNodes.slice(offset, offset + limit); - - return { - TotalNodes: totalNodes.toString(), - FoundNodes: totalNodes.toString(), - Nodes: paginatedNodes, - MaximumSlotsPerDisk: maxSlotsPerDisk.toString(), - MaximumDisksPerNode: maxDisksPerNode.toString(), - }; -}; From d3fb64254cefbbf4f66c213b5f8648dd88bcab97 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Wed, 28 May 2025 18:45:46 +0300 Subject: [PATCH 12/21] fix: correct table-layout --- src/components/PaginatedTable/PaginatedTable.tsx | 12 +++++------- src/components/PaginatedTable/TableChunk.tsx | 13 ++++++++----- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/src/components/PaginatedTable/PaginatedTable.tsx b/src/components/PaginatedTable/PaginatedTable.tsx index 5e9abd6e3..f85f07ca8 100644 --- a/src/components/PaginatedTable/PaginatedTable.tsx +++ b/src/components/PaginatedTable/PaginatedTable.tsx @@ -183,13 +183,11 @@ export const PaginatedTable = ({ // Render merged empty tbody for consecutive inactive chunks chunks.push( - , + + + + + , ); } } diff --git a/src/components/PaginatedTable/TableChunk.tsx b/src/components/PaginatedTable/TableChunk.tsx index f0d42d01c..f42870dd3 100644 --- a/src/components/PaginatedTable/TableChunk.tsx +++ b/src/components/PaginatedTable/TableChunk.tsx @@ -160,13 +160,16 @@ export const TableChunk = typedMemo(function TableChunk({ id={id.toString()} style={{ height: `${dataLength * rowHeight}px`, - // Default display: table-row-group doesn't work in Safari and breaks the table - // display: block works in Safari, but disconnects thead and tbody cell grids - // Hack to make it work in all cases - display: shouldRender ? 'table-row-group' : 'block', + display: 'table-row-group', }} > - {renderContent()} + {shouldRender ? 
( + renderContent() + ) : ( + + + + )} ); }); From b845b5616cc024f859954b5ca97f2fa490a30bcf Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Wed, 28 May 2025 19:35:46 +0300 Subject: [PATCH 13/21] fix: one tbody for all --- .../PaginatedTable/PaginatedTable.tsx | 10 +++---- src/components/PaginatedTable/TableChunk.tsx | 30 +++++-------------- src/components/PaginatedTable/TableRow.tsx | 9 +++--- 3 files changed, 17 insertions(+), 32 deletions(-) diff --git a/src/components/PaginatedTable/PaginatedTable.tsx b/src/components/PaginatedTable/PaginatedTable.tsx index f85f07ca8..7a846d1d4 100644 --- a/src/components/PaginatedTable/PaginatedTable.tsx +++ b/src/components/PaginatedTable/PaginatedTable.tsx @@ -183,11 +183,9 @@ export const PaginatedTable = ({ // Render merged empty tbody for consecutive inactive chunks chunks.push( - - - - - , + + + , ); } } @@ -198,7 +196,7 @@ export const PaginatedTable = ({ const renderTable = () => ( - {renderChunks()} + {renderChunks()}
); diff --git a/src/components/PaginatedTable/TableChunk.tsx b/src/components/PaginatedTable/TableChunk.tsx index f42870dd3..aed31a261 100644 --- a/src/components/PaginatedTable/TableChunk.tsx +++ b/src/components/PaginatedTable/TableChunk.tsx @@ -112,15 +112,11 @@ export const TableChunk = typedMemo(function TableChunk({ const dataLength = currentData?.data?.length || calculatedCount; const renderContent = () => { - if (!shouldRender) { - return null; - } - if (!currentData) { if (error) { const errorData = error as IResponseError; return ( - + {renderErrorMessage ? ( renderErrorMessage(errorData) ) : ( @@ -138,7 +134,7 @@ export const TableChunk = typedMemo(function TableChunk({ // Data is loaded, but there are no entities in the chunk if (!currentData.data?.length) { return ( - + {renderEmptyDataMessage ? renderEmptyDataMessage() : i18n('empty')} ); @@ -155,21 +151,11 @@ export const TableChunk = typedMemo(function TableChunk({ )); }; - return ( - - {shouldRender ? ( - renderContent() - ) : ( - - - - )} - + return shouldRender ? ( + renderContent() + ) : ( + + + ); }); diff --git a/src/components/PaginatedTable/TableRow.tsx b/src/components/PaginatedTable/TableRow.tsx index 7ce9987b2..28d3d3cb1 100644 --- a/src/components/PaginatedTable/TableRow.tsx +++ b/src/components/PaginatedTable/TableRow.tsx @@ -44,7 +44,7 @@ interface LoadingTableRowProps { export const LoadingTableRow = typedMemo(function ({columns, height}: LoadingTableRowProps) { return ( - + {columns.map((column) => { const resizeable = column.resizeable ?? DEFAULT_RESIZEABLE; @@ -79,7 +79,7 @@ export const TableRow = ({row, columns, getRowClassName, height}: TableRowPr const additionalClassName = getRowClassName?.(row); return ( - + {columns.map((column) => { const resizeable = column.resizeable ?? DEFAULT_RESIZEABLE; @@ -103,11 +103,12 @@ export const TableRow = ({row, columns, getRowClassName, height}: TableRowPr interface EmptyTableRowProps { columns: Column[]; children?: React.ReactNode; + height: number; } -export const EmptyTableRow = ({columns, children}: EmptyTableRowProps) => { +export const EmptyTableRow = ({columns, children, height}: EmptyTableRowProps) => { return ( - + {children} From 2b45f1c825b07f27e7dbf1aea3d165cee2e2960a Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Thu, 29 May 2025 13:32:22 +0300 Subject: [PATCH 14/21] Revert "Revert "Revert "Revert "Revert "Revert "Revert "fix: remove mocks""""""" This reverts commit dfd810abe6beed330fbd814876e6267be670efb1. --- .../PaginatedStorageNodesTable/getNodes.ts | 50 ++-- .../PaginatedStorageNodesTable/nodes.ts | 224 ++++++++++++++++++ 2 files changed, 257 insertions(+), 17 deletions(-) create mode 100644 src/containers/Storage/PaginatedStorageNodesTable/nodes.ts diff --git a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts index c8cc7d7f5..c8ed3d393 100644 --- a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts +++ b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts @@ -13,6 +13,8 @@ import {prepareSortValue} from '../../../utils/filters'; import {getUptimeParamValue} from '../../../utils/nodes'; import {getRequiredDataFields} from '../../../utils/tableUtils/getRequiredDataFields'; +import {generateNodes} from './nodes'; + export const getStorageNodes: FetchData< PreparedStorageNode, PreparedStorageNodeFilters, @@ -43,23 +45,37 @@ export const getStorageNodes: FetchData< const sort = sortField ? 
prepareSortValue(sortField, sortOrder) : undefined; const dataFieldsRequired = getRequiredDataFields(columnsIds, NODES_COLUMNS_TO_DATA_FIELDS); - - const response = await window.api.viewer.getNodes({ - type, - storage, - limit, - offset, - sort, - filter: searchValue, - uptime: getUptimeParamValue(nodesUptimeFilter), - with: visibleEntities, - database, - node_id: nodeId, - group_id: groupId, - filter_group: filterGroup, - filter_group_by: filterGroupBy, - fieldsRequired: dataFieldsRequired, - }); + let response; + const urlParams = new URLSearchParams(window.location.search); + if (urlParams.get('mocks')) { + // Get mock configuration from URL parameters or use defaults + const pdisks = parseInt(urlParams.get('pdisks') || '10', 10); + const vdisksPerPDisk = parseInt(urlParams.get('vdisksPerPDisk') || '2', 10); + const totalNodes = parseInt(urlParams.get('totalNodes') || '50', 10); + response = generateNodes(totalNodes, { + maxVdisksPerPDisk: vdisksPerPDisk, + maxPdisks: pdisks, + offset, + limit, + }); + } else { + response = await window.api.viewer.getNodes({ + type, + storage, + limit, + offset, + sort, + filter: searchValue, + uptime: getUptimeParamValue(nodesUptimeFilter), + with: visibleEntities, + database, + node_id: nodeId, + group_id: groupId, + filter_group: filterGroup, + filter_group_by: filterGroupBy, + fieldsRequired: dataFieldsRequired, + }); + } const preparedResponse = prepareStorageNodesResponse(response); return { data: preparedResponse.nodes || [], diff --git a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts new file mode 100644 index 000000000..1c4cfe187 --- /dev/null +++ b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts @@ -0,0 +1,224 @@ +import {EFlag} from '../../../types/api/enums'; +import type { + TEndpoint, + TNodeInfo, + TNodesInfo, + TPoolStats, + TSystemStateInfo, +} from '../../../types/api/nodes'; +import {TPDiskState} from '../../../types/api/pdisk'; +import {EVDiskState} from '../../../types/api/vdisk'; + +// Different disk sizes to simulate variety (in bytes) +const DISK_SIZES = [ + '68719476736', // 64 GB + '137438953472', // 128 GB + '274877906944', // 256 GB + '549755813888', // 512 GB + '1099511627776', // 1 TB +]; + +const getRandomDiskSize = () => DISK_SIZES[Math.floor(Math.random() * DISK_SIZES.length)]; + +const generatePoolStats = (count = 5): TPoolStats[] => { + const poolNames = ['System', 'User', 'Batch', 'IO', 'IC'] as const; + return poolNames.slice(0, count).map((Name) => ({ + Name, + Usage: Math.random() * 0.02, + Threads: Math.floor(Math.random() * 3) + 1, + })); +}; + +const generateEndpoints = (): TEndpoint[] => [ + {Name: 'ic', Address: ':19001'}, + {Name: 'http-mon', Address: ':8765'}, + {Name: 'grpcs', Address: ':2135'}, + {Name: 'grpc', Address: ':2136'}, +]; + +const generateSystemState = (nodeId: number): TSystemStateInfo => ({ + StartTime: '1734358137851', + ChangeTime: '1734358421375', + LoadAverage: [3.381347656, 2.489257813, 1.279296875], + NumberOfCpus: 8, + SystemState: EFlag.Green, + NodeId: nodeId, + Host: `localhost-${nodeId}`, + Version: 'main.95ce0df', + PoolStats: generatePoolStats(), + Endpoints: generateEndpoints(), + Roles: ['Bootstrapper', 'StateStorage', 'StateStorageBoard', 'SchemeBoard', 'Storage'], + MemoryLimit: '2147483648', + MaxDiskUsage: 0.002349853516, + Location: { + DataCenter: '1', + Rack: '1', + Unit: '1', + }, + TotalSessions: 0, + CoresUsed: 0.07583969556, + CoresTotal: 8, +}); + +const generatePDisk = (nodeId: 
number, pdiskId: number, totalSize = '68719476736') => ({ + PDiskId: pdiskId, + ChangeTime: '1734358142074', + Path: `/ydb_data/pdisk${pdiskId}l3ki78no.data`, + Guid: pdiskId.toString(), + Category: '0', + TotalSize: totalSize, + AvailableSize: (Number(totalSize) * 0.9).toString(), // 90% available by default + State: TPDiskState.Normal, + NodeId: nodeId, + Device: EFlag.Green, + Realtime: EFlag.Green, + SerialNumber: '', + SystemSize: '213909504', + LogUsedSize: '35651584', + LogTotalSize: '68486692864', + EnforcedDynamicSlotSize: '22817013760', +}); + +const generateVDisk = (nodeId: number, vdiskId: number, pdiskId: number) => ({ + VDiskId: { + GroupID: vdiskId, + GroupGeneration: 1, + Ring: 0, + Domain: 0, + VDisk: 0, + }, + ChangeTime: '1734358420919', + PDiskId: pdiskId, + VDiskSlotId: vdiskId, + Guid: '1', + Kind: '0', + NodeId: nodeId, + VDiskState: EVDiskState.OK, + DiskSpace: EFlag.Green, + SatisfactionRank: { + FreshRank: { + Flag: EFlag.Green, + }, + LevelRank: { + Flag: EFlag.Green, + }, + }, + Replicated: true, + ReplicationProgress: 1, + ReplicationSecondsRemaining: 0, + AllocatedSize: '0', + AvailableSize: '22817013760', + HasUnreadableBlobs: false, + IncarnationGuid: '11528832187803248876', + InstanceGuid: '14836434871903384493', + FrontQueues: EFlag.Green, + StoragePoolName: 'static', + ReadThroughput: '0', + WriteThroughput: '420', +}); + +interface NodeGeneratorOptions { + maxVdisksPerPDisk?: number; + maxPdisks?: number; +} + +const DEFAULT_OPTIONS: NodeGeneratorOptions = { + maxVdisksPerPDisk: 3, + maxPdisks: 4, +}; + +const generateNode = (nodeId: number, options: NodeGeneratorOptions = {}): TNodeInfo => { + const maxPdisks = options.maxPdisks ?? DEFAULT_OPTIONS.maxPdisks!; + const maxVdisksPerPDisk = options.maxVdisksPerPDisk ?? DEFAULT_OPTIONS.maxVdisksPerPDisk!; + + // Generate a random number of pdisks up to maxPdisks + const pdisksCount = Math.floor(Math.random() * maxPdisks) + 1; + + // For each pdisk, generate a random number of vdisks up to maxVdisksPerPDisk + const pdiskVdisksCounts = Array.from({length: pdisksCount}, () => + Math.floor(Math.random() * maxVdisksPerPDisk), + ); + const totalVdisks = pdiskVdisksCounts.reduce((sum: number, count: number) => sum + count, 0); + + return { + NodeId: nodeId, + UptimeSeconds: 284, + CpuUsage: 0.00947996, + DiskSpaceUsage: 0.234985, + SystemState: generateSystemState(nodeId), + PDisks: Array.from({length: pdisksCount}, (_, i) => + generatePDisk(nodeId, i + 1, getRandomDiskSize()), + ), + VDisks: Array.from({length: totalVdisks}, (_, i) => { + // Find which pdisk this vdisk belongs to based on the distribution + let pdiskIndex = 0; + let vdiskCount = pdiskVdisksCounts[0]; + while (i >= vdiskCount && pdiskIndex < pdisksCount - 1) { + pdiskIndex++; + vdiskCount += pdiskVdisksCounts[pdiskIndex]; + } + return generateVDisk(nodeId, i, pdiskIndex + 1); + }), + }; +}; + +interface GenerateNodesOptions extends NodeGeneratorOptions { + offset?: number; + limit?: number; +} + +// Keep a cache of generated nodes to maintain consistency between paginated requests +let cachedNodes: TNodeInfo[] | null = null; +let currentTotalNodes = 50; // Default number of nodes + +export const generateNodes = (count?: number, options: GenerateNodesOptions = {}): TNodesInfo => { + const totalNodes = count ?? 
currentTotalNodes; + const {offset = 0, limit = totalNodes, maxVdisksPerPDisk, maxPdisks} = options; + + // Reset cache if total nodes count changes + if (totalNodes !== currentTotalNodes) { + cachedNodes = null; + currentTotalNodes = totalNodes; + } + + // Generate or use cached nodes + if (!cachedNodes) { + cachedNodes = Array.from({length: totalNodes}, (_, i) => + generateNode(i + 1, {maxVdisksPerPDisk, maxPdisks}), + ); + } + + // Calculate MaximumSlotsPerDisk and MaximumDisksPerNode across all nodes + let maxSlotsPerDisk = 0; + let maxDisksPerNode = 0; + + cachedNodes.forEach((node) => { + // Count pdisks per node + if (node.PDisks) { + maxDisksPerNode = Math.max(maxDisksPerNode, node.PDisks.length); + } + + // Count vdisks per pdisk + if (node.VDisks) { + const pdiskVdiskCounts = new Map(); + node.VDisks.forEach((vdisk) => { + if (typeof vdisk.PDiskId === 'number') { + const count = (pdiskVdiskCounts.get(vdisk.PDiskId) || 0) + 1; + pdiskVdiskCounts.set(vdisk.PDiskId, count); + maxSlotsPerDisk = Math.max(maxSlotsPerDisk, count); + } + }); + } + }); + + // Get the requested slice of nodes + const paginatedNodes = cachedNodes.slice(offset, offset + limit); + + return { + TotalNodes: totalNodes.toString(), + FoundNodes: totalNodes.toString(), + Nodes: paginatedNodes, + MaximumSlotsPerDisk: maxSlotsPerDisk.toString(), + MaximumDisksPerNode: maxDisksPerNode.toString(), + }; +}; From aac8f0eddb88ea021cb2eee96e80da68c03ff666 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Thu, 29 May 2025 15:04:55 +0300 Subject: [PATCH 15/21] fix: merge fetched chunks --- .../PaginatedTable/PaginatedTable.tsx | 19 ++++++++++--------- src/components/PaginatedTable/TableChunk.tsx | 8 +------- .../PaginatedTable/useScrollBasedChunks.ts | 2 +- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/src/components/PaginatedTable/PaginatedTable.tsx b/src/components/PaginatedTable/PaginatedTable.tsx index 7a846d1d4..5b014cbf5 100644 --- a/src/components/PaginatedTable/PaginatedTable.tsx +++ b/src/components/PaginatedTable/PaginatedTable.tsx @@ -37,7 +37,7 @@ export interface PaginatedTableProps { keepCache?: boolean; } -const DEFAULT_PAGINATION_LIMIT = 20; +const DEFAULT_PAGINATION_LIMIT = 10; export const PaginatedTable = ({ limit: chunkSize = DEFAULT_PAGINATION_LIMIT, @@ -142,7 +142,6 @@ export const PaginatedTable = ({ const isActive = shouldRender || shouldFetch; if (isActive) { - // Render active chunk normally chunks.push( key={i} @@ -164,26 +163,28 @@ export const PaginatedTable = ({ keepCache={keepCache} />, ); + } + + if (shouldRender) { i++; } else { // Find consecutive inactive chunks and merge them const startIndex = i; let totalHeight = 0; - while ( - i < chunkStates.length && - !chunkStates[i].shouldRender && - !chunkStates[i].shouldFetch - ) { + while (i < chunkStates.length && !chunkStates[i].shouldRender) { const currentChunkSize = i === chunkStates.length - 1 ? lastChunkSize : chunkSize; totalHeight += currentChunkSize * rowHeight; i++; } - // Render merged empty tbody for consecutive inactive chunks + // Render merged separator for consecutive inactive chunks chunks.push( - + , ); diff --git a/src/components/PaginatedTable/TableChunk.tsx b/src/components/PaginatedTable/TableChunk.tsx index aed31a261..bc122bd04 100644 --- a/src/components/PaginatedTable/TableChunk.tsx +++ b/src/components/PaginatedTable/TableChunk.tsx @@ -151,11 +151,5 @@ export const TableChunk = typedMemo(function TableChunk({ )); }; - return shouldRender ? 
( - renderContent() - ) : ( - - - - ); + return shouldRender ? renderContent() : null; }); diff --git a/src/components/PaginatedTable/useScrollBasedChunks.ts b/src/components/PaginatedTable/useScrollBasedChunks.ts index d6aa1c55d..2db98e5b3 100644 --- a/src/components/PaginatedTable/useScrollBasedChunks.ts +++ b/src/components/PaginatedTable/useScrollBasedChunks.ts @@ -21,7 +21,7 @@ interface ChunkState { const isSafari = /^((?!chrome|android).)*safari/i.test(navigator.userAgent); // Bad performance in Safari - reduce overscan counts -const DEFAULT_RENDER_OVERSCAN = isSafari ? 1 : 2; +const DEFAULT_RENDER_OVERSCAN = isSafari ? 0 : 0; const DEFAULT_FETCH_OVERSCAN = 4; export const useScrollBasedChunks = ({ From dbca82f4fdef455c0fface2ab70648ae0e0e3da9 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Thu, 29 May 2025 15:25:13 +0300 Subject: [PATCH 16/21] fix: increase pagination limit --- src/components/PaginatedTable/PaginatedTable.tsx | 3 ++- src/components/PaginatedTable/useScrollBasedChunks.ts | 4 +--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/components/PaginatedTable/PaginatedTable.tsx b/src/components/PaginatedTable/PaginatedTable.tsx index 5b014cbf5..0a7744094 100644 --- a/src/components/PaginatedTable/PaginatedTable.tsx +++ b/src/components/PaginatedTable/PaginatedTable.tsx @@ -37,7 +37,7 @@ export interface PaginatedTableProps { keepCache?: boolean; } -const DEFAULT_PAGINATION_LIMIT = 10; +const DEFAULT_PAGINATION_LIMIT = 20; export const PaginatedTable = ({ limit: chunkSize = DEFAULT_PAGINATION_LIMIT, @@ -183,6 +183,7 @@ export const PaginatedTable = ({ chunks.push( diff --git a/src/components/PaginatedTable/useScrollBasedChunks.ts b/src/components/PaginatedTable/useScrollBasedChunks.ts index 2db98e5b3..b2322f9a5 100644 --- a/src/components/PaginatedTable/useScrollBasedChunks.ts +++ b/src/components/PaginatedTable/useScrollBasedChunks.ts @@ -18,10 +18,8 @@ interface ChunkState { shouldFetch: boolean; } -const isSafari = /^((?!chrome|android).)*safari/i.test(navigator.userAgent); - // Bad performance in Safari - reduce overscan counts -const DEFAULT_RENDER_OVERSCAN = isSafari ? 0 : 0; +const DEFAULT_RENDER_OVERSCAN = 2; const DEFAULT_FETCH_OVERSCAN = 4; export const useScrollBasedChunks = ({ From 0027678c74e3cb14ee08b0ba9344f0b2e23f01f0 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Thu, 29 May 2025 17:24:21 +0300 Subject: [PATCH 17/21] fix: overscans --- src/components/PaginatedTable/useScrollBasedChunks.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/components/PaginatedTable/useScrollBasedChunks.ts b/src/components/PaginatedTable/useScrollBasedChunks.ts index b2322f9a5..d6aa1c55d 100644 --- a/src/components/PaginatedTable/useScrollBasedChunks.ts +++ b/src/components/PaginatedTable/useScrollBasedChunks.ts @@ -18,8 +18,10 @@ interface ChunkState { shouldFetch: boolean; } +const isSafari = /^((?!chrome|android).)*safari/i.test(navigator.userAgent); + // Bad performance in Safari - reduce overscan counts -const DEFAULT_RENDER_OVERSCAN = 2; +const DEFAULT_RENDER_OVERSCAN = isSafari ? 1 : 2; const DEFAULT_FETCH_OVERSCAN = 4; export const useScrollBasedChunks = ({ From e89ee943fbeefbbf4a6d6d409af93469ed92a840 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Thu, 29 May 2025 17:25:17 +0300 Subject: [PATCH 18/21] Revert "Revert "Revert "Revert "Revert "Revert "Revert "Revert "fix: remove mocks"""""""" This reverts commit 2b45f1c825b07f27e7dbf1aea3d165cee2e2960a. 
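
Note on the scroll machinery the three patches above are tuning, before the mock
generator is removed for good here: the render/fetch overscan constants feed a
chunk-state calculation in useScrollBasedChunks of roughly the following shape.
This is a minimal sketch reconstructed from the fragments visible in this series,
not the hook's verbatim source; the function name and the exact clamping
arithmetic are illustrative.

    // Sketch only: approximates how DEFAULT_RENDER_OVERSCAN / DEFAULT_FETCH_OVERSCAN
    // could translate scroll position into per-chunk states.
    interface ChunkState {
        shouldRender: boolean; // chunk is mounted as real rows
        shouldFetch: boolean; // chunk's data is requested ahead of rendering
    }

    function calcChunkStates(
        scrollTop: number,
        viewportHeight: number,
        tableOffset: number,
        rowHeight: number,
        chunkSize: number,
        totalItems: number,
        renderOverscan: number, // 1 in Safari, 2 elsewhere
        fetchOverscan: number, // 4
    ): ChunkState[] {
        const chunkHeight = chunkSize * rowHeight;
        const chunksCount = Math.ceil(totalItems / chunkSize);

        // Chunks intersecting the viewport, measured from the top of the table
        const visibleStart = Math.max(scrollTop - tableOffset, 0);
        const firstVisible = Math.floor(visibleStart / chunkHeight);
        const lastVisible = Math.min(
            Math.floor((visibleStart + viewportHeight) / chunkHeight),
            chunksCount - 1,
        );

        return Array.from({length: chunksCount}, (_, i) => ({
            // Render a narrow band around the viewport...
            shouldRender:
                i >= firstVisible - renderOverscan && i <= lastVisible + renderOverscan,
            // ...but fetch a wider band, so data is ready before rows scroll into view
            shouldFetch:
                i >= firstVisible - fetchOverscan && i <= lastVisible + fetchOverscan,
        }));
    }

The point of the two bands is that fetching (async, cheap to schedule) runs ahead
of rendering (expensive, especially in Safari), so a chunk's data is usually
cached by the time its rows have to be mounted.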
--- .../PaginatedStorageNodesTable/getNodes.ts | 50 ++-- .../PaginatedStorageNodesTable/nodes.ts | 224 ------------------ 2 files changed, 17 insertions(+), 257 deletions(-) delete mode 100644 src/containers/Storage/PaginatedStorageNodesTable/nodes.ts diff --git a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts index c8ed3d393..c8cc7d7f5 100644 --- a/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts +++ b/src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts @@ -13,8 +13,6 @@ import {prepareSortValue} from '../../../utils/filters'; import {getUptimeParamValue} from '../../../utils/nodes'; import {getRequiredDataFields} from '../../../utils/tableUtils/getRequiredDataFields'; -import {generateNodes} from './nodes'; - export const getStorageNodes: FetchData< PreparedStorageNode, PreparedStorageNodeFilters, @@ -45,37 +43,23 @@ export const getStorageNodes: FetchData< const sort = sortField ? prepareSortValue(sortField, sortOrder) : undefined; const dataFieldsRequired = getRequiredDataFields(columnsIds, NODES_COLUMNS_TO_DATA_FIELDS); - let response; - const urlParams = new URLSearchParams(window.location.search); - if (urlParams.get('mocks')) { - // Get mock configuration from URL parameters or use defaults - const pdisks = parseInt(urlParams.get('pdisks') || '10', 10); - const vdisksPerPDisk = parseInt(urlParams.get('vdisksPerPDisk') || '2', 10); - const totalNodes = parseInt(urlParams.get('totalNodes') || '50', 10); - response = generateNodes(totalNodes, { - maxVdisksPerPDisk: vdisksPerPDisk, - maxPdisks: pdisks, - offset, - limit, - }); - } else { - response = await window.api.viewer.getNodes({ - type, - storage, - limit, - offset, - sort, - filter: searchValue, - uptime: getUptimeParamValue(nodesUptimeFilter), - with: visibleEntities, - database, - node_id: nodeId, - group_id: groupId, - filter_group: filterGroup, - filter_group_by: filterGroupBy, - fieldsRequired: dataFieldsRequired, - }); - } + + const response = await window.api.viewer.getNodes({ + type, + storage, + limit, + offset, + sort, + filter: searchValue, + uptime: getUptimeParamValue(nodesUptimeFilter), + with: visibleEntities, + database, + node_id: nodeId, + group_id: groupId, + filter_group: filterGroup, + filter_group_by: filterGroupBy, + fieldsRequired: dataFieldsRequired, + }); const preparedResponse = prepareStorageNodesResponse(response); return { data: preparedResponse.nodes || [], diff --git a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts b/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts deleted file mode 100644 index 1c4cfe187..000000000 --- a/src/containers/Storage/PaginatedStorageNodesTable/nodes.ts +++ /dev/null @@ -1,224 +0,0 @@ -import {EFlag} from '../../../types/api/enums'; -import type { - TEndpoint, - TNodeInfo, - TNodesInfo, - TPoolStats, - TSystemStateInfo, -} from '../../../types/api/nodes'; -import {TPDiskState} from '../../../types/api/pdisk'; -import {EVDiskState} from '../../../types/api/vdisk'; - -// Different disk sizes to simulate variety (in bytes) -const DISK_SIZES = [ - '68719476736', // 64 GB - '137438953472', // 128 GB - '274877906944', // 256 GB - '549755813888', // 512 GB - '1099511627776', // 1 TB -]; - -const getRandomDiskSize = () => DISK_SIZES[Math.floor(Math.random() * DISK_SIZES.length)]; - -const generatePoolStats = (count = 5): TPoolStats[] => { - const poolNames = ['System', 'User', 'Batch', 'IO', 'IC'] as const; - return poolNames.slice(0, count).map((Name) => 
({ - Name, - Usage: Math.random() * 0.02, - Threads: Math.floor(Math.random() * 3) + 1, - })); -}; - -const generateEndpoints = (): TEndpoint[] => [ - {Name: 'ic', Address: ':19001'}, - {Name: 'http-mon', Address: ':8765'}, - {Name: 'grpcs', Address: ':2135'}, - {Name: 'grpc', Address: ':2136'}, -]; - -const generateSystemState = (nodeId: number): TSystemStateInfo => ({ - StartTime: '1734358137851', - ChangeTime: '1734358421375', - LoadAverage: [3.381347656, 2.489257813, 1.279296875], - NumberOfCpus: 8, - SystemState: EFlag.Green, - NodeId: nodeId, - Host: `localhost-${nodeId}`, - Version: 'main.95ce0df', - PoolStats: generatePoolStats(), - Endpoints: generateEndpoints(), - Roles: ['Bootstrapper', 'StateStorage', 'StateStorageBoard', 'SchemeBoard', 'Storage'], - MemoryLimit: '2147483648', - MaxDiskUsage: 0.002349853516, - Location: { - DataCenter: '1', - Rack: '1', - Unit: '1', - }, - TotalSessions: 0, - CoresUsed: 0.07583969556, - CoresTotal: 8, -}); - -const generatePDisk = (nodeId: number, pdiskId: number, totalSize = '68719476736') => ({ - PDiskId: pdiskId, - ChangeTime: '1734358142074', - Path: `/ydb_data/pdisk${pdiskId}l3ki78no.data`, - Guid: pdiskId.toString(), - Category: '0', - TotalSize: totalSize, - AvailableSize: (Number(totalSize) * 0.9).toString(), // 90% available by default - State: TPDiskState.Normal, - NodeId: nodeId, - Device: EFlag.Green, - Realtime: EFlag.Green, - SerialNumber: '', - SystemSize: '213909504', - LogUsedSize: '35651584', - LogTotalSize: '68486692864', - EnforcedDynamicSlotSize: '22817013760', -}); - -const generateVDisk = (nodeId: number, vdiskId: number, pdiskId: number) => ({ - VDiskId: { - GroupID: vdiskId, - GroupGeneration: 1, - Ring: 0, - Domain: 0, - VDisk: 0, - }, - ChangeTime: '1734358420919', - PDiskId: pdiskId, - VDiskSlotId: vdiskId, - Guid: '1', - Kind: '0', - NodeId: nodeId, - VDiskState: EVDiskState.OK, - DiskSpace: EFlag.Green, - SatisfactionRank: { - FreshRank: { - Flag: EFlag.Green, - }, - LevelRank: { - Flag: EFlag.Green, - }, - }, - Replicated: true, - ReplicationProgress: 1, - ReplicationSecondsRemaining: 0, - AllocatedSize: '0', - AvailableSize: '22817013760', - HasUnreadableBlobs: false, - IncarnationGuid: '11528832187803248876', - InstanceGuid: '14836434871903384493', - FrontQueues: EFlag.Green, - StoragePoolName: 'static', - ReadThroughput: '0', - WriteThroughput: '420', -}); - -interface NodeGeneratorOptions { - maxVdisksPerPDisk?: number; - maxPdisks?: number; -} - -const DEFAULT_OPTIONS: NodeGeneratorOptions = { - maxVdisksPerPDisk: 3, - maxPdisks: 4, -}; - -const generateNode = (nodeId: number, options: NodeGeneratorOptions = {}): TNodeInfo => { - const maxPdisks = options.maxPdisks ?? DEFAULT_OPTIONS.maxPdisks!; - const maxVdisksPerPDisk = options.maxVdisksPerPDisk ?? 
DEFAULT_OPTIONS.maxVdisksPerPDisk!; - - // Generate a random number of pdisks up to maxPdisks - const pdisksCount = Math.floor(Math.random() * maxPdisks) + 1; - - // For each pdisk, generate a random number of vdisks up to maxVdisksPerPDisk - const pdiskVdisksCounts = Array.from({length: pdisksCount}, () => - Math.floor(Math.random() * maxVdisksPerPDisk), - ); - const totalVdisks = pdiskVdisksCounts.reduce((sum: number, count: number) => sum + count, 0); - - return { - NodeId: nodeId, - UptimeSeconds: 284, - CpuUsage: 0.00947996, - DiskSpaceUsage: 0.234985, - SystemState: generateSystemState(nodeId), - PDisks: Array.from({length: pdisksCount}, (_, i) => - generatePDisk(nodeId, i + 1, getRandomDiskSize()), - ), - VDisks: Array.from({length: totalVdisks}, (_, i) => { - // Find which pdisk this vdisk belongs to based on the distribution - let pdiskIndex = 0; - let vdiskCount = pdiskVdisksCounts[0]; - while (i >= vdiskCount && pdiskIndex < pdisksCount - 1) { - pdiskIndex++; - vdiskCount += pdiskVdisksCounts[pdiskIndex]; - } - return generateVDisk(nodeId, i, pdiskIndex + 1); - }), - }; -}; - -interface GenerateNodesOptions extends NodeGeneratorOptions { - offset?: number; - limit?: number; -} - -// Keep a cache of generated nodes to maintain consistency between paginated requests -let cachedNodes: TNodeInfo[] | null = null; -let currentTotalNodes = 50; // Default number of nodes - -export const generateNodes = (count?: number, options: GenerateNodesOptions = {}): TNodesInfo => { - const totalNodes = count ?? currentTotalNodes; - const {offset = 0, limit = totalNodes, maxVdisksPerPDisk, maxPdisks} = options; - - // Reset cache if total nodes count changes - if (totalNodes !== currentTotalNodes) { - cachedNodes = null; - currentTotalNodes = totalNodes; - } - - // Generate or use cached nodes - if (!cachedNodes) { - cachedNodes = Array.from({length: totalNodes}, (_, i) => - generateNode(i + 1, {maxVdisksPerPDisk, maxPdisks}), - ); - } - - // Calculate MaximumSlotsPerDisk and MaximumDisksPerNode across all nodes - let maxSlotsPerDisk = 0; - let maxDisksPerNode = 0; - - cachedNodes.forEach((node) => { - // Count pdisks per node - if (node.PDisks) { - maxDisksPerNode = Math.max(maxDisksPerNode, node.PDisks.length); - } - - // Count vdisks per pdisk - if (node.VDisks) { - const pdiskVdiskCounts = new Map(); - node.VDisks.forEach((vdisk) => { - if (typeof vdisk.PDiskId === 'number') { - const count = (pdiskVdiskCounts.get(vdisk.PDiskId) || 0) + 1; - pdiskVdiskCounts.set(vdisk.PDiskId, count); - maxSlotsPerDisk = Math.max(maxSlotsPerDisk, count); - } - }); - } - }); - - // Get the requested slice of nodes - const paginatedNodes = cachedNodes.slice(offset, offset + limit); - - return { - TotalNodes: totalNodes.toString(), - FoundNodes: totalNodes.toString(), - Nodes: paginatedNodes, - MaximumSlotsPerDisk: maxSlotsPerDisk.toString(), - MaximumDisksPerNode: maxDisksPerNode.toString(), - }; -}; From b86a4f066d794546e7b456e8656e3fd7f5c81ab7 Mon Sep 17 00:00:00 2001 From: Anton Standrik Date: Thu, 29 May 2025 18:51:00 +0300 Subject: [PATCH 19/21] fix: chunks logics --- .../PaginatedTable/PaginatedTable.tsx | 106 ++-------- .../PaginatedTable/TableChunksRenderer.tsx | 191 ++++++++++++++++++ 2 files changed, 212 insertions(+), 85 deletions(-) create mode 100644 src/components/PaginatedTable/TableChunksRenderer.tsx diff --git a/src/components/PaginatedTable/PaginatedTable.tsx b/src/components/PaginatedTable/PaginatedTable.tsx index 0a7744094..670f907d0 100644 --- 
a/src/components/PaginatedTable/PaginatedTable.tsx +++ b/src/components/PaginatedTable/PaginatedTable.tsx @@ -1,7 +1,7 @@ import React from 'react'; import {usePaginatedTableState} from './PaginatedTableContext'; -import {TableChunk} from './TableChunk'; +import {TableChunksRenderer} from './TableChunksRenderer'; import {TableHead} from './TableHead'; import {DEFAULT_TABLE_ROW_HEIGHT} from './constants'; import {b} from './shared'; @@ -14,7 +14,6 @@ import type { RenderEmptyDataMessage, RenderErrorMessage, } from './types'; -import {useScrollBasedChunks} from './useScrollBasedChunks'; import {calculateElementOffsetTop} from './utils'; import './PaginatedTable.scss'; @@ -65,15 +64,6 @@ export const PaginatedTable = ({ const tableRef = React.useRef(null); const [tableOffset, setTableOffset] = React.useState(0); - const chunkStates = useScrollBasedChunks({ - scrollContainerRef, - tableRef, - totalItems: foundEntities, - rowHeight, - chunkSize, - tableOffset, - }); - // this prevent situation when filters are new, but active chunks is not yet recalculated (it will be done to the next rendrer, so we bring filters change on the next render too) const [filters, setFilters] = React.useState(rawFilters); @@ -81,15 +71,6 @@ export const PaginatedTable = ({ setFilters(rawFilters); }, [rawFilters]); - const lastChunkSize = React.useMemo(() => { - // If foundEntities = 0, there will only first chunk - // Display it with 1 row, to display empty data message - if (!foundEntities) { - return 1; - } - return foundEntities % chunkSize || chunkSize; - }, [foundEntities, chunkSize]); - const handleDataFetched = React.useCallback( (data?: PaginatedTableData) => { if (data) { @@ -131,74 +112,29 @@ export const PaginatedTable = ({ setIsInitialLoad(true); }, [initialEntitiesCount, setTotalEntities, setFoundEntities, setIsInitialLoad]); - const renderChunks = () => { - const chunks: React.ReactElement[] = []; - let i = 0; - - while (i < chunkStates.length) { - const chunkState = chunkStates[i]; - const shouldRender = chunkState.shouldRender; - const shouldFetch = chunkState.shouldFetch; - const isActive = shouldRender || shouldFetch; - - if (isActive) { - chunks.push( - - key={i} - id={i} - calculatedCount={i === chunkStates.length - 1 ? lastChunkSize : chunkSize} - chunkSize={chunkSize} - rowHeight={rowHeight} - columns={columns} - fetchData={fetchData} - filters={filters} - tableName={tableName} - sortParams={sortParams} - getRowClassName={getRowClassName} - renderErrorMessage={renderErrorMessage} - renderEmptyDataMessage={renderEmptyDataMessage} - onDataFetched={handleDataFetched} - shouldFetch={chunkState.shouldFetch} - shouldRender={chunkState.shouldRender} - keepCache={keepCache} - />, - ); - } - - if (shouldRender) { - i++; - } else { - // Find consecutive inactive chunks and merge them - const startIndex = i; - let totalHeight = 0; - - while (i < chunkStates.length && !chunkStates[i].shouldRender) { - const currentChunkSize = - i === chunkStates.length - 1 ? lastChunkSize : chunkSize; - totalHeight += currentChunkSize * rowHeight; - i++; - } - - // Render merged separator for consecutive inactive chunks - chunks.push( - - - , - ); - } - } - - return chunks; - }; - const renderTable = () => ( - {renderChunks()} + + +
); diff --git a/src/components/PaginatedTable/TableChunksRenderer.tsx b/src/components/PaginatedTable/TableChunksRenderer.tsx new file mode 100644 index 000000000..070e40496 --- /dev/null +++ b/src/components/PaginatedTable/TableChunksRenderer.tsx @@ -0,0 +1,191 @@ +import React from 'react'; + +import {TableChunk} from './TableChunk'; +import {b} from './shared'; +import type { + Column, + FetchData, + GetRowClassName, + PaginatedTableData, + RenderEmptyDataMessage, + RenderErrorMessage, + SortParams, +} from './types'; +import {useScrollBasedChunks} from './useScrollBasedChunks'; + +export interface TableChunksRendererProps { + scrollContainerRef: React.RefObject; + tableRef: React.RefObject; + foundEntities: number; + tableOffset: number; + chunkSize: number; + rowHeight: number; + columns: Column[]; + fetchData: FetchData; + filters?: F; + tableName: string; + sortParams?: SortParams; + getRowClassName?: GetRowClassName; + renderErrorMessage?: RenderErrorMessage; + renderEmptyDataMessage?: RenderEmptyDataMessage; + onDataFetched: (data?: PaginatedTableData) => void; + keepCache: boolean; +} + +export const TableChunksRenderer = ({ + scrollContainerRef, + tableRef, + foundEntities, + tableOffset, + chunkSize, + rowHeight, + columns, + fetchData, + filters, + tableName, + sortParams, + getRowClassName, + renderErrorMessage, + renderEmptyDataMessage, + onDataFetched, + keepCache, +}: TableChunksRendererProps) => { + const chunkStates = useScrollBasedChunks({ + scrollContainerRef, + tableRef, + totalItems: foundEntities || 1, + rowHeight, + chunkSize, + tableOffset, + }); + + const lastChunkSize = React.useMemo(() => { + // If foundEntities = 0, there will only first chunk + // Display it with 1 row, to display empty data message + if (!foundEntities) { + return 1; + } + return foundEntities % chunkSize || chunkSize; + }, [foundEntities, chunkSize]); + + const findRenderChunkRange = React.useCallback(() => { + const firstRenderIndex = chunkStates.findIndex((state) => state.shouldRender); + const lastRenderIndex = chunkStates.findLastIndex((state) => state.shouldRender); + return {firstRenderIndex, lastRenderIndex}; + }, [chunkStates]); + + const findFetchChunkRange = React.useCallback(() => { + const firstFetchIndex = chunkStates.findIndex((state) => state.shouldFetch); + const lastFetchIndex = chunkStates.findLastIndex((state) => state.shouldFetch); + return {firstFetchIndex, lastFetchIndex}; + }, [chunkStates]); + + const calculateSeparatorHeight = React.useCallback( + (startIndex: number, endIndex: number) => { + let totalHeight = 0; + for (let i = startIndex; i < endIndex; i++) { + const currentChunkSize = i === chunkStates.length - 1 ? lastChunkSize : chunkSize; + totalHeight += currentChunkSize * rowHeight; + } + return totalHeight; + }, + [chunkSize, chunkStates.length, lastChunkSize, rowHeight], + ); + + const createSeparator = React.useCallback( + (startIndex: number, endIndex: number, key: string) => { + const height = calculateSeparatorHeight(startIndex, endIndex); + return ( + + + + ); + }, + [calculateSeparatorHeight, columns.length], + ); + + const createChunk = React.useCallback( + (chunkIndex: number) => { + const chunkState = chunkStates[chunkIndex]; + return ( + + key={chunkIndex} + id={chunkIndex} + calculatedCount={ + chunkIndex === chunkStates.length - 1 ? 
+                        chunkIndex === chunkStates.length - 1 ? lastChunkSize : chunkSize
+                    }
+                    chunkSize={chunkSize}
+                    rowHeight={rowHeight}
+                    columns={columns}
+                    fetchData={fetchData}
+                    filters={filters}
+                    tableName={tableName}
+                    sortParams={sortParams}
+                    getRowClassName={getRowClassName}
+                    renderErrorMessage={renderErrorMessage}
+                    renderEmptyDataMessage={renderEmptyDataMessage}
+                    onDataFetched={onDataFetched}
+                    shouldFetch={chunkState.shouldFetch}
+                    shouldRender={chunkState.shouldRender}
+                    keepCache={keepCache}
+                />
+            );
+        },
+        [
+            chunkSize,
+            chunkStates,
+            columns,
+            fetchData,
+            filters,
+            getRowClassName,
+            keepCache,
+            lastChunkSize,
+            onDataFetched,
+            renderEmptyDataMessage,
+            renderErrorMessage,
+            rowHeight,
+            sortParams,
+            tableName,
+        ],
+    );
+
+    const renderChunks = React.useCallback(() => {
+        // Chunk states are distributed like [fetch, fetch, render/fetch, render/fetch, fetch, fetch]
+        // i.e. fetched chunks include rendered chunks
+        const {firstFetchIndex, lastFetchIndex} = findFetchChunkRange();
+        const {firstRenderIndex, lastRenderIndex} = findRenderChunkRange();
+        const elements: React.ReactElement[] = [];
+
+        // No fetch chunks found
+        if (firstFetchIndex === -1) {
+            return elements;
+        }
+
+        // Beginning separator (for chunks before the first render chunk)
+        if (firstRenderIndex > 0) {
+            elements.push(createSeparator(0, firstRenderIndex, 'separator-beginning'));
+        }
+
+        // All fetch chunks (shouldFetch = true) get rendered as TableChunk components
+        for (let i = firstFetchIndex; i <= lastFetchIndex; i++) {
+            elements.push(createChunk(i));
+        }
+
+        // End separator (for chunks after the last render chunk)
+        if (lastRenderIndex < chunkStates.length - 1) {
+            elements.push(
+                createSeparator(lastRenderIndex + 1, chunkStates.length, 'separator-end'),
+            );
+        }
+
+        return elements;
+    }, [
+        chunkStates.length,
+        createChunk,
+        createSeparator,
+        findFetchChunkRange,
+        findRenderChunkRange,
+    ]);
+
+    return <React.Fragment>{renderChunks()}</React.Fragment>;
+};

From 91601cda1a267dd9ef641c588bba149c1fc815be Mon Sep 17 00:00:00 2001
From: Anton Standrik
Date: Thu, 29 May 2025 19:28:18 +0300
Subject: [PATCH 20/21] fix: slight nanofixes

---
 src/components/PaginatedTable/TableChunksRenderer.tsx | 2 +-
 src/components/PaginatedTable/useScrollBasedChunks.ts | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/src/components/PaginatedTable/TableChunksRenderer.tsx b/src/components/PaginatedTable/TableChunksRenderer.tsx
index 070e40496..c819fb348 100644
--- a/src/components/PaginatedTable/TableChunksRenderer.tsx
+++ b/src/components/PaginatedTable/TableChunksRenderer.tsx
@@ -150,7 +150,7 @@ export const TableChunksRenderer = <T, F>({
     );
 
     const renderChunks = React.useCallback(() => {
-        // Chunk states are distributed like [fetch, fetch, render/fetch, render/fetch, fetch, fetch]
+        // Chunk states are distributed like [null, null, fetch, fetch, render+fetch, render+fetch, fetch, fetch, null, null]
         // i.e. fetched chunks include rendered chunks
         const {firstFetchIndex, lastFetchIndex} = findFetchChunkRange();
         const {firstRenderIndex, lastRenderIndex} = findRenderChunkRange();
diff --git a/src/components/PaginatedTable/useScrollBasedChunks.ts b/src/components/PaginatedTable/useScrollBasedChunks.ts
index d6aa1c55d..ea82a61ed 100644
--- a/src/components/PaginatedTable/useScrollBasedChunks.ts
+++ b/src/components/PaginatedTable/useScrollBasedChunks.ts
@@ -1,5 +1,7 @@
 import React from 'react';
 
+import {throttle} from 'lodash';
+
 import {rafThrottle} from './utils';
 
 interface UseScrollBasedChunksProps {
@@ -23,6 +25,7 @@
 const isSafari = /^((?!chrome|android).)*safari/i.test(navigator.userAgent);
 
 // Bad performance in Safari - reduce overscan counts
 const DEFAULT_RENDER_OVERSCAN = isSafari ? 1 : 2;
 const DEFAULT_FETCH_OVERSCAN = 4;
+const THROTTLE_DELAY = 200;
 
 export const useScrollBasedChunks = ({
     scrollContainerRef,
@@ -95,7 +98,10 @@
         return undefined;
     }
 
-    const throttledHandleScroll = rafThrottle(handleScroll);
+    const throttledHandleScroll = throttle(handleScroll, THROTTLE_DELAY, {
+        trailing: true,
+        leading: true,
+    });
 
     container.addEventListener('scroll', throttledHandleScroll, {passive: true});
 
     return () => {

From ee789c047e146cd7f01eeda7b2ad4478d5f5f9b2 Mon Sep 17 00:00:00 2001
From: Anton Standrik
Date: Thu, 29 May 2025 21:29:51 +0300
Subject: [PATCH 21/21] fix: tests

---
 tests/suites/paginatedTable/paginatedTable.test.ts | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/tests/suites/paginatedTable/paginatedTable.test.ts b/tests/suites/paginatedTable/paginatedTable.test.ts
index 7bbdd8d70..fca5975e4 100644
--- a/tests/suites/paginatedTable/paginatedTable.test.ts
+++ b/tests/suites/paginatedTable/paginatedTable.test.ts
@@ -20,7 +20,10 @@ test.describe('PaginatedTable', () => {
         // Get initial row count (should be first chunk)
         const initialVisibleRows = await paginatedTable.getRowCount();
-        expect(initialVisibleRows).toEqual(40); // Should not show all rows initially
+
+        // Safari renders 40 rows initially (1 visible chunk + 1 overscan chunk); other browsers render 60 (1 visible chunk + 2 overscan chunks)
+        const expectedRows = page.context().browser()?.browserType().name() === 'webkit' ? 40 : 60;
+        expect(initialVisibleRows).toEqual(expectedRows); // Should not show all rows initially

         // Get data from first visible row to verify initial chunk
         const firstRowData = await paginatedTable.getRowData(0);
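
Series note (editorial, not part of any commit): a minimal TypeScript sketch of the windowing rule that useScrollBasedChunks implements, restated as a pure function for readers who don't have the full file open. The helper name getChunkWindow and its explicit scrollTop/viewportHeight arguments are hypothetical — the real hook reads them from the container refs — and only the overscan constants and the shouldRender/shouldFetch shape come from the diffs above.

// Hypothetical standalone sketch of the chunk-window math; assumes the
// non-Safari overscan values from the patch above.
const RENDER_OVERSCAN = 2; // DEFAULT_RENDER_OVERSCAN on non-Safari browsers
const FETCH_OVERSCAN = 4; // DEFAULT_FETCH_OVERSCAN

interface ChunkState {
    shouldRender: boolean;
    shouldFetch: boolean;
}

function getChunkWindow(
    scrollTop: number,
    viewportHeight: number,
    tableOffset: number,
    rowHeight: number,
    chunkSize: number,
    totalChunks: number,
): ChunkState[] {
    const chunkHeight = rowHeight * chunkSize;
    // First and last chunks that intersect the visible viewport
    const firstVisible = Math.max(Math.floor((scrollTop - tableOffset) / chunkHeight), 0);
    const lastVisible = Math.min(
        Math.floor((scrollTop - tableOffset + viewportHeight) / chunkHeight),
        totalChunks - 1,
    );

    return Array.from({length: totalChunks}, (_, i) => {
        // The render window is the visible range padded by RENDER_OVERSCAN
        // chunks; the fetch window is padded by FETCH_OVERSCAN, so the fetch
        // range is a superset of the render range — matching the
        // "[null, null, fetch, fetch, render+fetch, ...]" comment above.
        const shouldRender =
            i >= firstVisible - RENDER_OVERSCAN && i <= lastVisible + RENDER_OVERSCAN;
        const shouldFetch =
            i >= firstVisible - FETCH_OVERSCAN && i <= lastVisible + FETCH_OVERSCAN;
        return {shouldRender, shouldFetch};
    });
}

With the table scrolled to the top and one visible chunk, this yields three rendered chunks on non-Safari browsers and two on Safari (render overscan 1), which is what the 60-vs-40-row expectation in the test patch encodes — assuming the test fixture uses a 20-row chunk size.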