|
| 1 | +// Copyright (C) 2024 Intel Corporation |
| 2 | +// Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. |
| 3 | +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 4 | + |
| 5 | +#ifndef UMF_MEMSPACE_FIXTURES_HPP |
| 6 | +#define UMF_MEMSPACE_FIXTURES_HPP |
| 7 | + |
| 8 | +#include "base.hpp" |
| 9 | +#include "memspace_helpers.hpp" |
| 10 | +#include "test_helpers.h" |
| 11 | + |
| 12 | +#include <hwloc.h> |
| 13 | +#include <numa.h> |
| 14 | +#include <numaif.h> |
| 15 | +#include <thread> |
| 16 | +#include <umf/memspace.h> |
| 17 | + |
// Common allocation sizes used by the fixtures and tests below.
#define SIZE_4K (4096UL)
#define SIZE_4M (SIZE_4K * 1024UL)
| 20 | + |
// In HWLOC v2.3.0, the 'hwloc_location_type_e' enum is defined inside an
// 'hwloc_location' struct. In newer versions, this enum is defined globally.
// To prevent compile errors in C++ tests related to this scope change,
// 'hwloc_location_type_e' has been aliased via decltype on the struct member,
// which resolves correctly under both layouts.
using hwloc_location_type_alias = decltype(hwloc_location::type);
| 26 | + |
| 27 | +struct numaNodesTest : ::umf_test::test { |
| 28 | + void SetUp() override { |
| 29 | + ::umf_test::test::SetUp(); |
| 30 | + |
| 31 | + if (numa_available() == -1 || numa_all_nodes_ptr == nullptr) { |
| 32 | + GTEST_FAIL() << "Failed to initialize libnuma"; |
| 33 | + } |
| 34 | + |
| 35 | + int maxNode = numa_max_node(); |
| 36 | + if (maxNode < 0) { |
| 37 | + GTEST_FAIL() << "No available numa nodes"; |
| 38 | + } |
| 39 | + |
| 40 | + for (int i = 0; i <= maxNode; i++) { |
| 41 | + if (numa_bitmask_isbitset(numa_all_nodes_ptr, i)) { |
| 42 | + nodeIds.emplace_back(i); |
| 43 | + maxNodeId = i; |
| 44 | + } |
| 45 | + } |
| 46 | + } |
| 47 | + |
| 48 | + std::vector<unsigned> nodeIds; |
| 49 | + unsigned long maxNodeId = 0; |
| 50 | +}; |
| 51 | + |
// Predicate telling whether the memspace query is supported for a node id.
using isQuerySupportedFunc = bool (*)(size_t);
// Factory returning the memspace handle under test.
using memspaceGetFunc = umf_memspace_handle_t (*)();
// Parameter pack consumed by the parameterized fixtures below.
using memspaceGetParams = std::tuple<isQuerySupportedFunc, memspaceGetFunc>;
| 55 | + |
| 56 | +struct memspaceGetTest : ::numaNodesTest, |
| 57 | + ::testing::WithParamInterface<memspaceGetParams> { |
| 58 | + void SetUp() override { |
| 59 | + ::numaNodesTest::SetUp(); |
| 60 | + |
| 61 | + auto [isQuerySupported, memspaceGet] = this->GetParam(); |
| 62 | + |
| 63 | + if (!isQuerySupported(nodeIds.front())) { |
| 64 | + GTEST_SKIP(); |
| 65 | + } |
| 66 | + |
| 67 | + hMemspace = memspaceGet(); |
| 68 | + ASSERT_NE(hMemspace, nullptr); |
| 69 | + } |
| 70 | + |
| 71 | + umf_memspace_handle_t hMemspace = nullptr; |
| 72 | +}; |
| 73 | + |
| 74 | +struct memspaceProviderTest : ::memspaceGetTest { |
| 75 | + void SetUp() override { |
| 76 | + ::memspaceGetTest::SetUp(); |
| 77 | + |
| 78 | + if (::memspaceGetTest::IsSkipped()) { |
| 79 | + GTEST_SKIP(); |
| 80 | + } |
| 81 | + |
| 82 | + umf_result_t ret = |
| 83 | + umfMemoryProviderCreateFromMemspace(hMemspace, nullptr, &hProvider); |
| 84 | + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); |
| 85 | + ASSERT_NE(hProvider, nullptr); |
| 86 | + } |
| 87 | + |
| 88 | + void TearDown() override { |
| 89 | + ::memspaceGetTest::TearDown(); |
| 90 | + |
| 91 | + if (hProvider) { |
| 92 | + umfMemoryProviderDestroy(hProvider); |
| 93 | + } |
| 94 | + } |
| 95 | + |
| 96 | + umf_memory_provider_handle_t hProvider = nullptr; |
| 97 | +}; |
| 98 | + |
// These fixtures are instantiated per-memspace in the dedicated test files,
// so allow them to remain uninstantiated in translation units that only
// include this header.
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(memspaceGetTest);
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(memspaceProviderTest);
| 101 | + |
| 102 | +TEST_P(memspaceGetTest, providerFromMemspace) { |
| 103 | + umf_memory_provider_handle_t hProvider = nullptr; |
| 104 | + umf_result_t ret = |
| 105 | + umfMemoryProviderCreateFromMemspace(hMemspace, nullptr, &hProvider); |
| 106 | + UT_ASSERTeq(ret, UMF_RESULT_SUCCESS); |
| 107 | + UT_ASSERTne(hProvider, nullptr); |
| 108 | + |
| 109 | + umfMemoryProviderDestroy(hProvider); |
| 110 | +} |
| 111 | + |
| 112 | +TEST_P(memspaceProviderTest, allocFree) { |
| 113 | + void *ptr = nullptr; |
| 114 | + size_t size = SIZE_4K; |
| 115 | + size_t alignment = 0; |
| 116 | + |
| 117 | + umf_result_t ret = umfMemoryProviderAlloc(hProvider, size, alignment, &ptr); |
| 118 | + UT_ASSERTeq(ret, UMF_RESULT_SUCCESS); |
| 119 | + UT_ASSERTne(ptr, nullptr); |
| 120 | + |
| 121 | + // Access the allocation, so that all the pages associated with it are |
| 122 | + // allocated on some NUMA node. |
| 123 | + memset(ptr, 0xFF, size); |
| 124 | + |
| 125 | + ret = umfMemoryProviderFree(hProvider, ptr, size); |
| 126 | + UT_ASSERTeq(ret, UMF_RESULT_SUCCESS); |
| 127 | +} |
| 128 | + |
| 129 | +static std::vector<int> getAllCpus() { |
| 130 | + std::vector<int> allCpus; |
| 131 | + for (int i = 0; i < numa_num_possible_cpus(); ++i) { |
| 132 | + if (numa_bitmask_isbitset(numa_all_cpus_ptr, i)) { |
| 133 | + allCpus.push_back(i); |
| 134 | + } |
| 135 | + } |
| 136 | + |
| 137 | + return allCpus; |
| 138 | +} |
| 139 | + |
| 140 | +#define MAX_NODES 512 |
| 141 | + |
| 142 | +TEST_P(memspaceProviderTest, allocLocalMt) { |
| 143 | + auto pinAllocValidate = [&](umf_memory_provider_handle_t hProvider, |
| 144 | + int cpu) { |
| 145 | + hwloc_topology_t topology = NULL; |
| 146 | + UT_ASSERTeq(hwloc_topology_init(&topology), 0); |
| 147 | + UT_ASSERTeq(hwloc_topology_load(topology), 0); |
| 148 | + |
| 149 | + // Pin current thread to the provided CPU. |
| 150 | + hwloc_cpuset_t pinCpuset = hwloc_bitmap_alloc(); |
| 151 | + UT_ASSERTeq(hwloc_bitmap_set(pinCpuset, cpu), 0); |
| 152 | + UT_ASSERTeq( |
| 153 | + hwloc_set_cpubind(topology, pinCpuset, HWLOC_CPUBIND_THREAD), 0); |
| 154 | + |
| 155 | + // Confirm that the thread is pinned to the provided CPU. |
| 156 | + hwloc_cpuset_t curCpuset = hwloc_bitmap_alloc(); |
| 157 | + UT_ASSERTeq( |
| 158 | + hwloc_get_cpubind(topology, curCpuset, HWLOC_CPUBIND_THREAD), 0); |
| 159 | + UT_ASSERT(hwloc_bitmap_isequal(curCpuset, pinCpuset)); |
| 160 | + hwloc_bitmap_free(curCpuset); |
| 161 | + hwloc_bitmap_free(pinCpuset); |
| 162 | + |
| 163 | + // Allocate some memory. |
| 164 | + const size_t size = SIZE_4K; |
| 165 | + const size_t alignment = 0; |
| 166 | + void *ptr = nullptr; |
| 167 | + |
| 168 | + umf_result_t ret = |
| 169 | + umfMemoryProviderAlloc(hProvider, size, alignment, &ptr); |
| 170 | + UT_ASSERTeq(ret, UMF_RESULT_SUCCESS); |
| 171 | + UT_ASSERTne(ptr, nullptr); |
| 172 | + |
| 173 | + // Access the allocation, so that all the pages associated with it are |
| 174 | + // allocated on some NUMA node. |
| 175 | + memset(ptr, 0xFF, size); |
| 176 | + |
| 177 | + // Get the NUMA node responsible for this allocation. |
| 178 | + int mode = -1; |
| 179 | + std::vector<size_t> boundNodeIds; |
| 180 | + size_t allocNodeId = SIZE_MAX; |
| 181 | + getAllocationPolicy(ptr, maxNodeId, mode, boundNodeIds, allocNodeId); |
| 182 | + |
| 183 | + // Get the CPUs associated with the specified NUMA node. |
| 184 | + hwloc_obj_t allocNodeObj = |
| 185 | + hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, allocNodeId); |
| 186 | + |
| 187 | + unsigned nNodes = MAX_NODES; |
| 188 | + std::vector<hwloc_obj_t> localNodes(MAX_NODES); |
| 189 | + hwloc_location loc; |
| 190 | + loc.location.object = allocNodeObj, |
| 191 | + loc.type = hwloc_location_type_alias::HWLOC_LOCATION_TYPE_OBJECT; |
| 192 | + UT_ASSERTeq(hwloc_get_local_numanode_objs(topology, &loc, &nNodes, |
| 193 | + localNodes.data(), 0), |
| 194 | + 0); |
| 195 | + UT_ASSERT(nNodes <= MAX_NODES); |
| 196 | + |
| 197 | + // Confirm that the allocation from this thread was made to a local |
| 198 | + // NUMA node. |
| 199 | + UT_ASSERT(std::any_of(localNodes.begin(), localNodes.end(), |
| 200 | + [&allocNodeObj](hwloc_obj_t node) { |
| 201 | + return node == allocNodeObj; |
| 202 | + })); |
| 203 | + |
| 204 | + ret = umfMemoryProviderFree(hProvider, ptr, size); |
| 205 | + UT_ASSERTeq(ret, UMF_RESULT_SUCCESS); |
| 206 | + |
| 207 | + hwloc_topology_destroy(topology); |
| 208 | + }; |
| 209 | + |
| 210 | + const auto cpus = getAllCpus(); |
| 211 | + std::vector<std::thread> threads; |
| 212 | + for (auto cpu : cpus) { |
| 213 | + threads.emplace_back(pinAllocValidate, hProvider, cpu); |
| 214 | + } |
| 215 | + |
| 216 | + for (auto &thread : threads) { |
| 217 | + thread.join(); |
| 218 | + } |
| 219 | +} |
| 220 | + |
| 221 | +#endif /* UMF_MEMSPACE_FIXTURES_HPP */ |
0 commit comments