
Commit b9088ad

AdrianDanis authored and CQ Bot committed
[kernel][vm] Remove ScanForZeroPages
The full scanning of all pages in all VMOs for zero pages is now just
used by an informational query, with the actual zero page deduper using
different mechanisms through the page queues. ScanForZeroPages and
related methods represent unneeded complexity to maintain and test.

Change-Id: I628b6dc854a3fe24562ef2a3a3fa8ae32de10e54
Reviewed-on: https://fuchsia-review.googlesource.com/c/fuchsia/+/742426
Fuchsia-Auto-Submit: Adrian Danis <adanis@google.com>
Reviewed-by: Rasha Eqbal <rashaeqbal@google.com>
Commit-Queue: Auto-Submit <auto-submit@fuchsia-infra.iam.gserviceaccount.com>
1 parent 1d7d51c commit b9088ad
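
The queue-based mechanism the commit message alludes to is not itself part of this diff, so the following is a minimal, self-contained C++ sketch of the trade-off it describes, not Fuchsia code: the full scan being removed does work proportional to all committed pages on every pass, while a queue-driven deduper only revisits pages that were recently forked from the zero page. The names below (Page, CountZeroPagesByFullScan, DedupZeroForkCandidates) are illustrative stand-ins for the kernel's vm_page_t, VmObject::ScanAllForZeroPages, and the page-queue path; only the shape of the logic is meant to carry over.

// Illustrative model only; not kernel code. Real types such as vm_page_t and
// PageQueues are replaced by stand-ins so the sketch compiles on its own.
#include <cstdint>
#include <deque>
#include <vector>

struct Page {
  std::vector<uint8_t> data = std::vector<uint8_t>(4096, 0);
  bool pinned = false;
  bool IsZero() const {
    for (uint8_t b : data) {
      if (b != 0) {
        return false;
      }
    }
    return true;
  }
};

// Removed approach: walk every committed page of every object on each pass.
// Cost scales with total committed memory, and the walk blocks other VMO
// operations for its whole duration.
uint32_t CountZeroPagesByFullScan(const std::vector<Page*>& all_pages) {
  uint32_t count = 0;
  for (const Page* p : all_pages) {
    if (!p->pinned && p->IsZero()) {
      count++;
    }
  }
  return count;
}

// Shape of the surviving approach: pages forked from the zero page are put on
// a candidate queue when they are committed by a write, and the deduper only
// revisits that queue. Cost scales with the number of recent candidates.
uint32_t DedupZeroForkCandidates(std::deque<Page*>& zero_fork_queue) {
  uint32_t deduped = 0;
  while (!zero_fork_queue.empty()) {
    Page* p = zero_fork_queue.front();
    zero_fork_queue.pop_front();
    if (!p->pinned && p->IsZero()) {
      p->data = {};  // Stand-in for returning the frame and mapping the zero page.
      deduped++;
    }
  }
  return deduped;
}

The diff below only deletes the full-scan entry points (ScanAllForZeroPages, ScanForZeroPages, ScanForZeroPagesLocked) and their tests; the per-page VmCowPages::DedupZeroPage path, whose context lines remain visible in vm_cow_pages.cc, is untouched.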

8 files changed: +0 additions, -228 deletions

zircon/kernel/vm/include/vm/vm_cow_pages.h

Lines changed: 0 additions & 4 deletions
@@ -483,10 +483,6 @@ class VmCowPages final
   using AttributionCounts = VmObject::AttributionCounts;
   AttributionCounts AttributedPagesInRangeLocked(uint64_t offset, uint64_t len) const TA_REQ(lock_);
 
-  // Scans this cow pages range for zero pages and frees them if |reclaim| is set to true. Returns
-  // the number of pages freed or scanned.
-  uint32_t ScanForZeroPagesLocked(bool reclaim) TA_REQ(lock_);
-
   enum class EvictionHintAction : uint8_t {
     Follow,
     Ignore,

zircon/kernel/vm/include/vm/vm_object.h

Lines changed: 0 additions & 13 deletions
@@ -712,19 +712,6 @@ class VmObject : public VmHierarchyBase,
   // Detaches the underlying page source, if present. Can be called multiple times.
   virtual void DetachSource() {}
 
-  // Walks through every VMO, calls ScanForZeroPages on them, and returns the sum. This function is
-  // very expensive and will hold the AllVmosLock mutex for the entire duration. Should not be
-  // called casually or when it is not suitable to block operations against all other VMOs for an
-  // extended period of time.
-  static uint32_t ScanAllForZeroPages(bool reclaim);
-
-  // Scans for pages that could validly be de-duped/decommitted back to the zero page. If `reclaim`
-  // is true the pages will actually be de-duped. In either case the number of found pages is
-  // returned. It is expected that this would hold the VMOs lock for an extended period of time
-  // and should only be called when it is suitable for block all VMO operations for an extended
-  // period of time.
-  virtual uint32_t ScanForZeroPages(bool reclaim) { return 0; }
-
  protected:
   explicit VmObject(fbl::RefPtr<VmHierarchyState> hierarchy_state_ptr);

zircon/kernel/vm/include/vm/vm_object_paged.h

Lines changed: 0 additions & 2 deletions
@@ -221,8 +221,6 @@ class VmObjectPaged final : public VmObject {
   zx_status_t CreateChildSlice(uint64_t offset, uint64_t size, bool copy_name,
                                fbl::RefPtr<VmObject>* child_vmo) override;
 
-  uint32_t ScanForZeroPages(bool reclaim) override;
-
   // Returns whether or not zero pages can be safely deduped from this VMO. Zero pages cannot be
   // deduped if the VMO is in use for kernel mappings, or if the pages cannot be accessed from the
   // physmap due to not being cached.

zircon/kernel/vm/scanner.cc

Lines changed: 0 additions & 2 deletions
@@ -77,8 +77,6 @@ KCOUNTER(zero_scan_pages_scanned, "vm.scanner.zero_scan.total_pages_considered")
 KCOUNTER(zero_scan_pages_deduped, "vm.scanner.zero_scan.pages_deduped")
 
 void scanner_print_stats() {
-  uint64_t zero_pages = VmObject::ScanAllForZeroPages(false);
-  printf("[SCAN]: Found %lu zero pages across all of memory\n", zero_pages);
   PageQueues::Counts queue_counts = pmm_page_queues()->QueueCounts();
   for (size_t i = 0; i < PageQueues::kNumReclaim; i++) {
     printf("[SCAN]: Found %lu reclaimable pages in queue %zu\n", queue_counts.reclaim[i], i);

zircon/kernel/vm/unittests/vmo_unittest.cc

Lines changed: 0 additions & 103 deletions
@@ -1313,82 +1313,6 @@ static bool vmo_clone_removes_write_test() {
   END_TEST;
 }
 
-static bool vmo_zero_scan_test() {
-  BEGIN_TEST;
-
-  AutoVmScannerDisable scanner_disable;
-
-  auto mem = testing::UserMemory::Create(PAGE_SIZE);
-  ASSERT_NONNULL(mem);
-
-  const auto& user_aspace = mem->aspace();
-  ASSERT_NONNULL(user_aspace);
-  ASSERT_TRUE(user_aspace->is_user());
-
-  // Initially uncommitted, which should not count as having zero pages.
-  EXPECT_EQ(0u, mem->vmo()->ScanForZeroPages(false));
-
-  // Validate that this mapping reads as zeros
-  EXPECT_EQ(ZX_OK, user_aspace->SoftFault(mem->base(), 0u));
-  EXPECT_EQ(0, mem->get<int32_t>());
-
-  // Reading from the page should not have committed anything, zero or otherwise.
-  EXPECT_EQ(0u, mem->vmo()->ScanForZeroPages(false));
-
-  // IF we write to the page, this should make it committed.
-  EXPECT_EQ(ZX_OK, user_aspace->SoftFault(mem->base(), VMM_PF_FLAG_WRITE));
-  mem->put<int32_t>(0);
-  EXPECT_EQ(1u, mem->vmo()->ScanForZeroPages(false));
-
-  // Check that changing the contents effects the zero page count.
-  EXPECT_EQ(ZX_OK, user_aspace->SoftFault(mem->base(), VMM_PF_FLAG_WRITE));
-  mem->put<int32_t>(42);
-  EXPECT_EQ(0u, mem->vmo()->ScanForZeroPages(false));
-  EXPECT_EQ(ZX_OK, user_aspace->SoftFault(mem->base(), VMM_PF_FLAG_WRITE));
-  mem->put<int32_t>(0);
-  EXPECT_EQ(1u, mem->vmo()->ScanForZeroPages(false));
-
-  // Scanning should drop permissions in the hardware page table from write to read-only.
-  paddr_t paddr_readable;
-  uint mmu_flags;
-  EXPECT_EQ(ZX_OK, user_aspace->SoftFault(mem->base(), VMM_PF_FLAG_WRITE));
-  mem->put<int32_t>(0);
-  zx_status_t status = user_aspace->arch_aspace().Query(mem->base(), &paddr_readable, &mmu_flags);
-  EXPECT_EQ(ZX_OK, status);
-  EXPECT_TRUE(mmu_flags & ARCH_MMU_FLAG_PERM_WRITE);
-  mem->vmo()->ScanForZeroPages(false);
-  status = user_aspace->arch_aspace().Query(mem->base(), &paddr_readable, &mmu_flags);
-  EXPECT_EQ(ZX_OK, status);
-  EXPECT_FALSE(mmu_flags & ARCH_MMU_FLAG_PERM_WRITE);
-
-  // Pinning the page should prevent it from being counted.
-  EXPECT_EQ(1u, mem->vmo()->ScanForZeroPages(false));
-  EXPECT_EQ(ZX_OK, mem->vmo()->CommitRangePinned(0, PAGE_SIZE, false));
-  EXPECT_EQ(0u, mem->vmo()->ScanForZeroPages(false));
-  mem->vmo()->Unpin(0, PAGE_SIZE);
-  EXPECT_EQ(1u, mem->vmo()->ScanForZeroPages(false));
-
-  // Creating a kernel mapping should prevent any counting from occurring.
-  VmAspace* kernel_aspace = VmAspace::kernel_aspace();
-  void* ptr;
-  status = kernel_aspace->MapObjectInternal(mem->vmo(), "test", 0, PAGE_SIZE, &ptr, 0,
-                                            VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
-  EXPECT_EQ(ZX_OK, status);
-  EXPECT_EQ(0u, mem->vmo()->ScanForZeroPages(false));
-  kernel_aspace->FreeRegion(reinterpret_cast<vaddr_t>(ptr));
-  EXPECT_EQ(1u, mem->vmo()->ScanForZeroPages(false));
-
-  // Actually evict the page now and check that the attribution count goes down and the eviction
-  // event count goes up.
-  EXPECT_EQ(1u, mem->vmo()->AttributedPages().uncompressed);
-  EXPECT_EQ(0u, mem->vmo()->EvictionEventCount());
-  EXPECT_EQ(1u, mem->vmo()->ScanForZeroPages(true));
-  EXPECT_EQ(0u, mem->vmo()->AttributedPages().uncompressed);
-  EXPECT_EQ(1u, mem->vmo()->EvictionEventCount());
-
-  END_TEST;
-}
-
 static bool vmo_move_pages_on_access_test() {
   BEGIN_TEST;
 
@@ -2388,18 +2312,6 @@ static bool vmo_attribution_dedup_test() {
   EXPECT_EQ(true, verify_object_page_attribution(vmo.get(), expected_gen_count,
                                                  AttributionCounts{2u, 0u}));
 
-  // Scan for zero pages, returning only the count (without triggering any reclamation). This should
-  // *not* change the generation count.
-  ASSERT_EQ(2u, vmo->ScanForZeroPages(false));
-  EXPECT_EQ(true, verify_object_page_attribution(vmo.get(), expected_gen_count,
-                                                 AttributionCounts{2u, 0u}));
-
-  // Scan for zero pages and reclaim them. This should change the generation count.
-  ASSERT_EQ(2u, vmo->ScanForZeroPages(true));
-  ++expected_gen_count;
-  EXPECT_EQ(true,
-            verify_object_page_attribution(vmo.get(), expected_gen_count, AttributionCounts{}));
-
   END_TEST;
 }
 
@@ -3710,19 +3622,6 @@ static bool vmo_dedup_dirty_test() {
   // No committed pages remaining.
   EXPECT_EQ(0u, vmo->AttributedPages().uncompressed);
 
-  // Commit the page again.
-  vmo->CommitRange(0, PAGE_SIZE);
-  page = vmo->DebugGetPage(0);
-
-  // The page is still clean.
-  EXPECT_TRUE(pmm_page_queues()->DebugPageIsReclaim(page));
-
-  // Zero scan should be able to dedup a clean page.
-  EXPECT_TRUE(vmo->ScanForZeroPages(true));
-
-  // No committed pages remaining.
-  EXPECT_EQ(0u, vmo->AttributedPages().uncompressed);
-
   // Write to the page making it dirty.
   uint8_t data = 0xff;
   status = vmo->Write(&data, 0, sizeof(data));
@@ -3734,7 +3633,6 @@ static bool vmo_dedup_dirty_test() {
 
   // We should not be able to dedup the page.
   EXPECT_FALSE(vmo->DebugGetCowPages()->DedupZeroPage(page, 0));
-  EXPECT_FALSE(vmo->ScanForZeroPages(true));
   EXPECT_EQ(1u, vmo->AttributedPages().uncompressed);
 
   END_TEST;
@@ -3766,7 +3664,6 @@ VM_UNITTEST(vmo_lookup_test)
 VM_UNITTEST(vmo_lookup_slice_test)
 VM_UNITTEST(vmo_lookup_clone_test)
 VM_UNITTEST(vmo_clone_removes_write_test)
-VM_UNITTEST(vmo_zero_scan_test)
 VM_UNITTEST(vmo_move_pages_on_access_test)
 VM_UNITTEST(vmo_eviction_hints_test)
 VM_UNITTEST(vmo_always_need_evicts_loaned_test)

zircon/kernel/vm/vm_cow_pages.cc

Lines changed: 0 additions & 69 deletions
@@ -470,75 +470,6 @@ bool VmCowPages::DedupZeroPage(vm_page_t* page, uint64_t offset) {
   return false;
 }
 
-uint32_t VmCowPages::ScanForZeroPagesLocked(bool reclaim) {
-  canary_.Assert();
-
-  if (!can_decommit_zero_pages_locked()) {
-    // Even if !reclaim, we don't count zero pages in contiguous VMOs because we can't reclaim them
-    // anyway (at least not just due to them being zero; user mode can decommit). We also don't
-    // add contiguous VMO pages to the ZeroFork queue ever, so counting these would only create
-    // false hope that we could potentially loan the pages despite explicitly not wanting to
-    // auto-decommit zero pages as expressed by !can_decommit_zero_pages_locked(). In future we
-    // may relax this restriction on contiguous VMOs at which point it'd be fine to remove zero
-    // pages (assuming other criteria are met, like not being pinned).
-    return 0;
-  }
-
-  // Check if we have any slice children. Slice children may have writable mappings to our pages,
-  // and so we need to also remove any mappings from them. Non-slice children could only have
-  // read-only mappings, which is the state we already want, and so we don't need to touch them.
-  for (auto& child : children_list_) {
-    AssertHeld(child.lock_);
-    if (child.is_slice_locked()) {
-      // Slices are strict subsets of their parents so we don't need to bother looking at parent
-      // limits etc and can just operate on the entire range.
-      child.RangeChangeUpdateLocked(0, child.size_, RangeChangeOp::RemoveWrite);
-    }
-  }
-
-  // We stack-own loaned pages from when they're removed until they're freed.
-  __UNINITIALIZED StackOwnedLoanedPagesInterval raii_interval;
-
-  list_node_t freed_list;
-  list_initialize(&freed_list);
-
-  uint32_t count = 0;
-  page_list_.RemovePages(
-      [&count, &freed_list, reclaim, this](VmPageOrMarker* p, uint64_t off) {
-        // Pinned pages cannot be decommitted so do not consider them, and also skip references as
-        // they they are presumably already deduped and would need to be decompressed to scan them.
-        // If the page supports dirty tracking, it should be Clean.
-        if (p->IsPage() && p->Page()->object.pin_count == 0 &&
-            (!is_page_dirty_tracked(p->Page()) || is_page_clean(p->Page())) &&
-            IsZeroPage(p->Page())) {
-          count++;
-          if (reclaim) {
-            // Need to remove all mappings (include read) ones to this range before we remove the
-            // page.
-            AssertHeld(this->lock_);
-            RangeChangeUpdateLocked(off, PAGE_SIZE, RangeChangeOp::Unmap);
-            vm_page_t* page = p->ReleasePage();
-            pmm_page_queues()->Remove(page);
-            DEBUG_ASSERT(!list_in_list(&page->queue_node));
-            list_add_tail(&freed_list, &page->queue_node);
-            *p = VmPageOrMarker::Marker();
-          }
-        }
-        return ZX_ERR_NEXT;
-      },
-      0, VmPageList::MAX_SIZE);
-
-  FreePagesLocked(&freed_list, /*freeing_owned_pages=*/true);
-
-  if (reclaim && count > 0) {
-    IncrementHierarchyGenerationCountLocked();
-    // A batch free is counted as a single eviction event.
-    eviction_event_count_++;
-  }
-
-  return count;
-}
-
 zx_status_t VmCowPages::Create(fbl::RefPtr<VmHierarchyState> root_lock, VmCowPagesOptions options,
                                uint32_t pmm_alloc_flags, uint64_t size,
                                fbl::RefPtr<VmCowPages>* cow_pages) {

zircon/kernel/vm/vm_object.cc

Lines changed: 0 additions & 10 deletions
@@ -50,16 +50,6 @@ VmObject::~VmObject() {
   DEBUG_ASSERT(children_list_.is_empty());
 }
 
-uint32_t VmObject::ScanAllForZeroPages(bool reclaim) {
-  uint32_t count = 0;
-  Guard<CriticalMutex> guard{AllVmosLock::Get()};
-
-  for (auto& vmo : all_vmos_) {
-    count += vmo.ScanForZeroPages(reclaim);
-  }
-  return count;
-}
-
 void VmObject::AddToGlobalList() {
   Guard<CriticalMutex> guard{AllVmosLock::Get()};
   all_vmos_.push_back(this);

zircon/kernel/vm/vm_object_paged.cc

Lines changed: 0 additions & 25 deletions
@@ -166,31 +166,6 @@ bool VmObjectPaged::CanDedupZeroPagesLocked() {
   return true;
 }
 
-uint32_t VmObjectPaged::ScanForZeroPages(bool reclaim) {
-  canary_.Assert();
-
-  Guard<CriticalMutex> guard{lock()};
-
-  // Skip uncached VMOs as we cannot efficiently scan them.
-  if ((cache_policy_ & ZX_CACHE_POLICY_MASK) != ZX_CACHE_POLICY_CACHED) {
-    return 0;
-  }
-
-  // Skip any VMOs that have non user mappings as we cannot safely remove write permissions from
-  // them and indicates this VMO is actually in use by the kernel and we probably would not want to
-  // perform zero page de-duplication on it even if we could.
-  for (auto& m : mapping_list_) {
-    if (!m.aspace()->is_user()) {
-      return 0;
-    }
-    // Remove write from the mapping to ensure it's not being concurrently modified.
-    m.assert_object_lock();
-    m.AspaceRemoveWriteVmoRangeLocked(0, size_locked());
-  }
-
-  return cow_pages_locked()->ScanForZeroPagesLocked(reclaim);
-}
-
 zx_status_t VmObjectPaged::CreateCommon(uint32_t pmm_alloc_flags, uint32_t options, uint64_t size,
                                         fbl::RefPtr<VmObjectPaged>* obj) {
   DEBUG_ASSERT(!(options & (kContiguous | kCanBlockOnPageRequests)));
