
Commit 4f74fb3

chessturo authored and liuw committed
hv_balloon: Update the balloon driver to use the SBRM API
This patch is intended as a proof-of-concept for the new SBRM machinery[1]. For some brief background, the idea behind SBRM is using the __cleanup__ attribute to automatically unlock locks (or otherwise release resources) when they go out of scope, similar to C++ style RAII.

This promises some benefits, such as making code simpler (particularly where you have lots of goto fail; type constructs) as well as reducing the surface area for certain kinds of bugs.

The changes in this patch should not result in any difference in how the code actually runs (i.e., it's purely an exercise in this new syntax sugar). In one instance SBRM was not appropriate, so I left that part alone, but all other locking/unlocking is handled automatically in this patch.

[1] https://lore.kernel.org/all/20230626125726.GU4253@hirez.programming.kicks-ass.net/

Suggested-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: "Mitchell Levy (Microsoft)" <levymitchell0@gmail.com>
Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Wei Liu <wei.liu@kernel.org>
Link: https://lore.kernel.org/r/20230807-sbrm-hyperv-v2-1-9d2ac15305bd@gmail.com
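For readers new to the mechanism the message describes, the following is a minimal, self-contained sketch of the underlying cleanup attribute in ordinary userspace C. It is illustrative only: the names unlock_on_exit and demo_lock are invented for this example, and the kernel's actual API is the guard()/scoped_guard() machinery from linux/cleanup.h shown in the diff below.

/*
 * Standalone sketch of the mechanism behind SBRM, outside the kernel:
 * the compiler's cleanup attribute runs a designated function whenever
 * the annotated variable goes out of scope, on every exit path.
 * Build with: cc -pthread sbrm_sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called automatically when the guarded variable leaves scope. */
static void unlock_on_exit(pthread_mutex_t **m)
{
        pthread_mutex_unlock(*m);
}

static int critical_section(int fail)
{
        pthread_mutex_t *g __attribute__((cleanup(unlock_on_exit))) = &demo_lock;

        pthread_mutex_lock(g);
        if (fail)
                return -1;      /* early return: unlock still happens */
        printf("did the work under the lock\n");
        return 0;               /* normal return: unlock still happens */
}

int main(void)
{
        critical_section(0);
        critical_section(1);
        return 0;
}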
1 parent 5d0c230 commit 4f74fb3

1 file changed: 38 additions, 44 deletions


drivers/hv/hv_balloon.c

Lines changed: 38 additions & 44 deletions
@@ -8,6 +8,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/cleanup.h>
 #include <linux/kernel.h>
 #include <linux/jiffies.h>
 #include <linux/mman.h>
@@ -646,7 +647,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
                               void *v)
 {
         struct memory_notify *mem = (struct memory_notify *)v;
-        unsigned long flags, pfn_count;
+        unsigned long pfn_count;
 
         switch (val) {
         case MEM_ONLINE:
@@ -655,21 +656,22 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
                 break;
 
         case MEM_OFFLINE:
-                spin_lock_irqsave(&dm_device.ha_lock, flags);
-                pfn_count = hv_page_offline_check(mem->start_pfn,
-                                                  mem->nr_pages);
-                if (pfn_count <= dm_device.num_pages_onlined) {
-                        dm_device.num_pages_onlined -= pfn_count;
-                } else {
-                        /*
-                         * We're offlining more pages than we managed to online.
-                         * This is unexpected. In any case don't let
-                         * num_pages_onlined wrap around zero.
-                         */
-                        WARN_ON_ONCE(1);
-                        dm_device.num_pages_onlined = 0;
+                scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+                        pfn_count = hv_page_offline_check(mem->start_pfn,
+                                                          mem->nr_pages);
+                        if (pfn_count <= dm_device.num_pages_onlined) {
+                                dm_device.num_pages_onlined -= pfn_count;
+                        } else {
+                                /*
+                                 * We're offlining more pages than we
+                                 * managed to online. This is
+                                 * unexpected. In any case don't let
+                                 * num_pages_onlined wrap around zero.
+                                 */
+                                WARN_ON_ONCE(1);
+                                dm_device.num_pages_onlined = 0;
+                        }
                 }
-                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                 break;
         case MEM_GOING_ONLINE:
         case MEM_GOING_OFFLINE:
@@ -721,24 +723,23 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
         unsigned long start_pfn;
         unsigned long processed_pfn;
         unsigned long total_pfn = pfn_count;
-        unsigned long flags;
 
         for (i = 0; i < (size/HA_CHUNK); i++) {
                 start_pfn = start + (i * HA_CHUNK);
 
-                spin_lock_irqsave(&dm_device.ha_lock, flags);
-                has->ha_end_pfn += HA_CHUNK;
+                scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+                        has->ha_end_pfn += HA_CHUNK;
 
-                if (total_pfn > HA_CHUNK) {
-                        processed_pfn = HA_CHUNK;
-                        total_pfn -= HA_CHUNK;
-                } else {
-                        processed_pfn = total_pfn;
-                        total_pfn = 0;
-                }
+                        if (total_pfn > HA_CHUNK) {
+                                processed_pfn = HA_CHUNK;
+                                total_pfn -= HA_CHUNK;
+                        } else {
+                                processed_pfn = total_pfn;
+                                total_pfn = 0;
+                        }
 
-                has->covered_end_pfn += processed_pfn;
-                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+                        has->covered_end_pfn += processed_pfn;
+                }
 
                 reinit_completion(&dm_device.ol_waitevent);
 
@@ -758,10 +759,10 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
                          */
                         do_hot_add = false;
                 }
-                spin_lock_irqsave(&dm_device.ha_lock, flags);
-                has->ha_end_pfn -= HA_CHUNK;
-                has->covered_end_pfn -= processed_pfn;
-                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+                scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+                        has->ha_end_pfn -= HA_CHUNK;
+                        has->covered_end_pfn -= processed_pfn;
+                }
                 break;
         }
 
@@ -781,10 +782,9 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 static void hv_online_page(struct page *pg, unsigned int order)
 {
         struct hv_hotadd_state *has;
-        unsigned long flags;
         unsigned long pfn = page_to_pfn(pg);
 
-        spin_lock_irqsave(&dm_device.ha_lock, flags);
+        guard(spinlock_irqsave)(&dm_device.ha_lock);
         list_for_each_entry(has, &dm_device.ha_region_list, list) {
                 /* The page belongs to a different HAS. */
                 if ((pfn < has->start_pfn) ||
@@ -794,7 +794,6 @@ static void hv_online_page(struct page *pg, unsigned int order)
                 hv_bring_pgs_online(has, pfn, 1UL << order);
                 break;
         }
-        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 }
 
 static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
@@ -803,9 +802,8 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
         struct hv_hotadd_gap *gap;
         unsigned long residual, new_inc;
         int ret = 0;
-        unsigned long flags;
 
-        spin_lock_irqsave(&dm_device.ha_lock, flags);
+        guard(spinlock_irqsave)(&dm_device.ha_lock);
         list_for_each_entry(has, &dm_device.ha_region_list, list) {
                 /*
                  * If the pfn range we are dealing with is not in the current
@@ -852,7 +850,6 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
                 ret = 1;
                 break;
         }
-        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 
         return ret;
 }
@@ -947,7 +944,6 @@ static unsigned long process_hot_add(unsigned long pg_start,
 {
         struct hv_hotadd_state *ha_region = NULL;
         int covered;
-        unsigned long flags;
 
         if (pfn_cnt == 0)
                 return 0;
@@ -979,9 +975,9 @@ static unsigned long process_hot_add(unsigned long pg_start,
                 ha_region->covered_end_pfn = pg_start;
                 ha_region->end_pfn = rg_start + rg_size;
 
-                spin_lock_irqsave(&dm_device.ha_lock, flags);
-                list_add_tail(&ha_region->list, &dm_device.ha_region_list);
-                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+                scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+                        list_add_tail(&ha_region->list, &dm_device.ha_region_list);
+                }
         }
 
 do_pg_range:
@@ -2047,7 +2043,6 @@ static void balloon_remove(struct hv_device *dev)
         struct hv_dynmem_device *dm = hv_get_drvdata(dev);
         struct hv_hotadd_state *has, *tmp;
         struct hv_hotadd_gap *gap, *tmp_gap;
-        unsigned long flags;
 
         if (dm->num_pages_ballooned != 0)
                 pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
@@ -2073,7 +2068,7 @@ static void balloon_remove(struct hv_device *dev)
 #endif
         }
 
-        spin_lock_irqsave(&dm_device.ha_lock, flags);
+        guard(spinlock_irqsave)(&dm_device.ha_lock);
         list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
                 list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
                         list_del(&gap->list);
@@ -2082,7 +2077,6 @@ static void balloon_remove(struct hv_device *dev)
                 list_del(&has->list);
                 kfree(has);
         }
-        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 }
 
 static int balloon_suspend(struct hv_device *hv_dev)
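To summarize the two SBRM forms the patch uses, here is a distilled illustration; example_lock, example_counter, and the function names are invented for this sketch, while guard() and scoped_guard() are the real macros from linux/cleanup.h exercised in the diff above.

/*
 * Illustrative fragment, not code from the patch: the two SBRM forms
 * used above, shown side by side. example_lock and example_counter are
 * invented names.
 */
#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_counter;

/* guard(): the lock is held from this statement to the end of the
 * function and released automatically on every return path. */
static void example_function_scope(void)
{
        guard(spinlock_irqsave)(&example_lock);
        example_counter++;
}

/* scoped_guard(): the lock is held only for the braced block, which maps
 * naturally onto the old spin_lock_irqsave()/spin_unlock_irqrestore()
 * pairs it replaces. */
static void example_block_scope(void)
{
        scoped_guard(spinlock_irqsave, &example_lock) {
                example_counter++;
        }
        /* the lock has already been released here */
}

Note that no flags local appears in either form: the irqsave/irqrestore state is carried inside the guard object itself, which is why each converted function in the diff also drops its unsigned long flags variable.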
