 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

+#include <linux/cleanup.h>
 #include <linux/kernel.h>
 #include <linux/jiffies.h>
 #include <linux/mman.h>
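
[Editor's note, not part of the patch: <linux/cleanup.h> supplies the guard()/scoped_guard() helpers used in the hunks below; the spinlock_irqsave guard class they instantiate comes from <linux/spinlock.h>. A rough, illustrative sketch of the two forms follows; the names example_lock and example_counter are invented for this note.]

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_counter;

static void guard_form(void)
{
	/* Lock taken here, released automatically when the scope ends. */
	guard(spinlock_irqsave)(&example_lock);
	example_counter++;
}

static void scoped_form(void)
{
	/* Lock held only for the braced block. */
	scoped_guard(spinlock_irqsave, &example_lock) {
		example_counter++;
	}
	/* Lock already released here. */
}
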
@@ -646,7 +647,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
			       void *v)
 {
 	struct memory_notify *mem = (struct memory_notify *)v;
-	unsigned long flags, pfn_count;
+	unsigned long pfn_count;

 	switch (val) {
 	case MEM_ONLINE:
@@ -655,21 +656,22 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
 		break;

 	case MEM_OFFLINE:
-		spin_lock_irqsave(&dm_device.ha_lock, flags);
-		pfn_count = hv_page_offline_check(mem->start_pfn,
-						  mem->nr_pages);
-		if (pfn_count <= dm_device.num_pages_onlined) {
-			dm_device.num_pages_onlined -= pfn_count;
-		} else {
-			/*
-			 * We're offlining more pages than we managed to online.
-			 * This is unexpected. In any case don't let
-			 * num_pages_onlined wrap around zero.
-			 */
-			WARN_ON_ONCE(1);
-			dm_device.num_pages_onlined = 0;
+		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+			pfn_count = hv_page_offline_check(mem->start_pfn,
+							  mem->nr_pages);
+			if (pfn_count <= dm_device.num_pages_onlined) {
+				dm_device.num_pages_onlined -= pfn_count;
+			} else {
+				/*
+				 * We're offlining more pages than we
+				 * managed to online. This is
+				 * unexpected. In any case don't let
+				 * num_pages_onlined wrap around zero.
+				 */
+				WARN_ON_ONCE(1);
+				dm_device.num_pages_onlined = 0;
+			}
 		}
-		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 		break;
 	case MEM_GOING_ONLINE:
 	case MEM_GOING_OFFLINE:
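
[Editor's note, not part of the patch: scoped_guard() is built on a single-iteration for loop, so a break written inside the guarded block would only leave the guard, not the enclosing switch. Keeping the MEM_OFFLINE break; after the guard's closing brace, as the hunk above does, preserves the original control flow. A minimal sketch with a made-up lock:]

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_notifier(unsigned long val)
{
	switch (val) {
	case 1:
		scoped_guard(spinlock_irqsave, &demo_lock) {
			/* A break here would only exit the guard block. */
		}
		break;	/* The switch is left after the lock is dropped. */
	default:
		break;
	}
}
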
@@ -721,24 +723,23 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 	unsigned long start_pfn;
 	unsigned long processed_pfn;
 	unsigned long total_pfn = pfn_count;
-	unsigned long flags;

 	for (i = 0; i < (size/HA_CHUNK); i++) {
 		start_pfn = start + (i * HA_CHUNK);

-		spin_lock_irqsave(&dm_device.ha_lock, flags);
-		has->ha_end_pfn += HA_CHUNK;
+		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+			has->ha_end_pfn += HA_CHUNK;

-		if (total_pfn > HA_CHUNK) {
-			processed_pfn = HA_CHUNK;
-			total_pfn -= HA_CHUNK;
-		} else {
-			processed_pfn = total_pfn;
-			total_pfn = 0;
-		}
+			if (total_pfn > HA_CHUNK) {
+				processed_pfn = HA_CHUNK;
+				total_pfn -= HA_CHUNK;
+			} else {
+				processed_pfn = total_pfn;
+				total_pfn = 0;
+			}

-		has->covered_end_pfn += processed_pfn;
-		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+			has->covered_end_pfn += processed_pfn;
+		}

 		reinit_completion(&dm_device.ol_waitevent);

@@ -758,10 +759,10 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 				 */
				do_hot_add = false;
 			}
-			spin_lock_irqsave(&dm_device.ha_lock, flags);
-			has->ha_end_pfn -= HA_CHUNK;
-			has->covered_end_pfn -= processed_pfn;
-			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+			scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+				has->ha_end_pfn -= HA_CHUNK;
+				has->covered_end_pfn -= processed_pfn;
+			}
 			break;
 		}

@@ -781,10 +782,9 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 static void hv_online_page(struct page *pg, unsigned int order)
 {
 	struct hv_hotadd_state *has;
-	unsigned long flags;
 	unsigned long pfn = page_to_pfn(pg);

-	spin_lock_irqsave(&dm_device.ha_lock, flags);
+	guard(spinlock_irqsave)(&dm_device.ha_lock);
 	list_for_each_entry(has, &dm_device.ha_region_list, list) {
 		/* The page belongs to a different HAS. */
 		if ((pfn < has->start_pfn) ||
@@ -794,7 +794,6 @@ static void hv_online_page(struct page *pg, unsigned int order)
 		hv_bring_pgs_online(has, pfn, 1UL << order);
 		break;
 	}
-	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 }

 static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
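
[Editor's note, not part of the patch: with guard() the unlock no longer has to be repeated on every exit path; the lock taken at the top of hv_online_page() and pfn_covered() is released automatically when the function returns, early breaks and early returns included. A self-contained sketch of that pattern; lookup_lock, lookup_list and struct item are invented for illustration.]

#include <linux/cleanup.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(lookup_lock);
static LIST_HEAD(lookup_list);

struct item {
	struct list_head node;
	unsigned long key;
};

static bool key_present(unsigned long key)
{
	struct item *it;

	guard(spinlock_irqsave)(&lookup_lock);
	list_for_each_entry(it, &lookup_list, node) {
		if (it->key == key)
			return true;	/* lock released on this return too */
	}
	return false;
}
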
@@ -803,9 +802,8 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 	struct hv_hotadd_gap *gap;
 	unsigned long residual, new_inc;
 	int ret = 0;
-	unsigned long flags;

-	spin_lock_irqsave(&dm_device.ha_lock, flags);
+	guard(spinlock_irqsave)(&dm_device.ha_lock);
 	list_for_each_entry(has, &dm_device.ha_region_list, list) {
 		/*
 		 * If the pfn range we are dealing with is not in the current
@@ -852,7 +850,6 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 		ret = 1;
 		break;
 	}
-	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

 	return ret;
 }
@@ -947,7 +944,6 @@ static unsigned long process_hot_add(unsigned long pg_start,
 {
 	struct hv_hotadd_state *ha_region = NULL;
 	int covered;
-	unsigned long flags;

 	if (pfn_cnt == 0)
 		return 0;
@@ -979,9 +975,9 @@ static unsigned long process_hot_add(unsigned long pg_start,
 		ha_region->covered_end_pfn = pg_start;
 		ha_region->end_pfn = rg_start + rg_size;

-		spin_lock_irqsave(&dm_device.ha_lock, flags);
-		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
-		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+			list_add_tail(&ha_region->list, &dm_device.ha_region_list);
+		}
 	}

 do_pg_range:
@@ -2047,7 +2043,6 @@ static void balloon_remove(struct hv_device *dev)
 	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
 	struct hv_hotadd_state *has, *tmp;
 	struct hv_hotadd_gap *gap, *tmp_gap;
-	unsigned long flags;

 	if (dm->num_pages_ballooned != 0)
 		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
@@ -2073,7 +2068,7 @@ static void balloon_remove(struct hv_device *dev)
 #endif
 	}

-	spin_lock_irqsave(&dm_device.ha_lock, flags);
+	guard(spinlock_irqsave)(&dm_device.ha_lock);
 	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
 		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
 			list_del(&gap->list);
@@ -2082,7 +2077,6 @@ static void balloon_remove(struct hv_device *dev)
 		list_del(&has->list);
 		kfree(has);
 	}
-	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 }

 static int balloon_suspend(struct hv_device *hv_dev)