@@ -41,10 +41,39 @@ static frames_array_t early_frames;
 static list_head_t free_frames[MAX_PAGE_ORDER + 1];
 static list_head_t busy_frames[MAX_PAGE_ORDER + 1];

-#define MIN_NUM_4K_FRAMES 2
+/*
+ * Worst case: the PMM wants to refill while paging has just started on another thread:
+ *   1 for the PMM refill attempt (new_frames_array)
+ *   3 for the paging operation currently in progress
+ *   4 for refill_from_paging (1 array frame, 3 for mapping it)
+ */
+#define MIN_NUM_4K_FRAMES (1 + 3 + (1 + 3))
+/* enough array frames to refill 4K frames in the worst case without needing a refill */
+#define MIN_NOREFILL_FREE_FRAMES_THRESHOLD \
+    (MIN_FREE_FRAMES_THRESHOLD + MIN_NUM_4K_FRAMES + (MAX_PAGE_ORDER - PAGE_ORDER_4K))
 static size_t frames_count[MAX_PAGE_ORDER + 1];

+/* first lock to serialize normal access to the PMM (i.e. not array frame refill) */
 static spinlock_t lock = SPINLOCK_INIT;
+/**
+ * second lock to give paging priority access to the PMM during array frame refill
+ *
+ * Ensure that get_free_frame_norefill and refill_from_paging are only called while
+ * holding the paging lock.
+ */
+static spinlock_t priority_lock = SPINLOCK_INIT;
+
+static void try_create_4k_frames(void);
+
+static inline void pmm_lock() {
+    spin_lock(&lock);
+    spin_lock(&priority_lock);
+}
+
+static inline void pmm_unlock() {
+    spin_unlock(&priority_lock);
+    spin_unlock(&lock);
+}

 void display_frames_count(void) {
     printk("Avail memory frames: (total size: %lu MB)\n", total_phys_memory / MB(1));
@@ -98,7 +127,7 @@ static inline void init_frames_array(frames_array_t *array) {
     total_free_frames += array->meta.free_count;
 }

-static frames_array_t *new_frames_array(void) {
+static frames_array_t *_new_frames_array(bool nolock) {
     frames_array_t *array;
     frame_t *frame;

@@ -109,7 +138,21 @@ static frames_array_t *new_frames_array(void) {
     if (!boot_flags.virt)
         array = (frames_array_t *) mfn_to_virt_kern(frame->mfn);
     else {
-        array = vmap_kern_4k(mfn_to_virt_map(frame->mfn), frame->mfn, L1_PROT);
+        /* switch to special refilling mode to avoid a deadlock with paging */
+        spin_unlock(&priority_lock);
+
+        /* if we are coming from paging then we have to do this mapping without taking
+         * the lock again */
+        if (nolock) {
+            array = vmap_kern_4k_nolock(mfn_to_virt_map(frame->mfn), frame->mfn, L1_PROT);
+        }
+        else {
+            array = vmap_kern_4k(mfn_to_virt_map(frame->mfn), frame->mfn, L1_PROT);
+        }
+
+        /* switch back to normal mode */
+        spin_lock(&priority_lock);
+
         if (!array)
             goto error;
     }
@@ -124,6 +167,14 @@ static frames_array_t *new_frames_array(void) {
     UNREACHABLE();
 }

+static inline frames_array_t *new_frames_array() {
+    return _new_frames_array(false);
+}
+
+static inline frames_array_t *new_frames_array_nolock() {
+    return _new_frames_array(true);
+}
+
 static void del_frames_array(frames_array_t *array) {
     ASSERT(array);
@@ -426,31 +477,31 @@ static frame_t *_find_mfn_frame(list_head_t *list, mfn_t mfn, unsigned int order
 frame_t *find_free_mfn_frame(mfn_t mfn, unsigned int order) {
     frame_t *frame;

-    spin_lock(&lock);
+    pmm_lock();
     frame = _find_mfn_frame(free_frames, mfn, order);
-    spin_unlock(&lock);
+    pmm_unlock();

     return frame;
 }

 frame_t *find_busy_mfn_frame(mfn_t mfn, unsigned int order) {
     frame_t *frame;

-    spin_lock(&lock);
+    pmm_lock();
     frame = _find_mfn_frame(busy_frames, mfn, order);
-    spin_unlock(&lock);
+    pmm_unlock();

     return frame;
 }

 frame_t *find_mfn_frame(mfn_t mfn, unsigned int order) {
     frame_t *frame;

-    spin_lock(&lock);
+    pmm_lock();
     frame = _find_mfn_frame(busy_frames, mfn, order);
     if (!frame)
         frame = _find_mfn_frame(free_frames, mfn, order);
-    spin_unlock(&lock);
+    pmm_unlock();

     return frame;
 }
@@ -471,31 +522,31 @@ static frame_t *_find_paddr_frame(list_head_t *list, paddr_t paddr) {
 frame_t *find_free_paddr_frame(paddr_t paddr) {
     frame_t *frame;

-    spin_lock(&lock);
+    pmm_lock();
     frame = _find_paddr_frame(free_frames, paddr);
-    spin_unlock(&lock);
+    pmm_unlock();

     return frame;
 }

 frame_t *find_busy_paddr_frame(paddr_t paddr) {
     frame_t *frame;

-    spin_lock(&lock);
+    pmm_lock();
     frame = _find_paddr_frame(busy_frames, paddr);
-    spin_unlock(&lock);
+    pmm_unlock();

     return frame;
 }

 frame_t *find_paddr_frame(paddr_t paddr) {
     frame_t *frame;

-    spin_lock(&lock);
+    pmm_lock();
     frame = _find_paddr_frame(busy_frames, paddr);
     if (!frame)
         frame = _find_paddr_frame(free_frames, paddr);
-    spin_unlock(&lock);
+    pmm_unlock();

     return frame;
 }
@@ -599,20 +650,20 @@ static void try_create_4k_frames(void) {
  * This function does not split larger frames.
  */
 frame_t *get_free_frames_cond(free_frames_cond_t cb) {
-    spin_lock(&lock);
+    pmm_lock();
     try_create_4k_frames();
     for_each_order(order) {
         frame_t *frame;

         list_for_each_entry(frame, &free_frames[order], list) {
             if (cb(frame)) {
                 reserve_frame(frame);
-                spin_unlock(&lock);
+                pmm_unlock();
                 return frame;
             }
         }
     }
-    spin_unlock(&lock);
+    pmm_unlock();

     return NULL;
 }
@@ -623,22 +674,22 @@ frame_t *get_free_frames(unsigned int order) {
     if (order > MAX_PAGE_ORDER)
         return NULL;

-    spin_lock(&lock);
+    pmm_lock();
     if (order == PAGE_ORDER_4K)
         try_create_4k_frames();

     while (list_is_empty(&free_frames[order])) {
         BUG_ON(order == PAGE_ORDER_4K);
         frame = find_larger_frame(free_frames, order);
         if (!frame) {
-            spin_unlock(&lock);
+            pmm_unlock();
             return NULL;
         }
         split_frame(frame);
     }

     frame = reserve_frame(get_first_frame(free_frames, order));
-    spin_unlock(&lock);
+    pmm_unlock();

     return frame;
 }
@@ -648,7 +699,7 @@ void put_free_frames(mfn_t mfn, unsigned int order) {
     ASSERT(order <= MAX_PAGE_ORDER);

-    spin_lock(&lock);
+    pmm_lock();
     frame = _find_mfn_frame(busy_frames, mfn, order);
     if (!frame) {
         warning("PMM: unable to find frame: %lx, order: %u among busy frames", mfn,
@@ -660,7 +711,7 @@ void put_free_frames(mfn_t mfn, unsigned int order) {
     merge_frames(frame);

 unlock:
-    spin_unlock(&lock);
+    pmm_unlock();
 }

 void map_frames_array(void) {
@@ -674,3 +725,50 @@ void map_frames_array(void) {
         BUG_ON(!vmap_kern_4k(va, mfn, L1_PROT));
     }
 }
+
+/* functions for paging to avoid deadlocks */
+
+frame_t *get_free_frame_norefill(void) {
+    frame_t *frame;
+
+    spin_lock(&priority_lock);
+    frame = reserve_frame(get_first_frame(free_frames, PAGE_ORDER_4K));
+    spin_unlock(&priority_lock);
+
+    /* if this fires, we ran out of reserved frames: increase MIN_NUM_4K_FRAMES */
+    BUG_ON(!frame);
+
+    return frame;
+}
+
+/**
+ * Function to refill the 4K frames after using get_free_frame_norefill. The caller
+ * needs to hold the vmap_lock!
+ */
+void refill_from_paging(void) {
+    /* avoid recursive refilling after paging; variable protected by vmap_lock */
+    static bool refill_from_paging_ongoing = false;
+
+    /* avoid refill_from_paging being called as a result of refill_from_paging */
+    if (refill_from_paging_ongoing)
+        return;
+
+    refill_from_paging_ongoing = true;
+
+    spin_lock(&priority_lock);
+
+    /* ensure enough space to refill the 4K frames without a frame array allocation */
+    if (total_free_frames < MIN_NOREFILL_FREE_FRAMES_THRESHOLD)
+        new_frames_array_nolock();
+    /* if this fails, increase MIN_NUM_4K_FRAMES to allow for multiple array refills
+     * and change the "if" above to a "while". */
+    BUG_ON(total_free_frames < MIN_NOREFILL_FREE_FRAMES_THRESHOLD);
+
+    /* refill the 4K frames */
+    try_create_4k_frames();
+
+    spin_unlock(&priority_lock);
+
+    refill_from_paging_ongoing = false;
+}
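
The comments above pin down the calling convention for the two paging-side entry points: both are meant to be used while holding the paging (vmap) lock, get_free_frame_norefill() draws from the reserved 4K pool without ever triggering a refill, and refill_from_paging() restocks that pool before the lock is dropped. The sketch below illustrates that convention from the paging side; it is not part of this commit, it assumes vmap_lock is a spinlock taken with spin_lock(), and every name other than get_free_frame_norefill(), refill_from_paging(), vmap_lock, spin_lock/spin_unlock and frame_t is hypothetical.

/* Hypothetical paging-side caller (illustration only; pt_install() and
 * map_page_example() do not exist in this commit). */
static void map_page_example(void *va, mfn_t mfn) {
    spin_lock(&vmap_lock); /* the paging lock required by both helpers */

    /* take a page-table frame from the reserved pool; this cannot deadlock
     * with a PMM array refill because only priority_lock is taken inside */
    frame_t *pt = get_free_frame_norefill();
    pt_install(pt, va, mfn); /* hypothetical: wire up the new table and map va to mfn */

    /* top the reserved 4K pool back up while still holding vmap_lock,
     * as refill_from_paging() requires */
    refill_from_paging();

    spin_unlock(&vmap_lock);
}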