@@ -810,68 +810,11 @@ static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
 	return false;
 }
 
-static void z_erofs_decompressqueue_work(struct work_struct *work);
-static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
-				       bool sync, int bios)
-{
-	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
-
-	/* wake up the caller thread for sync decompression */
-	if (sync) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&io->u.wait.lock, flags);
-		if (!atomic_add_return(bios, &io->pending_bios))
-			wake_up_locked(&io->u.wait);
-		spin_unlock_irqrestore(&io->u.wait.lock, flags);
-		return;
-	}
-
-	if (atomic_add_return(bios, &io->pending_bios))
-		return;
-	/* Use workqueue and sync decompression for atomic contexts only */
-	if (in_atomic() || irqs_disabled()) {
-		queue_work(z_erofs_workqueue, &io->u.work);
-		/* enable sync decompression for readahead */
-		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
-			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
-		return;
-	}
-	z_erofs_decompressqueue_work(&io->u.work);
-}
-
 static bool z_erofs_page_is_invalidated(struct page *page)
 {
 	return !page->mapping && !z_erofs_is_shortlived_page(page);
 }
 
-static void z_erofs_decompressqueue_endio(struct bio *bio)
-{
-	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
-	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
-	blk_status_t err = bio->bi_status;
-	struct bio_vec *bvec;
-	struct bvec_iter_all iter_all;
-
-	bio_for_each_segment_all(bvec, bio, iter_all) {
-		struct page *page = bvec->bv_page;
-
-		DBG_BUGON(PageUptodate(page));
-		DBG_BUGON(z_erofs_page_is_invalidated(page));
-
-		if (err)
-			SetPageError(page);
-
-		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
-			if (!err)
-				SetPageUptodate(page);
-			unlock_page(page);
-		}
-	}
-	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
-	bio_put(bio);
-}
-
 static int z_erofs_decompress_pcluster(struct super_block *sb,
 				       struct z_erofs_pcluster *pcl,
 				       struct page **pagepool)
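This hunk drops the forward declaration of z_erofs_decompressqueue_work() along with the bodies of z_erofs_decompress_kickoff() and z_erofs_decompressqueue_endio(); the two hunks below re-add both functions unchanged, each placed after the definitions it depends on, so the declaration is no longer needed. A minimal sketch of that reordering pattern, with illustrative names that are not from this file:

/*
 * Illustrative sketch only: while b() is defined below its caller a(),
 * a forward declaration of b() is required.
 */
static void b(void);		/* needed only because b() sits below a() */

static void a(void)
{
	b();
}

static void b(void)
{
	/* ... */
}

int main(void)
{
	a();
	return 0;
}

/*
 * Moving b()'s definition above a() -- as this commit does with
 * z_erofs_decompress_kickoff() relative to z_erofs_decompressqueue_work()
 * -- lets the forward declaration be deleted outright.
 */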
@@ -1123,6 +1066,35 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
 	kvfree(bgq);
 }
 
+static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+				       bool sync, int bios)
+{
+	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
+
+	/* wake up the caller thread for sync decompression */
+	if (sync) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&io->u.wait.lock, flags);
+		if (!atomic_add_return(bios, &io->pending_bios))
+			wake_up_locked(&io->u.wait);
+		spin_unlock_irqrestore(&io->u.wait.lock, flags);
+		return;
+	}
+
+	if (atomic_add_return(bios, &io->pending_bios))
+		return;
+	/* Use workqueue and sync decompression for atomic contexts only */
+	if (in_atomic() || irqs_disabled()) {
+		queue_work(z_erofs_workqueue, &io->u.work);
+		/* enable sync decompression for readahead */
+		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
+			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
+		return;
+	}
+	z_erofs_decompressqueue_work(&io->u.work);
+}
+
 static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
 					       unsigned int nr,
 					       struct page **pagepool,
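For context on the pending_bios accounting above: it is a completion count that is driven from both ends. The submitter adds the number of bios it issued, each endio adds -1, and whichever caller brings the counter to exactly zero owns the final step of kicking off decompression, so the pattern is safe even when completions race ahead of submission. A standalone userspace sketch of that idea, assuming C11 atomics (my illustration, not kernel code; kickoff() and do_work() are hypothetical names):

/*
 * Userspace sketch of the pending_bios pattern in
 * z_erofs_decompress_kickoff(): the counter starts at zero, the submitter
 * adds the number of bios it issued, every completion adds -1, and the
 * caller that observes the sum reach zero runs the deferred work.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending;	/* zero-initialized */

static void do_work(void)
{
	puts("all bios accounted for: run decompression");
}

static void kickoff(int delta)
{
	/* atomic_fetch_add() returns the old value, so old + delta is the
	 * updated count; exactly zero means nothing is outstanding. */
	if (atomic_fetch_add(&pending, delta) + delta == 0)
		do_work();
}

int main(void)
{
	kickoff(-1);	/* a bio may complete before the submitter's add */
	kickoff(-1);
	kickoff(3);	/* submitter issued three bios in total */
	kickoff(-1);	/* the last completion triggers do_work() */
	return 0;
}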
@@ -1300,6 +1272,33 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 	qtail[JQ_BYPASS] = &pcl->next;
 }
 
+static void z_erofs_decompressqueue_endio(struct bio *bio)
+{
+	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
+	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
+	blk_status_t err = bio->bi_status;
+	struct bio_vec *bvec;
+	struct bvec_iter_all iter_all;
+
+	bio_for_each_segment_all(bvec, bio, iter_all) {
+		struct page *page = bvec->bv_page;
+
+		DBG_BUGON(PageUptodate(page));
+		DBG_BUGON(z_erofs_page_is_invalidated(page));
+
+		if (err)
+			SetPageError(page);
+
+		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
+			if (!err)
+				SetPageUptodate(page);
+			unlock_page(page);
+		}
+	}
+	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
+	bio_put(bio);
+}
+
 static void z_erofs_submit_queue(struct super_block *sb,
 				 struct z_erofs_decompress_frontend *f,
 				 struct page **pagepool,
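The endio handler above recovers two values from the single bio->bi_private field: erofs' tagptr1_t folds a one-bit "sync" tag into the low bit of the queue pointer (which is at least 2-byte aligned, so bit 0 is free), and tagptr_unfold_ptr()/tagptr_unfold_tags() split them apart again. A rough userspace sketch of the same pointer-tagging idea, with hypothetical fold/unfold helpers rather than the real tagptr API:

/*
 * Illustrative pointer tagging: store a one-bit flag in the low bit of
 * an aligned pointer and recover both halves later.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void *fold(void *ptr, unsigned int tag)
{
	/* the pointee must be at least 2-byte aligned so bit 0 is free */
	assert(((uintptr_t)ptr & 1) == 0 && tag <= 1);
	return (void *)((uintptr_t)ptr | tag);
}

static void *unfold_ptr(void *tagged)
{
	return (void *)((uintptr_t)tagged & ~(uintptr_t)1);
}

static unsigned int unfold_tag(void *tagged)
{
	return (uintptr_t)tagged & 1;
}

int main(void)
{
	int queue = 42;			/* stand-in for the decompress queue */
	void *priv = fold(&queue, 1);	/* tag 1 ~ "sync decompression" */

	printf("ptr intact: %d, tag: %u\n",
	       unfold_ptr(priv) == (void *)&queue, unfold_tag(priv));
	return 0;
}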