Skip to content

Commit 7865827

Browse files
committed
erofs: avoid unnecessary z_erofs_decompressqueue_work() declaration
Just code rearrange. No logic changes. Link: https://lore.kernel.org/r/20220121091412.86086-1-hsiangkao@linux.alibaba.com Reviewed-by: Yue Hu <huyue2@yulong.com> Reviewed-by: Chao Yu <chao@kernel.org> Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
1 parent e33f42b commit 7865827

File tree

1 file changed

+56
-57
lines changed

1 file changed

+56
-57
lines changed

fs/erofs/zdata.c

Lines changed: 56 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -810,68 +810,11 @@ static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
810810
return false;
811811
}
812812

813-
/*
 * Forward declaration: the worker is defined later in this file but is
 * needed below as the synchronous fallback (this declaration is what the
 * commit removes by moving the function after the worker definition).
 */
static void z_erofs_decompressqueue_work(struct work_struct *work);

/*
 * Account @bios completed bios against @io->pending_bios and, once the
 * count drops to zero, kick off decompression.
 * NOTE(review): the endio path below calls this with bios == -1 to drop
 * a reference per completed bio — confirm the initial counter setup at
 * the submission site, which is outside this view.
 */
static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
				       bool sync, int bios)
{
	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);

	/* wake up the caller thread for sync decompression */
	if (sync) {
		unsigned long flags;

		/*
		 * The counter update and the wakeup are done under the
		 * waitqueue lock so the waiter cannot miss the transition
		 * to zero.
		 */
		spin_lock_irqsave(&io->u.wait.lock, flags);
		if (!atomic_add_return(bios, &io->pending_bios))
			wake_up_locked(&io->u.wait);
		spin_unlock_irqrestore(&io->u.wait.lock, flags);
		return;
	}

	/* bios still pending — nothing to kick off yet */
	if (atomic_add_return(bios, &io->pending_bios))
		return;
	/* Use workqueue and sync decompression for atomic contexts only */
	if (in_atomic() || irqs_disabled()) {
		queue_work(z_erofs_workqueue, &io->u.work);
		/* enable sync decompression for readahead */
		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
		return;
	}
	/* Safe to run the decompression worker inline in this context. */
	z_erofs_decompressqueue_work(&io->u.work);
}
842-
843813
/*
 * A page counts as invalidated once it has no ->mapping, unless it is a
 * short-lived page (as reported by z_erofs_is_shortlived_page(), which
 * never has a mapping by design — TODO confirm against its definition,
 * which is outside this view).
 */
static bool z_erofs_page_is_invalidated(struct page *page)
{
	return !page->mapping && !z_erofs_is_shortlived_page(page);
}
847817

848-
/*
 * Bio completion handler: propagate per-page status for every segment of
 * the finished bio, then drop one pending-bio reference via
 * z_erofs_decompress_kickoff().
 */
static void z_erofs_decompressqueue_endio(struct bio *bio)
{
	/*
	 * bi_private is a one-bit tagged pointer: the pointer part is the
	 * decompress queue, the tag bit is forwarded as the "sync" flag.
	 */
	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
	blk_status_t err = bio->bi_status;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* compressed pages are not expected to be uptodate or gone */
		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(z_erofs_page_is_invalidated(page));

		if (err)
			SetPageError(page);

		/* only managed-cache pages are marked/unlocked here */
		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
			if (!err)
				SetPageUptodate(page);
			unlock_page(page);
		}
	}
	/* drop this bio's reference; last one triggers decompression */
	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
	bio_put(bio);
}
874-
875818
static int z_erofs_decompress_pcluster(struct super_block *sb,
876819
struct z_erofs_pcluster *pcl,
877820
struct page **pagepool)
@@ -1123,6 +1066,35 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
11231066
kvfree(bgq);
11241067
}
11251068

1069+
/*
 * Account @bios completed bios against @io->pending_bios and, when the
 * count reaches zero, start decompression — either deferred to the
 * workqueue (atomic context) or inline (now callable directly, since this
 * copy sits after z_erofs_decompressqueue_work(), removing the need for a
 * forward declaration).
 */
static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
				       bool sync, int bios)
{
	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);

	/* wake up the caller thread for sync decompression */
	if (sync) {
		unsigned long flags;

		/*
		 * Update the counter and wake the waiter under the
		 * waitqueue lock so the zero transition cannot be missed.
		 */
		spin_lock_irqsave(&io->u.wait.lock, flags);
		if (!atomic_add_return(bios, &io->pending_bios))
			wake_up_locked(&io->u.wait);
		spin_unlock_irqrestore(&io->u.wait.lock, flags);
		return;
	}

	/* bios still in flight — wait for the last completion */
	if (atomic_add_return(bios, &io->pending_bios))
		return;
	/* Use workqueue and sync decompression for atomic contexts only */
	if (in_atomic() || irqs_disabled()) {
		queue_work(z_erofs_workqueue, &io->u.work);
		/* enable sync decompression for readahead */
		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
		return;
	}
	/* process context with irqs enabled: run the worker inline */
	z_erofs_decompressqueue_work(&io->u.work);
}
1097+
11261098
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
11271099
unsigned int nr,
11281100
struct page **pagepool,
@@ -1300,6 +1272,33 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
13001272
qtail[JQ_BYPASS] = &pcl->next;
13011273
}
13021274

1275+
/*
 * Bio completion handler (relocated copy): set per-page status for each
 * segment of the completed bio, then release one pending-bio reference
 * through z_erofs_decompress_kickoff().
 */
static void z_erofs_decompressqueue_endio(struct bio *bio)
{
	/*
	 * bi_private carries a one-bit tagged pointer: pointer = the
	 * decompress queue, tag bit = the "sync" flag passed on below.
	 */
	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
	blk_status_t err = bio->bi_status;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* sanity: these pages must be neither uptodate nor invalidated */
		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(z_erofs_page_is_invalidated(page));

		if (err)
			SetPageError(page);

		/* managed-cache pages get their flags/lock handled here */
		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
			if (!err)
				SetPageUptodate(page);
			unlock_page(page);
		}
	}
	/* -1: drop this bio's pending reference; last drop kicks decompression */
	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
	bio_put(bio);
}
1301+
13031302
static void z_erofs_submit_queue(struct super_block *sb,
13041303
struct z_erofs_decompress_frontend *f,
13051304
struct page **pagepool,

0 commit comments

Comments
 (0)