@@ -199,7 +199,6 @@ struct z_erofs_decompress_frontend {
 	struct z_erofs_pagevec_ctor vector;
 
 	struct z_erofs_pcluster *pcl, *tailpcl;
-	struct z_erofs_collection *cl;
 	/* a pointer used to pick up inplace I/O pages */
 	struct page **icpage_ptr;
 	z_erofs_next_pcluster_t owned_head;
@@ -357,7 +356,7 @@ static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
 	return false;
 }
 
-/* callers must be with collection lock held */
+/* callers must be with pcluster lock held */
 static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
			       struct page *page, enum z_erofs_page_type type,
			       bool pvec_safereuse)
@@ -372,7 +371,7 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
 
 	ret = z_erofs_pagevec_enqueue(&fe->vector, page, type,
				      pvec_safereuse);
-	fe->cl->vcnt += (unsigned int)ret;
+	fe->pcl->vcnt += (unsigned int)ret;
 	return ret ? 0 : -EAGAIN;
 }
 
@@ -405,12 +404,11 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
 	f->mode = COLLECT_PRIMARY;
 }
 
-static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
-				     struct inode *inode,
-				     struct erofs_map_blocks *map)
+static int z_erofs_lookup_pcluster(struct z_erofs_decompress_frontend *fe,
+				   struct inode *inode,
+				   struct erofs_map_blocks *map)
 {
 	struct z_erofs_pcluster *pcl = fe->pcl;
-	struct z_erofs_collection *cl;
 	unsigned int length;
 
 	/* to avoid unexpected loop formed by corrupted images */
@@ -419,8 +417,7 @@ static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
 		return -EFSCORRUPTED;
 	}
 
-	cl = z_erofs_primarycollection(pcl);
-	if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
+	if (pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) {
 		DBG_BUGON(1);
 		return -EFSCORRUPTED;
 	}
@@ -443,23 +440,21 @@ static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
			length = READ_ONCE(pcl->length);
 		}
 	}
-	mutex_lock(&cl->lock);
+	mutex_lock(&pcl->lock);
 	/* used to check tail merging loop due to corrupted images */
 	if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
 		fe->tailpcl = pcl;
 
 	z_erofs_try_to_claim_pcluster(fe);
-	fe->cl = cl;
 	return 0;
 }
 
-static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
-				       struct inode *inode,
-				       struct erofs_map_blocks *map)
+static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe,
+				     struct inode *inode,
+				     struct erofs_map_blocks *map)
 {
 	bool ztailpacking = map->m_flags & EROFS_MAP_META;
 	struct z_erofs_pcluster *pcl;
-	struct z_erofs_collection *cl;
 	struct erofs_workgroup *grp;
 	int err;
 
@@ -482,17 +477,15 @@ static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
 
 	/* new pclusters should be claimed as type 1, primary and followed */
 	pcl->next = fe->owned_head;
+	pcl->pageofs_out = map->m_la & ~PAGE_MASK;
 	fe->mode = COLLECT_PRIMARY_FOLLOWED;
 
-	cl = z_erofs_primarycollection(pcl);
-	cl->pageofs = map->m_la & ~PAGE_MASK;
-
 	/*
	 * lock all primary followed works before visible to others
	 * and mutex_trylock *never* fails for a new pcluster.
	 */
-	mutex_init(&cl->lock);
-	DBG_BUGON(!mutex_trylock(&cl->lock));
+	mutex_init(&pcl->lock);
+	DBG_BUGON(!mutex_trylock(&pcl->lock));
 
 	if (ztailpacking) {
 		pcl->obj.index = 0;	/* which indicates ztailpacking */
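[Editor's aside, not part of the patch: the mutex_init()/mutex_trylock() pairing above works because a freshly allocated pcluster is not yet visible to any other thread, so the trylock can never contend. A minimal standalone sketch of that pattern, using a hypothetical demo struct and WARN_ON in place of the erofs-internal DBG_BUGON:]

#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_pcl {			/* hypothetical, for illustration */
	struct mutex lock;
};

static struct demo_pcl *demo_alloc_locked(void)
{
	struct demo_pcl *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;
	mutex_init(&p->lock);
	/* nobody else can see 'p' yet, so this trylock cannot fail */
	WARN_ON(!mutex_trylock(&p->lock));
	return p;	/* returned locked; publish to others afterwards */
}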
@@ -519,11 +512,10 @@ static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
 		fe->tailpcl = pcl;
 	fe->owned_head = &pcl->next;
 	fe->pcl = pcl;
-	fe->cl = cl;
 	return 0;
 
err_out:
-	mutex_unlock(&cl->lock);
+	mutex_unlock(&pcl->lock);
 	z_erofs_free_pcluster(pcl);
 	return err;
 }
@@ -535,9 +527,9 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
 	struct erofs_workgroup *grp;
 	int ret;
 
-	DBG_BUGON(fe->cl);
+	DBG_BUGON(fe->pcl);
 
-	/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */
+	/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
 	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
 	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
 
@@ -554,22 +546,22 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
 		fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
 	} else {
tailpacking:
-		ret = z_erofs_register_collection(fe, inode, map);
+		ret = z_erofs_register_pcluster(fe, inode, map);
 		if (!ret)
			goto out;
 		if (ret != -EEXIST)
			return ret;
 	}
 
-	ret = z_erofs_lookup_collection(fe, inode, map);
+	ret = z_erofs_lookup_pcluster(fe, inode, map);
 	if (ret) {
 		erofs_workgroup_put(&fe->pcl->obj);
 		return ret;
 	}
 
out:
 	z_erofs_pagevec_ctor_init(&fe->vector, Z_EROFS_NR_INLINE_PAGEVECS,
-				  fe->cl->pagevec, fe->cl->vcnt);
+				  fe->pcl->pagevec, fe->pcl->vcnt);
 	/* since file-backed online pages are traversed in reverse order */
 	fe->icpage_ptr = fe->pcl->compressed_pages +
			z_erofs_pclusterpages(fe->pcl);
@@ -582,48 +574,36 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
  */
static void z_erofs_rcu_callback(struct rcu_head *head)
 {
-	struct z_erofs_collection *const cl =
-		container_of(head, struct z_erofs_collection, rcu);
-
-	z_erofs_free_pcluster(container_of(cl, struct z_erofs_pcluster,
-					   primary_collection));
+	z_erofs_free_pcluster(container_of(head,
+			struct z_erofs_pcluster, rcu));
 }
 
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
 {
 	struct z_erofs_pcluster *const pcl =
 		container_of(grp, struct z_erofs_pcluster, obj);
-	struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);
 
-	call_rcu(&cl->rcu, z_erofs_rcu_callback);
-}
-
-static void z_erofs_collection_put(struct z_erofs_collection *cl)
-{
-	struct z_erofs_pcluster *const pcl =
-		container_of(cl, struct z_erofs_pcluster, primary_collection);
-
-	erofs_workgroup_put(&pcl->obj);
+	call_rcu(&pcl->rcu, z_erofs_rcu_callback);
 }
 
static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
 {
-	struct z_erofs_collection *cl = fe->cl;
+	struct z_erofs_pcluster *pcl = fe->pcl;
 
-	if (!cl)
+	if (!pcl)
 		return false;
 
 	z_erofs_pagevec_ctor_exit(&fe->vector, false);
-	mutex_unlock(&cl->lock);
+	mutex_unlock(&pcl->lock);
 
 	/*
	 * if all pending pages are added, don't hold its reference
	 * any longer if the pcluster isn't hosted by ourselves.
	 */
 	if (fe->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
-		z_erofs_collection_put(cl);
+		erofs_workgroup_put(&pcl->obj);
 
-	fe->cl = NULL;
+	fe->pcl = NULL;
 	return true;
 }
 
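[Editor's aside, not part of the patch: with z_erofs_collection gone, the rcu_head is embedded directly in z_erofs_pcluster and the enclosing object is recovered with container_of() in the callback. A minimal standalone sketch of that RCU-deferred-free pattern, with hypothetical demo_* names:]

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {			/* hypothetical, for illustration */
	int payload;
	struct rcu_head rcu;		/* embedded callback anchor */
};

static void demo_rcu_free(struct rcu_head *head)
{
	/* recover the enclosing object from its embedded rcu_head */
	kfree(container_of(head, struct demo_obj, rcu));
}

static void demo_put(struct demo_obj *obj)
{
	/* defer the actual free until all RCU readers are done */
	call_rcu(&obj->rcu, demo_rcu_free);
}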
@@ -666,8 +646,8 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 	/* lucky, within the range of the current map_blocks */
 	if (offset + cur >= map->m_la &&
	    offset + cur < map->m_la + map->m_llen) {
-		/* didn't get a valid collection previously (very rare) */
-		if (!fe->cl)
+		/* didn't get a valid pcluster previously (very rare) */
+		if (!fe->pcl)
			goto restart_now;
 		goto hitted;
 	}
@@ -766,7 +746,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 	/* bump up the number of spiltted parts of a page */
 	++spiltted;
 	/* also update nr_pages */
-	fe->cl->nr_pages = max_t(pgoff_t, fe->cl->nr_pages, index + 1);
+	fe->pcl->nr_pages = max_t(pgoff_t, fe->pcl->nr_pages, index + 1);
next_part:
 	/* can be used for verification */
 	map->m_llen = offset + cur - map->m_la;
@@ -821,15 +801,13 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
 
 	enum z_erofs_page_type page_type;
 	bool overlapped, partial;
-	struct z_erofs_collection *cl;
 	int err;
 
 	might_sleep();
-	cl = z_erofs_primarycollection(pcl);
-	DBG_BUGON(!READ_ONCE(cl->nr_pages));
+	DBG_BUGON(!READ_ONCE(pcl->nr_pages));
 
-	mutex_lock(&cl->lock);
-	nr_pages = cl->nr_pages;
+	mutex_lock(&pcl->lock);
+	nr_pages = pcl->nr_pages;
 
 	if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
 		pages = pages_onstack;
@@ -857,9 +835,9 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
 
 	err = 0;
 	z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
-				  cl->pagevec, 0);
+				  pcl->pagevec, 0);
 
-	for (i = 0; i < cl->vcnt; ++i) {
+	for (i = 0; i < pcl->vcnt; ++i) {
 		unsigned int pagenr;
 
 		page = z_erofs_pagevec_dequeue(&ctor, &page_type);
@@ -945,11 +923,11 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
 		goto out;
 
 	llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
-	if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
+	if (nr_pages << PAGE_SHIFT >= pcl->pageofs_out + llen) {
 		outputsize = llen;
 		partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
 	} else {
-		outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
+		outputsize = (nr_pages << PAGE_SHIFT) - pcl->pageofs_out;
 		partial = true;
 	}
 
@@ -963,7 +941,7 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
				 .in = compressed_pages,
				 .out = pages,
				 .pageofs_in = pcl->pageofs_in,
-				 .pageofs_out = cl->pageofs,
+				 .pageofs_out = pcl->pageofs_out,
				 .inputsize = inputsize,
				 .outputsize = outputsize,
				 .alg = pcl->algorithmformat,
@@ -1012,16 +990,12 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
 	else if (pages != pages_onstack)
 		kvfree(pages);
 
-	cl->nr_pages = 0;
-	cl->vcnt = 0;
+	pcl->nr_pages = 0;
+	pcl->vcnt = 0;
 
-	/* all cl locks MUST be taken before the following line */
+	/* pcluster lock MUST be taken before the following line */
 	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
-
-	/* all cl locks SHOULD be released right now */
-	mutex_unlock(&cl->lock);
-
-	z_erofs_collection_put(cl);
+	mutex_unlock(&pcl->lock);
 
 	return err;
 }
@@ -1043,6 +1017,7 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
 		owned = READ_ONCE(pcl->next);
 
 		z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
+		erofs_workgroup_put(&pcl->obj);
 	}
 }
 
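[Editor's aside, not part of the patch: for orientation, a sketch of the consolidated structure implied by the hunks above. Member names are taken from the diff itself; exact types, ordering, and the union layout are assumptions, and the real definition in fs/erofs/zdata.h has more members.]

struct z_erofs_pcluster {
	struct erofs_workgroup obj;	/* refcounting / managed lookup */
	struct mutex lock;		/* formerly cl->lock */
	z_erofs_next_pcluster_t next;	/* chained pcluster list */
	unsigned int length;		/* decompressed length hint */
	unsigned int vcnt;		/* formerly cl->vcnt */
	unsigned int nr_pages;		/* formerly cl->nr_pages */
	unsigned short pageofs_out;	/* formerly cl->pageofs */
	union {
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];
		struct rcu_head rcu;	/* formerly cl->rcu */
	};
	/* ... pageofs_in, algorithmformat, compressed_pages[], etc. ... */
};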