Skip to content

Commit 7d2affc

Browse files
committed
Merge tag 'md-fixes-20231206' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into block-6.7
Pull MD fixes from Song: "This set from Yu Kuai fixes issues around sync_work, which was introduced in 6.7 kernels." * tag 'md-fixes-20231206' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md: md: fix stopping sync thread md: don't leave 'MD_RECOVERY_FROZEN' in error path of md_set_readonly() md: fix missing flush of sync_work
2 parents a134cd8 + f52f5c7 commit 7d2affc

File tree

1 file changed

+50
-64
lines changed

1 file changed

+50
-64
lines changed

drivers/md/md.c

Lines changed: 50 additions & 64 deletions
Original file line numberDiff line numberDiff line change
@@ -4840,59 +4840,72 @@ action_show(struct mddev *mddev, char *page)
48404840
return sprintf(page, "%s\n", type);
48414841
}
48424842

4843-
static void stop_sync_thread(struct mddev *mddev)
4843+
/**
4844+
* stop_sync_thread() - wait for sync_thread to stop if it's running.
4845+
* @mddev: the array.
4846+
* @locked: if set, reconfig_mutex will still be held after this function
4847+
* returns; if not set, reconfig_mutex will be released after this
4848+
* function returns.
4849+
* @check_seq: if set, only wait for the current running sync_thread to stop; note
4850+
* that a new sync_thread can still start.
4851+
*/
4852+
static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
48444853
{
4845-
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4846-
return;
4854+
int sync_seq;
48474855

4848-
if (mddev_lock(mddev))
4849-
return;
4856+
if (check_seq)
4857+
sync_seq = atomic_read(&mddev->sync_seq);
48504858

4851-
/*
4852-
* Check again in case MD_RECOVERY_RUNNING is cleared before lock is
4853-
* held.
4854-
*/
48554859
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
4856-
mddev_unlock(mddev);
4860+
if (!locked)
4861+
mddev_unlock(mddev);
48574862
return;
48584863
}
48594864

4860-
if (work_pending(&mddev->del_work))
4861-
flush_workqueue(md_misc_wq);
4865+
mddev_unlock(mddev);
48624866

48634867
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
48644868
/*
48654869
* Thread might be blocked waiting for metadata update which will now
48664870
* never happen
48674871
*/
48684872
md_wakeup_thread_directly(mddev->sync_thread);
4873+
if (work_pending(&mddev->sync_work))
4874+
flush_work(&mddev->sync_work);
48694875

4870-
mddev_unlock(mddev);
4876+
wait_event(resync_wait,
4877+
!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
4878+
(check_seq && sync_seq != atomic_read(&mddev->sync_seq)));
4879+
4880+
if (locked)
4881+
mddev_lock_nointr(mddev);
48714882
}
48724883

48734884
static void idle_sync_thread(struct mddev *mddev)
48744885
{
4875-
int sync_seq = atomic_read(&mddev->sync_seq);
4876-
48774886
mutex_lock(&mddev->sync_mutex);
48784887
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4879-
stop_sync_thread(mddev);
48804888

4881-
wait_event(resync_wait, sync_seq != atomic_read(&mddev->sync_seq) ||
4882-
!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
4889+
if (mddev_lock(mddev)) {
4890+
mutex_unlock(&mddev->sync_mutex);
4891+
return;
4892+
}
48834893

4894+
stop_sync_thread(mddev, false, true);
48844895
mutex_unlock(&mddev->sync_mutex);
48854896
}
48864897

48874898
static void frozen_sync_thread(struct mddev *mddev)
48884899
{
48894900
mutex_lock(&mddev->sync_mutex);
48904901
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4891-
stop_sync_thread(mddev);
48924902

4893-
wait_event(resync_wait, mddev->sync_thread == NULL &&
4894-
!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
4903+
if (mddev_lock(mddev)) {
4904+
mutex_unlock(&mddev->sync_mutex);
4905+
return;
4906+
}
48954907

4908+
stop_sync_thread(mddev, false, false);
48964909
mutex_unlock(&mddev->sync_mutex);
48974910
}
48984911

@@ -6264,14 +6277,7 @@ static void md_clean(struct mddev *mddev)
62646277

62656278
static void __md_stop_writes(struct mddev *mddev)
62666279
{
6267-
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6268-
if (work_pending(&mddev->del_work))
6269-
flush_workqueue(md_misc_wq);
6270-
if (mddev->sync_thread) {
6271-
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6272-
md_reap_sync_thread(mddev);
6273-
}
6274-
6280+
stop_sync_thread(mddev, true, false);
62756281
del_timer_sync(&mddev->safemode_timer);
62766282

62776283
if (mddev->pers && mddev->pers->quiesce) {
@@ -6355,25 +6361,16 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
63556361
int err = 0;
63566362
int did_freeze = 0;
63576363

6364+
if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
6365+
return -EBUSY;
6366+
63586367
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
63596368
did_freeze = 1;
63606369
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
63616370
md_wakeup_thread(mddev->thread);
63626371
}
6363-
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6364-
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6365-
6366-
/*
6367-
* Thread might be blocked waiting for metadata update which will now
6368-
* never happen
6369-
*/
6370-
md_wakeup_thread_directly(mddev->sync_thread);
63716372

6372-
if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
6373-
return -EBUSY;
6374-
mddev_unlock(mddev);
6375-
wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6376-
&mddev->recovery));
6373+
stop_sync_thread(mddev, false, false);
63776374
wait_event(mddev->sb_wait,
63786375
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
63796376
mddev_lock_nointr(mddev);
@@ -6383,29 +6380,30 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
63836380
mddev->sync_thread ||
63846381
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
63856382
pr_warn("md: %s still in use.\n",mdname(mddev));
6386-
if (did_freeze) {
6387-
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6388-
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6389-
md_wakeup_thread(mddev->thread);
6390-
}
63916383
err = -EBUSY;
63926384
goto out;
63936385
}
6386+
63946387
if (mddev->pers) {
63956388
__md_stop_writes(mddev);
63966389

6397-
err = -ENXIO;
6398-
if (mddev->ro == MD_RDONLY)
6390+
if (mddev->ro == MD_RDONLY) {
6391+
err = -ENXIO;
63996392
goto out;
6393+
}
6394+
64006395
mddev->ro = MD_RDONLY;
64016396
set_disk_ro(mddev->gendisk, 1);
6397+
}
6398+
6399+
out:
6400+
if ((mddev->pers && !err) || did_freeze) {
64026401
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
64036402
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
64046403
md_wakeup_thread(mddev->thread);
64056404
sysfs_notify_dirent_safe(mddev->sysfs_state);
6406-
err = 0;
64076405
}
6408-
out:
6406+
64096407
mutex_unlock(&mddev->open_mutex);
64106408
return err;
64116409
}
@@ -6426,20 +6424,8 @@ static int do_md_stop(struct mddev *mddev, int mode,
64266424
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
64276425
md_wakeup_thread(mddev->thread);
64286426
}
6429-
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6430-
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
64316427

6432-
/*
6433-
* Thread might be blocked waiting for metadata update which will now
6434-
* never happen
6435-
*/
6436-
md_wakeup_thread_directly(mddev->sync_thread);
6437-
6438-
mddev_unlock(mddev);
6439-
wait_event(resync_wait, (mddev->sync_thread == NULL &&
6440-
!test_bit(MD_RECOVERY_RUNNING,
6441-
&mddev->recovery)));
6442-
mddev_lock_nointr(mddev);
6428+
stop_sync_thread(mddev, true, false);
64436429

64446430
mutex_lock(&mddev->open_mutex);
64456431
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||

0 commit comments

Comments
 (0)