@@ -4840,59 +4840,72 @@ action_show(struct mddev *mddev, char *page)
4840
4840
return sprintf (page , "%s\n" , type );
4841
4841
}
4842
4842
4843
- static void stop_sync_thread (struct mddev * mddev )
4843
/**
 * stop_sync_thread() - wait for sync_thread to stop if it's running.
 * @mddev: the array.
 * @locked: if set, reconfig_mutex will still be held after this function
 *	    return; if not set, reconfig_mutex will be released after this
 *	    function return.
 * @check_seq: if set, only wait for current running sync_thread to stop, noted
 *	       that new sync_thread can still start.
 *
 * NOTE(review): caller is expected to hold reconfig_mutex on entry in every
 * case — both return paths either keep it (@locked) or drop it via
 * mddev_unlock() — confirm against all call sites.
 */
static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
{
	int sync_seq;

	/* Snapshot the sequence counter first so a sync_thread that finishes
	 * and restarts between here and the wait_event() below is detected. */
	if (check_seq)
		sync_seq = atomic_read(&mddev->sync_seq);

	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
		/* Nothing running: honour the @locked contract and bail out. */
		if (!locked)
			mddev_unlock(mddev);

		return;
	}

	/* Drop reconfig_mutex before waiting so the sync_thread (which may
	 * need the lock to make progress) can actually exit. */
	mddev_unlock(mddev);

	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	/*
	 * Thread might be blocked waiting for metadata update which will now
	 * never happen
	 */
	md_wakeup_thread_directly(mddev->sync_thread);
	/* A queued-but-not-yet-running sync_work would otherwise start a new
	 * sync_thread after we begin waiting; flush it out first. */
	if (work_pending(&mddev->sync_work))
		flush_work(&mddev->sync_work);

	/* Wait until no sync_thread is running, or — when @check_seq — until
	 * at least the thread observed above has finished (sync_seq moved),
	 * even if a new one has already started. */
	wait_event(resync_wait,
		   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   (check_seq && sync_seq != atomic_read(&mddev->sync_seq)));

	/* Re-take reconfig_mutex if the caller expects it held on return. */
	if (locked)
		mddev_lock_nointr(mddev);
}
4872
4883
4873
4884
/* Un-freeze recovery and wait for the currently running sync_thread (if any)
 * to stop; a new sync_thread may start immediately afterwards (check_seq).
 * sync_mutex serializes concurrent idle/frozen requests. */
static void idle_sync_thread(struct mddev *mddev)
{
	mutex_lock(&mddev->sync_mutex);
	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	/* stop_sync_thread() requires reconfig_mutex held on entry; an
	 * interrupted lock attempt means we simply give up. */
	if (mddev_lock(mddev)) {
		mutex_unlock(&mddev->sync_mutex);
		return;
	}

	/* locked=false: reconfig_mutex is released inside stop_sync_thread();
	 * check_seq=true: only wait out the current thread. */
	stop_sync_thread(mddev, false, true);
	mutex_unlock(&mddev->sync_mutex);
}
4886
4897
4887
4898
/* Freeze recovery and wait for any running sync_thread to stop; with
 * MD_RECOVERY_FROZEN set, no new sync_thread should be started afterwards
 * (check_seq=false — wait for RUNNING to clear outright).
 * sync_mutex serializes concurrent idle/frozen requests. */
static void frozen_sync_thread(struct mddev *mddev)
{
	mutex_lock(&mddev->sync_mutex);
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	/* stop_sync_thread() requires reconfig_mutex held on entry; an
	 * interrupted lock attempt means we simply give up. */
	if (mddev_lock(mddev)) {
		mutex_unlock(&mddev->sync_mutex);
		return;
	}

	/* locked=false: reconfig_mutex is released inside stop_sync_thread(). */
	stop_sync_thread(mddev, false, false);
	mutex_unlock(&mddev->sync_mutex);
}
4898
4911
@@ -6264,14 +6277,7 @@ static void md_clean(struct mddev *mddev)
6264
6277
6265
6278
static void __md_stop_writes (struct mddev * mddev )
6266
6279
{
6267
- set_bit (MD_RECOVERY_FROZEN , & mddev -> recovery );
6268
- if (work_pending (& mddev -> del_work ))
6269
- flush_workqueue (md_misc_wq );
6270
- if (mddev -> sync_thread ) {
6271
- set_bit (MD_RECOVERY_INTR , & mddev -> recovery );
6272
- md_reap_sync_thread (mddev );
6273
- }
6274
-
6280
+ stop_sync_thread (mddev , true, false);
6275
6281
del_timer_sync (& mddev -> safemode_timer );
6276
6282
6277
6283
if (mddev -> pers && mddev -> pers -> quiesce ) {
@@ -6355,25 +6361,16 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
6355
6361
int err = 0 ;
6356
6362
int did_freeze = 0 ;
6357
6363
6364
+ if (mddev -> external && test_bit (MD_SB_CHANGE_PENDING , & mddev -> sb_flags ))
6365
+ return - EBUSY ;
6366
+
6358
6367
if (!test_bit (MD_RECOVERY_FROZEN , & mddev -> recovery )) {
6359
6368
did_freeze = 1 ;
6360
6369
set_bit (MD_RECOVERY_FROZEN , & mddev -> recovery );
6361
6370
md_wakeup_thread (mddev -> thread );
6362
6371
}
6363
- if (test_bit (MD_RECOVERY_RUNNING , & mddev -> recovery ))
6364
- set_bit (MD_RECOVERY_INTR , & mddev -> recovery );
6365
-
6366
- /*
6367
- * Thread might be blocked waiting for metadata update which will now
6368
- * never happen
6369
- */
6370
- md_wakeup_thread_directly (mddev -> sync_thread );
6371
6372
6372
- if (mddev -> external && test_bit (MD_SB_CHANGE_PENDING , & mddev -> sb_flags ))
6373
- return - EBUSY ;
6374
- mddev_unlock (mddev );
6375
- wait_event (resync_wait , !test_bit (MD_RECOVERY_RUNNING ,
6376
- & mddev -> recovery ));
6373
+ stop_sync_thread (mddev , false, false);
6377
6374
wait_event (mddev -> sb_wait ,
6378
6375
!test_bit (MD_SB_CHANGE_PENDING , & mddev -> sb_flags ));
6379
6376
mddev_lock_nointr (mddev );
@@ -6383,29 +6380,30 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
6383
6380
mddev -> sync_thread ||
6384
6381
test_bit (MD_RECOVERY_RUNNING , & mddev -> recovery )) {
6385
6382
pr_warn ("md: %s still in use.\n" ,mdname (mddev ));
6386
- if (did_freeze ) {
6387
- clear_bit (MD_RECOVERY_FROZEN , & mddev -> recovery );
6388
- set_bit (MD_RECOVERY_NEEDED , & mddev -> recovery );
6389
- md_wakeup_thread (mddev -> thread );
6390
- }
6391
6383
err = - EBUSY ;
6392
6384
goto out ;
6393
6385
}
6386
+
6394
6387
if (mddev -> pers ) {
6395
6388
__md_stop_writes (mddev );
6396
6389
6397
- err = - ENXIO ;
6398
- if ( mddev -> ro == MD_RDONLY )
6390
+ if ( mddev -> ro == MD_RDONLY ) {
6391
+ err = - ENXIO ;
6399
6392
goto out ;
6393
+ }
6394
+
6400
6395
mddev -> ro = MD_RDONLY ;
6401
6396
set_disk_ro (mddev -> gendisk , 1 );
6397
+ }
6398
+
6399
+ out :
6400
+ if ((mddev -> pers && !err ) || did_freeze ) {
6402
6401
clear_bit (MD_RECOVERY_FROZEN , & mddev -> recovery );
6403
6402
set_bit (MD_RECOVERY_NEEDED , & mddev -> recovery );
6404
6403
md_wakeup_thread (mddev -> thread );
6405
6404
sysfs_notify_dirent_safe (mddev -> sysfs_state );
6406
- err = 0 ;
6407
6405
}
6408
- out :
6406
+
6409
6407
mutex_unlock (& mddev -> open_mutex );
6410
6408
return err ;
6411
6409
}
@@ -6426,20 +6424,8 @@ static int do_md_stop(struct mddev *mddev, int mode,
6426
6424
set_bit (MD_RECOVERY_FROZEN , & mddev -> recovery );
6427
6425
md_wakeup_thread (mddev -> thread );
6428
6426
}
6429
- if (test_bit (MD_RECOVERY_RUNNING , & mddev -> recovery ))
6430
- set_bit (MD_RECOVERY_INTR , & mddev -> recovery );
6431
6427
6432
- /*
6433
- * Thread might be blocked waiting for metadata update which will now
6434
- * never happen
6435
- */
6436
- md_wakeup_thread_directly (mddev -> sync_thread );
6437
-
6438
- mddev_unlock (mddev );
6439
- wait_event (resync_wait , (mddev -> sync_thread == NULL &&
6440
- !test_bit (MD_RECOVERY_RUNNING ,
6441
- & mddev -> recovery )));
6442
- mddev_lock_nointr (mddev );
6428
+ stop_sync_thread (mddev , true, false);
6443
6429
6444
6430
mutex_lock (& mddev -> open_mutex );
6445
6431
if ((mddev -> pers && atomic_read (& mddev -> openers ) > !!bdev ) ||
0 commit comments