@@ -1366,10 +1366,14 @@ int blkcg_activate_policy(struct request_queue *q,
 	/* alloc failed, nothing's initialized yet, free everything */
 	spin_lock_irq(&q->queue_lock);
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		struct blkcg *blkcg = blkg->blkcg;
+
+		spin_lock(&blkcg->lock);
 		if (blkg->pd[pol->plid]) {
 			pol->pd_free_fn(blkg->pd[pol->plid]);
 			blkg->pd[pol->plid] = NULL;
 		}
+		spin_unlock(&blkcg->lock);
 	}
 	spin_unlock_irq(&q->queue_lock);
 	ret = -ENOMEM;
@@ -1401,12 +1405,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	__clear_bit(pol->plid, q->blkcg_pols);
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		struct blkcg *blkcg = blkg->blkcg;
+
+		spin_lock(&blkcg->lock);
 		if (blkg->pd[pol->plid]) {
 			if (pol->pd_offline_fn)
 				pol->pd_offline_fn(blkg->pd[pol->plid]);
 			pol->pd_free_fn(blkg->pd[pol->plid]);
 			blkg->pd[pol->plid] = NULL;
 		}
+		spin_unlock(&blkcg->lock);
 	}
 
 	spin_unlock_irq(&q->queue_lock);
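
The pattern both hunks introduce is a nested lock: q->queue_lock is taken first with interrupts disabled, then each blkg's blkcg->lock is taken with a plain spin_lock (interrupts are already off) around the code that frees and clears blkg->pd[pol->plid], and the locks are released in reverse order. Below is a minimal userspace sketch of that nested-lock teardown, with pthread mutexes standing in for the kernel spinlocks; the names (outer_lock, inner_lock, item, pd, teardown_all) are illustrative and not part of the patch.

/* Sketch of the nested-lock pattern above: an outer lock protects the
 * list walk, and a per-item inner lock is nested around freeing the
 * item's private data, mirroring queue_lock -> blkcg->lock -> pd_free. */
#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

struct item {
	pthread_mutex_t inner_lock;	/* stands in for blkcg->lock */
	void *pd;			/* stands in for blkg->pd[pol->plid] */
	struct item *next;
};

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ q->queue_lock */
static struct item *item_list;

static void teardown_all(void)
{
	struct item *it;

	pthread_mutex_lock(&outer_lock);		/* outer lock first */
	for (it = item_list; it; it = it->next) {
		pthread_mutex_lock(&it->inner_lock);	/* nest per-item lock */
		if (it->pd) {
			free(it->pd);			/* ~ pol->pd_free_fn() */
			it->pd = NULL;
		}
		pthread_mutex_unlock(&it->inner_lock);
	}
	pthread_mutex_unlock(&outer_lock);
}

int main(void)
{
	struct item a = { PTHREAD_MUTEX_INITIALIZER, malloc(16), NULL };

	item_list = &a;
	teardown_all();
	printf("pd freed and cleared: %s\n", a.pd == NULL ? "yes" : "no");
	return 0;
}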