 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
-static void smi_recv_tasklet(struct tasklet_struct *t);
+static void smi_recv_work(struct work_struct *t);
 static void handle_new_recv_msgs(struct ipmi_smi *intf);
 static void need_waiter(struct ipmi_smi *intf);
 static int handle_one_recv_msg(struct ipmi_smi *intf,
@@ -498,13 +498,13 @@ struct ipmi_smi {
 	/*
 	 * Messages queued for delivery. If delivery fails (out of memory
 	 * for instance), they will stay in here to be processed later in a
-	 * periodic timer interrupt. The tasklet is for handling received
+	 * periodic timer interrupt. The workqueue is for handling received
 	 * messages directly from the handler.
 	 */
 	spinlock_t waiting_rcv_msgs_lock;
 	struct list_head waiting_rcv_msgs;
 	atomic_t watchdog_pretimeouts_to_deliver;
-	struct tasklet_struct recv_tasklet;
+	struct work_struct recv_work;
 
 	spinlock_t xmit_msgs_lock;
 	struct list_head xmit_msgs;
@@ -704,7 +704,7 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
 	struct cmd_rcvr *rcvr, *rcvr2;
 	struct list_head list;
 
-	tasklet_kill(&intf->recv_tasklet);
+	cancel_work_sync(&intf->recv_work);
 
 	free_smi_msg_list(&intf->waiting_rcv_msgs);
 	free_recv_msg_list(&intf->waiting_events);
@@ -1319,7 +1319,7 @@ static void free_user(struct kref *ref)
 {
 	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
 
-	/* SRCU cleanup must happen in task context. */
+	/* SRCU cleanup must happen in workqueue context. */
 	queue_work(remove_work_wq, &user->remove_work);
 }
 
@@ -3605,8 +3605,7 @@ int ipmi_add_smi(struct module *owner,
 	intf->curr_seq = 0;
 	spin_lock_init(&intf->waiting_rcv_msgs_lock);
 	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
-	tasklet_setup(&intf->recv_tasklet,
-		      smi_recv_tasklet);
+	INIT_WORK(&intf->recv_work, smi_recv_work);
 	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
 	spin_lock_init(&intf->xmit_msgs_lock);
 	INIT_LIST_HEAD(&intf->xmit_msgs);
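
The conversion pattern in the two hunks above, in miniature: the handler now takes a work_struct and recovers its container with from_work() (the workqueue counterpart of from_tasklet()), and it is registered with INIT_WORK() instead of tasklet_setup(). A minimal sketch; struct my_dev, my_work_fn, and my_dev_setup are hypothetical names, not part of this patch.

#include <linux/workqueue.h>

struct my_dev {				/* hypothetical driver state */
	struct work_struct work;	/* replaces a struct tasklet_struct */
};

/* Runs in softirq context when queued on system_bh_wq. */
static void my_work_fn(struct work_struct *w)
{
	struct my_dev *dev = from_work(dev, w, work);	/* container_of() helper */

	/* ... process pending messages for dev ... */
}

static void my_dev_setup(struct my_dev *dev)
{
	INIT_WORK(&dev->work, my_work_fn);	/* was tasklet_setup() */
}
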
@@ -4779,7 +4778,7 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
 				 * To preserve message order, quit if we
 				 * can't handle a message. Add the message
 				 * back at the head, this is safe because this
-				 * tasklet is the only thing that pulls the
+				 * workqueue is the only thing that pulls the
 				 * messages.
 				 */
 				list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
@@ -4812,10 +4811,10 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
 	}
 }
 
-static void smi_recv_tasklet(struct tasklet_struct *t)
+static void smi_recv_work(struct work_struct *t)
 {
 	unsigned long flags = 0; /* keep us warning-free. */
-	struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
+	struct ipmi_smi *intf = from_work(intf, t, recv_work);
 	int run_to_completion = intf->run_to_completion;
 	struct ipmi_smi_msg *newmsg = NULL;
 
@@ -4866,7 +4865,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
 
 	/*
 	 * To preserve message order, we keep a queue and deliver from
-	 * a tasklet.
+	 * a workqueue.
 	 */
 	if (!run_to_completion)
 		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
@@ -4887,9 +4886,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 
 	if (run_to_completion)
-		smi_recv_tasklet(&intf->recv_tasklet);
+		smi_recv_work(&intf->recv_work);
 	else
-		tasklet_schedule(&intf->recv_tasklet);
+		queue_work(system_bh_wq, &intf->recv_work);
 }
 EXPORT_SYMBOL(ipmi_smi_msg_received);
 
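
The dispatch above is the crux of the conversion: in run_to_completion mode (which IPMI uses when nothing else may run, e.g. at panic time) the handler is simply called synchronously; otherwise the work is queued on system_bh_wq, so it still executes in softirq (bottom-half) context, matching the old tasklet_schedule() semantics. Continuing the hypothetical sketch from earlier:

/* Hypothetical dispatch helper mirroring the hunk above. */
static void my_dev_msg_received(struct my_dev *dev, bool run_to_completion)
{
	if (run_to_completion)
		my_work_fn(&dev->work);			/* call directly, no scheduling */
	else
		queue_work(system_bh_wq, &dev->work);	/* like tasklet_schedule() */
}
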
@@ -4899,7 +4898,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
 		return;
 
 	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
-	tasklet_schedule(&intf->recv_tasklet);
+	queue_work(system_bh_wq, &intf->recv_work);
 }
 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
 
@@ -5068,7 +5067,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
					       flags);
 	}
 
-	tasklet_schedule(&intf->recv_tasklet);
+	queue_work(system_bh_wq, &intf->recv_work);
 
 	return need_timer;
 }
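
Teardown maps just as directly: cancel_work_sync() (seen in the clean_up_interface_data() hunk above) takes the place of tasklet_kill(), waiting for any queued or running work item to finish before returning. For the hypothetical my_dev sketch:

/* Like tasklet_kill(): waits until the work item is idle. */
static void my_dev_cleanup(struct my_dev *dev)
{
	cancel_work_sync(&dev->work);
}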