
Commit a9b5bb5

allenpais authored and cminyard committed
ipmi: Convert from tasklet to BH workqueue
The only generic interface to execute asynchronously in the BH context is
tasklet; however, it's marked deprecated and has some design flaws. To
replace tasklets, BH workqueue support was recently added. A BH workqueue
behaves similarly to regular workqueues except that the queued work items
are executed in the BH context.

This patch converts drivers/char/ipmi/* from tasklet to BH workqueue.

Based on the work done by Tejun Heo <tj@kernel.org>
Branch: https://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git for-6.10

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
Message-Id: <20240327160314.9982-7-apais@linux.microsoft.com>
[Removed a duplicate include of workqueue.h]
Signed-off-by: Corey Minyard <minyard@acm.org>
1 parent 8d025e2 commit a9b5bb5


drivers/char/ipmi/ipmi_msghandler.c

Lines changed: 14 additions & 15 deletions
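Before reading the diff itself, it may help to see the general shape of the tasklet-to-BH-workqueue conversion the commit message describes. The sketch below is illustrative only, not code from this patch: the my_dev names are hypothetical, and it assumes the system_bh_wq workqueue and the from_work() helper provided by the for-6.10 workqueue branch linked above.

/*
 * Minimal sketch of the tasklet -> BH workqueue pattern; the my_dev
 * names are hypothetical and do not exist in the IPMI code.
 */
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct bh_work;	/* was: struct tasklet_struct bh_tasklet */
	/* ... device state ... */
};

/* was: static void my_dev_bh(struct tasklet_struct *t) */
static void my_dev_bh_work(struct work_struct *work)
{
	/* was: struct my_dev *dev = from_tasklet(dev, t, bh_tasklet); */
	struct my_dev *dev = from_work(dev, work, bh_work);

	/* deferred processing still runs in BH (softirq) context here */
	(void)dev;
}

static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	/* was: tasklet_schedule(&dev->bh_tasklet); */
	queue_work(system_bh_wq, &dev->bh_work);
	return IRQ_HANDLED;
}

static void my_dev_setup(struct my_dev *dev)
{
	/* was: tasklet_setup(&dev->bh_tasklet, my_dev_bh); */
	INIT_WORK(&dev->bh_work, my_dev_bh_work);
}

static void my_dev_teardown(struct my_dev *dev)
{
	/* was: tasklet_kill(&dev->bh_tasklet); */
	cancel_work_sync(&dev->bh_work);
}

The patch below applies exactly this substitution to the IPMI message handler.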
@@ -41,7 +41,7 @@
 
 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
-static void smi_recv_tasklet(struct tasklet_struct *t);
+static void smi_recv_work(struct work_struct *t);
 static void handle_new_recv_msgs(struct ipmi_smi *intf);
 static void need_waiter(struct ipmi_smi *intf);
 static int handle_one_recv_msg(struct ipmi_smi *intf,
@@ -498,13 +498,13 @@ struct ipmi_smi {
 	/*
 	 * Messages queued for delivery. If delivery fails (out of memory
 	 * for instance), They will stay in here to be processed later in a
-	 * periodic timer interrupt. The tasklet is for handling received
+	 * periodic timer interrupt. The workqueue is for handling received
 	 * messages directly from the handler.
 	 */
 	spinlock_t waiting_rcv_msgs_lock;
 	struct list_head waiting_rcv_msgs;
 	atomic_t watchdog_pretimeouts_to_deliver;
-	struct tasklet_struct recv_tasklet;
+	struct work_struct recv_work;
 
 	spinlock_t xmit_msgs_lock;
 	struct list_head xmit_msgs;
@@ -704,7 +704,7 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
 	struct cmd_rcvr *rcvr, *rcvr2;
 	struct list_head list;
 
-	tasklet_kill(&intf->recv_tasklet);
+	cancel_work_sync(&intf->recv_work);
 
 	free_smi_msg_list(&intf->waiting_rcv_msgs);
 	free_recv_msg_list(&intf->waiting_events);
@@ -1319,7 +1319,7 @@ static void free_user(struct kref *ref)
 {
 	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
 
-	/* SRCU cleanup must happen in task context. */
+	/* SRCU cleanup must happen in workqueue context. */
 	queue_work(remove_work_wq, &user->remove_work);
 }
 
@@ -3605,8 +3605,7 @@ int ipmi_add_smi(struct module *owner,
 	intf->curr_seq = 0;
 	spin_lock_init(&intf->waiting_rcv_msgs_lock);
 	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
-	tasklet_setup(&intf->recv_tasklet,
-		      smi_recv_tasklet);
+	INIT_WORK(&intf->recv_work, smi_recv_work);
 	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
 	spin_lock_init(&intf->xmit_msgs_lock);
 	INIT_LIST_HEAD(&intf->xmit_msgs);
@@ -4779,7 +4778,7 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
 			 * To preserve message order, quit if we
 			 * can't handle a message. Add the message
 			 * back at the head, this is safe because this
-			 * tasklet is the only thing that pulls the
+			 * workqueue is the only thing that pulls the
 			 * messages.
 			 */
 			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
@@ -4812,10 +4811,10 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
 	}
 }
 
-static void smi_recv_tasklet(struct tasklet_struct *t)
+static void smi_recv_work(struct work_struct *t)
 {
 	unsigned long flags = 0; /* keep us warning-free. */
-	struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
+	struct ipmi_smi *intf = from_work(intf, t, recv_work);
 	int run_to_completion = intf->run_to_completion;
 	struct ipmi_smi_msg *newmsg = NULL;
 
@@ -4866,7 +4865,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
 
 	/*
 	 * To preserve message order, we keep a queue and deliver from
-	 * a tasklet.
+	 * a workqueue.
 	 */
 	if (!run_to_completion)
 		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
@@ -4887,9 +4886,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 
 	if (run_to_completion)
-		smi_recv_tasklet(&intf->recv_tasklet);
+		smi_recv_work(&intf->recv_work);
 	else
-		tasklet_schedule(&intf->recv_tasklet);
+		queue_work(system_bh_wq, &intf->recv_work);
 }
 EXPORT_SYMBOL(ipmi_smi_msg_received);
 
@@ -4899,7 +4898,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
 		return;
 
 	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
-	tasklet_schedule(&intf->recv_tasklet);
+	queue_work(system_bh_wq, &intf->recv_work);
 }
 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
 
@@ -5068,7 +5067,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
 					       flags);
 	}
 
-	tasklet_schedule(&intf->recv_tasklet);
+	queue_work(system_bh_wq, &intf->recv_work);
 
 	return need_timer;
 }
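A note on the two helpers this conversion relies on (both come from the for-6.10 workqueue branch named in the commit message, not from this patch): queue_work(system_bh_wq, ...) places the work item on a shared bottom-half workqueue, so the handler still runs in BH context as the tasklet did, and from_work() is a container_of() wrapper. Assuming the definition from that branch, the line added in smi_recv_work():

	/* from_work(var, work_ptr, member) wraps container_of(), so this: */
	struct ipmi_smi *intf = from_work(intf, t, recv_work);
	/* behaves roughly like:
	 *   struct ipmi_smi *intf = container_of(t, struct ipmi_smi, recv_work);
	 */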
