
Commit e30a106

cxl/pci: Cleanup 'sanitize' to always poll
In preparation for fixing the init/teardown of the 'sanitize' workqueue and
sysfs notification mechanism, arrange for cxl_mbox_sanitize_work() to be the
single location where the sysfs attribute is notified. With that change there
is no distinction between polled mode and interrupt mode. All the interrupt
does is accelerate the polling interval.

The change to check for "mds->security.sanitize_node" under the lock is there
to ensure that the interrupt, the work routine and the setup/teardown code can
all have a consistent view of the registered notifier and the workqueue state.
I.e. the expectation is that the interrupt is live past the point that the
sanitize sysfs attribute is published, and it may race teardown, so it must be
consulted under a lock. Given that new locking requirement, cxl_pci_mbox_irq()
is moved from hard to thread irq context.

Lastly, some opportunistic replacements of "queue_delayed_work(system_wq, ...)",
which is just open coded schedule_delayed_work(), are included.

Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
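A note on the resulting shape, since the message packs a lot in: after this
change the delayed work item is the only code path that notifies sysfs, and the
interrupt merely collapses the polling interval to zero. The sketch below is a
simplified illustration of that pattern, not the driver's actual code:
struct my_dev, my_poll_work(), my_irq_thread() and the "done" flag are made-up
stand-ins for the driver's state and hardware status check.

    /* Simplified sketch: irq accelerates polling; the work item is the sole notifier. */
    #include <linux/interrupt.h>
    #include <linux/jiffies.h>
    #include <linux/minmax.h>
    #include <linux/mutex.h>
    #include <linux/sysfs.h>
    #include <linux/workqueue.h>

    struct my_dev {
            struct mutex lock;                 /* shared by irq thread, work and teardown */
            struct kernfs_node *sanitize_node; /* sysfs dirent to notify, NULL until published */
            struct delayed_work poll_dwork;    /* only place that calls sysfs_notify_dirent() */
            int poll_tmo_secs;
            bool done;                         /* stand-in for polling hardware completion status */
    };

    static void my_poll_work(struct work_struct *work)
    {
            struct my_dev *d = container_of(work, struct my_dev, poll_dwork.work);

            mutex_lock(&d->lock);
            if (d->done) {
                    if (d->sanitize_node)
                            sysfs_notify_dirent(d->sanitize_node);
            } else {
                    /* back off by 10s per pass, capped at 15 minutes */
                    d->poll_tmo_secs = min(15 * 60, d->poll_tmo_secs + 10);
                    schedule_delayed_work(&d->poll_dwork, d->poll_tmo_secs * HZ);
            }
            mutex_unlock(&d->lock);
    }

    /* Threaded irq handler: may sleep, so taking the mutex is allowed. */
    static irqreturn_t my_irq_thread(int irq, void *data)
    {
            struct my_dev *d = data;

            mutex_lock(&d->lock);
            if (d->sanitize_node)
                    mod_delayed_work(system_wq, &d->poll_dwork, 0);
            mutex_unlock(&d->lock);
            return IRQ_HANDLED;
    }

The practical effect is that teardown only ever has to cancel one delayed work
item, whether or not the device has a mailbox interrupt.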
1 parent: 76fe871

3 files changed, 26 insertions(+), 39 deletions(-)
drivers/cxl/core/memdev.c

Lines changed: 1 addition & 2 deletions
@@ -561,8 +561,7 @@ static void cxl_memdev_security_shutdown(struct device *dev)
         struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
         struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 
-        if (mds->security.poll)
-                cancel_delayed_work_sync(&mds->security.poll_dwork);
+        cancel_delayed_work_sync(&mds->security.poll_dwork);
 }
 
 static void cxl_memdev_shutdown(struct device *dev)
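Dropping the `if (mds->security.poll)` guard leans on two things: the pci.c
change below now initializes poll_dwork unconditionally, and
cancel_delayed_work_sync() is safe to call on a delayed work that was
initialized but never queued. A tiny sketch of that assumption (my_dev,
my_work_fn, my_setup and my_teardown are illustrative names):

    #include <linux/workqueue.h>

    struct my_dev {
            struct delayed_work poll_dwork;
    };

    static void my_work_fn(struct work_struct *work)
    {
            /* ... */
    }

    static void my_setup(struct my_dev *d)
    {
            /* Always initialized, whether or not it is ever scheduled. */
            INIT_DELAYED_WORK(&d->poll_dwork, my_work_fn);
    }

    static void my_teardown(struct my_dev *d)
    {
            /*
             * No guard needed: cancelling an initialized-but-never-queued
             * delayed work is a harmless no-op (it just returns false).
             */
            cancel_delayed_work_sync(&d->poll_dwork);
    }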

drivers/cxl/cxlmem.h

Lines changed: 0 additions & 2 deletions
@@ -360,15 +360,13 @@ struct cxl_fw_state {
  *
  * @state: state of last security operation
  * @enabled_cmds: All security commands enabled in the CEL
- * @poll: polling for sanitization is enabled, device has no mbox irq support
  * @poll_tmo_secs: polling timeout
  * @poll_dwork: polling work item
  * @sanitize_node: sanitation sysfs file to notify
  */
 struct cxl_security_state {
         unsigned long state;
         DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX);
-        bool poll;
         int poll_tmo_secs;
         struct delayed_work poll_dwork;
         struct kernfs_node *sanitize_node;
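For reference, the struct as it reads after this hunk, reassembled from the
context lines above:

    struct cxl_security_state {
            unsigned long state;
            DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX);
            int poll_tmo_secs;
            struct delayed_work poll_dwork;
            struct kernfs_node *sanitize_node;
    };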

drivers/cxl/pci.c

Lines changed: 25 additions & 35 deletions
@@ -128,10 +128,10 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
         reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
         opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
         if (opcode == CXL_MBOX_OP_SANITIZE) {
+                mutex_lock(&mds->mbox_mutex);
                 if (mds->security.sanitize_node)
-                        sysfs_notify_dirent(mds->security.sanitize_node);
-
-                dev_dbg(cxlds->dev, "Sanitization operation ended\n");
+                        mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
+                mutex_unlock(&mds->mbox_mutex);
         } else {
                 /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
                 rcuwait_wake_up(&mds->mbox_wait);
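Because the handler now takes mbox_mutex, and mutex_lock() may sleep, it can no
longer run in hard irq context; that is why the registration later in this
patch becomes cxl_request_irq(cxlds, irq, NULL, cxl_pci_mbox_irq), i.e. no
primary handler and cxl_pci_mbox_irq as the thread function. In terms of the
stock kernel API the same move looks roughly like the sketch below;
my_thread_handler, my_setup_irq and the "my-mbox" name are illustrative, and
the flag combination is an assumption rather than something taken from the
driver's wrapper.

    #include <linux/device.h>
    #include <linux/interrupt.h>

    static irqreturn_t my_thread_handler(int irq, void *data)
    {
            /* Runs in a sleepable irq thread: taking a mutex here is fine. */
            return IRQ_HANDLED;
    }

    static int my_setup_irq(struct device *dev, int irq, void *data)
    {
            /*
             * NULL primary handler + thread_fn: the irq core supplies a default
             * hard handler and defers the real work to the irq thread.
             * IRQF_ONESHOT is required when no primary handler is given.
             */
            return devm_request_threaded_irq(dev, irq, NULL, my_thread_handler,
                                             IRQF_SHARED | IRQF_ONESHOT,
                                             "my-mbox", data);
    }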
@@ -160,8 +160,7 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
                 int timeout = mds->security.poll_tmo_secs + 10;
 
                 mds->security.poll_tmo_secs = min(15 * 60, timeout);
-                queue_delayed_work(system_wq, &mds->security.poll_dwork,
-                                   timeout * HZ);
+                schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
         }
         mutex_unlock(&mds->mbox_mutex);
 }
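On the queue_delayed_work(system_wq, ...) replacements the commit message calls
"open coded schedule_delayed_work()": to the best of my knowledge
schedule_delayed_work() is simply an inline wrapper in
include/linux/workqueue.h, roughly:

    /* Paraphrased from include/linux/workqueue.h */
    static inline bool schedule_delayed_work(struct delayed_work *dwork,
                                             unsigned long delay)
    {
            return queue_delayed_work(system_wq, dwork, delay);
    }

So the swap is purely cosmetic; both forms queue onto system_wq.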
@@ -293,15 +292,11 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
          * and allow userspace to poll(2) for completion.
          */
         if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
-                if (mds->security.poll) {
-                        /* give first timeout a second */
-                        timeout = 1;
-                        mds->security.poll_tmo_secs = timeout;
-                        queue_delayed_work(system_wq,
-                                           &mds->security.poll_dwork,
-                                           timeout * HZ);
-                }
-
+                /* give first timeout a second */
+                timeout = 1;
+                mds->security.poll_tmo_secs = timeout;
+                schedule_delayed_work(&mds->security.poll_dwork,
+                                      timeout * HZ);
                 dev_dbg(dev, "Sanitization operation started\n");
                 goto success;
         }
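The context comment here ("allow userspace to poll(2) for completion") is the
other half of the sysfs_notify_dirent() call: userspace opens the attribute,
does an initial read, then waits for POLLPRI. A minimal illustrative consumer
is below; the sysfs path is a placeholder, not something specified by this
commit.

    /* Illustrative userspace consumer of a sysfs notification; path is a placeholder. */
    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[64];
            /* Placeholder path: substitute the memdev's sanitize-related attribute. */
            int fd = open("/sys/bus/cxl/devices/mem0/security/state", O_RDONLY);

            if (fd < 0)
                    return 1;

            struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

            /* Sysfs poll support requires an initial read before waiting. */
            (void)read(fd, buf, sizeof(buf));

            /* Blocks until sysfs_notify_dirent() fires on the attribute. */
            if (poll(&pfd, 1, -1) > 0) {
                    lseek(fd, 0, SEEK_SET);
                    ssize_t n = read(fd, buf, sizeof(buf) - 1);
                    if (n > 0) {
                            buf[n] = '\0';
                            printf("attribute now: %s", buf);
                    }
            }
            close(fd);
            return 0;
    }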
@@ -384,7 +379,9 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
         const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
         struct device *dev = cxlds->dev;
         unsigned long timeout;
+        int irq, msgnum;
         u64 md_status;
+        u32 ctrl;
 
         timeout = jiffies + mbox_ready_timeout * HZ;
         do {
@@ -432,33 +429,26 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
         dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
 
         rcuwait_init(&mds->mbox_wait);
+        INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
 
-        if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
-                u32 ctrl;
-                int irq, msgnum;
-                struct pci_dev *pdev = to_pci_dev(cxlds->dev);
-
-                msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
-                irq = pci_irq_vector(pdev, msgnum);
-                if (irq < 0)
-                        goto mbox_poll;
-
-                if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
-                        goto mbox_poll;
+        /* background command interrupts are optional */
+        if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
+                return 0;
 
-                /* enable background command mbox irq support */
-                ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
-                ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
-                writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+        msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
+        irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum);
+        if (irq < 0)
+                return 0;
 
+        if (cxl_request_irq(cxlds, irq, NULL, cxl_pci_mbox_irq))
                 return 0;
-        }
 
-mbox_poll:
-        mds->security.poll = true;
-        INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
+        dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n");
+        /* enable background command mbox irq support */
+        ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+        ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
+        writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
 
-        dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
         return 0;
 }
 
