Skip to content

Commit c9bb139

Browse files
committed
Merge tag 'nvme-6.15-2025-05-15' of git://git.infradead.org/nvme into block-6.15

Pull NVMe fixes from Christoph:

"nvme fixes for linux 6.15

 - fixes for atomic writes (Alan Adamson)
 - fixes for polled CQs in nvmet-epf (Damien Le Moal)
 - fix for polled CQs in nvme-pci (Keith Busch)
 - fix compile on odd configs that need to be forced to inline (Kees Cook)
 - one more quirk (Ilya Guterman)"

* tag 'nvme-6.15-2025-05-15' of git://git.infradead.org/nvme:
  nvme-pci: add NVME_QUIRK_NO_DEEPEST_PS quirk for SOLIDIGM P44 Pro
  nvme: all namespaces in a subsystem must adhere to a common atomic write size
  nvme: multipath: enable BLK_FEAT_ATOMIC_WRITES for multipathing
  nvmet: pci-epf: remove NVMET_PCI_EPF_Q_IS_SQ
  nvmet: pci-epf: improve debug message
  nvmet: pci-epf: cleanup nvmet_pci_epf_raise_irq()
  nvmet: pci-epf: do not fall back to using INTX if not supported
  nvmet: pci-epf: clear completion queue IRQ flag on delete
  nvme-pci: acquire cq_poll_lock in nvme_poll_irqdisable
  nvme-pci: make nvme_pci_npages_prp() __always_inline
2 parents 8098514 + e765bf8 commit c9bb139

File tree

5 files changed

+59
-22
lines changed

5 files changed

+59
-22
lines changed

drivers/nvme/host/core.c

Lines changed: 27 additions & 3 deletions
@@ -2059,7 +2059,21 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
 		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
 			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
 		else
-			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
+			atomic_bs = (1 + ns->ctrl->awupf) * bs;
+
+		/*
+		 * Set subsystem atomic bs.
+		 */
+		if (ns->ctrl->subsys->atomic_bs) {
+			if (atomic_bs != ns->ctrl->subsys->atomic_bs) {
+				dev_err_ratelimited(ns->ctrl->device,
+					"%s: Inconsistent Atomic Write Size, Namespace will not be added: Subsystem=%d bytes, Controller/Namespace=%d bytes\n",
+					ns->disk ? ns->disk->disk_name : "?",
+					ns->ctrl->subsys->atomic_bs,
+					atomic_bs);
+			}
+		} else
+			ns->ctrl->subsys->atomic_bs = atomic_bs;
 
 		nvme_update_atomic_write_disk_info(ns, id, lim, bs, atomic_bs);
 	}
@@ -2201,6 +2215,17 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 	nvme_set_chunk_sectors(ns, id, &lim);
 	if (!nvme_update_disk_info(ns, id, &lim))
 		capacity = 0;
+
+	/*
+	 * Validate the max atomic write size fits within the subsystem's
+	 * atomic write capabilities.
+	 */
+	if (lim.atomic_write_hw_max > ns->ctrl->subsys->atomic_bs) {
+		blk_mq_unfreeze_queue(ns->disk->queue, memflags);
+		ret = -ENXIO;
+		goto out;
+	}
+
 	nvme_config_discard(ns, &lim);
 	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
 	    ns->head->ids.csi == NVME_CSI_ZNS)
@@ -3031,7 +3056,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 		kfree(subsys);
 		return -EINVAL;
 	}
-	subsys->awupf = le16_to_cpu(id->awupf);
 	nvme_mpath_default_iopolicy(subsys);
 
 	subsys->dev.class = &nvme_subsys_class;
@@ -3441,7 +3465,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
 		dev_pm_qos_expose_latency_tolerance(ctrl->device);
 	else if (!ctrl->apst_enabled && prev_apst_enabled)
 		dev_pm_qos_hide_latency_tolerance(ctrl->device);
-
+	ctrl->awupf = le16_to_cpu(id->awupf);
 out_free:
 	kfree(id);
 	return ret;

drivers/nvme/host/multipath.c

Lines changed: 2 additions & 1 deletion
@@ -638,7 +638,8 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 
 	blk_set_stacking_limits(&lim);
 	lim.dma_alignment = 3;
-	lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
+	lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT |
+			BLK_FEAT_POLL | BLK_FEAT_ATOMIC_WRITES;
 	if (head->ids.csi == NVME_CSI_ZNS)
 		lim.features |= BLK_FEAT_ZONED;

drivers/nvme/host/nvme.h

Lines changed: 2 additions & 1 deletion
@@ -410,6 +410,7 @@ struct nvme_ctrl {
 
 	enum nvme_ctrl_type cntrltype;
 	enum nvme_dctype dctype;
+	u16 awupf; /* 0's based value. */
 };
 
 static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
@@ -442,11 +443,11 @@ struct nvme_subsystem {
 	u8 cmic;
 	enum nvme_subsys_type subtype;
 	u16 vendor_id;
-	u16 awupf; /* 0's based awupf value. */
 	struct ida ns_ida;
 #ifdef CONFIG_NVME_MULTIPATH
 	enum nvme_iopolicy iopolicy;
 #endif
+	u32 atomic_bs;
 };
 
 /*

drivers/nvme/host/pci.c

Lines changed: 5 additions & 1 deletion
@@ -390,7 +390,7 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
  * as it only leads to a small amount of wasted memory for the lifetime of
  * the I/O.
  */
-static int nvme_pci_npages_prp(void)
+static __always_inline int nvme_pci_npages_prp(void)
 {
 	unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
 	unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
@@ -1202,7 +1202,9 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
 	WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
 
 	disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+	spin_lock(&nvmeq->cq_poll_lock);
 	nvme_poll_cq(nvmeq, NULL);
+	spin_unlock(&nvmeq->cq_poll_lock);
 	enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
 }
 
@@ -3737,6 +3739,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+	{ PCI_DEVICE(0x025e, 0xf1ac), /* SOLIDIGM P44 pro SSDPFKKW020X7 */
+		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */

drivers/nvme/target/pci-epf.c

Lines changed: 23 additions & 16 deletions
@@ -62,8 +62,7 @@ static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex);
 #define NVMET_PCI_EPF_CQ_RETRY_INTERVAL	msecs_to_jiffies(1)
 
 enum nvmet_pci_epf_queue_flags {
-	NVMET_PCI_EPF_Q_IS_SQ = 0,	/* The queue is a submission queue */
-	NVMET_PCI_EPF_Q_LIVE,		/* The queue is live */
+	NVMET_PCI_EPF_Q_LIVE = 0,	/* The queue is live */
 	NVMET_PCI_EPF_Q_IRQ_ENABLED,	/* IRQ is enabled for this queue */
 };
 
@@ -596,9 +595,6 @@ static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
 	struct nvmet_pci_epf_irq_vector *iv = cq->iv;
 	bool ret;
 
-	if (!test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
-		return false;
-
 	/* IRQ coalescing for the admin queue is not allowed. */
 	if (!cq->qid)
 		return true;
@@ -625,7 +621,8 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
 	struct pci_epf *epf = nvme_epf->epf;
 	int ret = 0;
 
-	if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+	if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) ||
+	    !test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
 		return;
 
 	mutex_lock(&ctrl->irq_lock);
@@ -636,14 +633,16 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
 	switch (nvme_epf->irq_type) {
 	case PCI_IRQ_MSIX:
 	case PCI_IRQ_MSI:
+		/*
+		 * If we fail to raise an MSI or MSI-X interrupt, it is likely
+		 * because the host is using legacy INTX IRQs (e.g. BIOS,
+		 * grub), but we can fallback to the INTX type only if the
+		 * endpoint controller supports this type.
+		 */
 		ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
 					nvme_epf->irq_type, cq->vector + 1);
-		if (!ret)
+		if (!ret || !nvme_epf->epc_features->intx_capable)
 			break;
-		/*
-		 * If we got an error, it is likely because the host is using
-		 * legacy IRQs (e.g. BIOS, grub).
-		 */
 		fallthrough;
 	case PCI_IRQ_INTX:
 		ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
@@ -656,7 +655,9 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
 	}
 
 	if (ret)
-		dev_err(ctrl->dev, "Failed to raise IRQ (err=%d)\n", ret);
+		dev_err_ratelimited(ctrl->dev,
+				    "CQ[%u]: Failed to raise IRQ (err=%d)\n",
+				    cq->qid, ret);
 
 unlock:
 	mutex_unlock(&ctrl->irq_lock);
@@ -1319,8 +1320,14 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
 
 	set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
 
-	dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
-		cqid, qsize, cq->qes, cq->vector);
+	if (test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+		dev_dbg(ctrl->dev,
+			"CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
+			cqid, qsize, cq->qes, cq->vector);
+	else
+		dev_dbg(ctrl->dev,
+			"CQ[%u]: %u entries of %zu B, IRQ disabled\n",
+			cqid, qsize, cq->qes);
 
 	return NVME_SC_SUCCESS;
 
@@ -1344,7 +1351,8 @@ static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid)
 
 	cancel_delayed_work_sync(&cq->work);
 	nvmet_pci_epf_drain_queue(cq);
-	nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
+	if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+		nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
 	nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
 
 	return NVME_SC_SUCCESS;
@@ -1533,7 +1541,6 @@ static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl,
 
 	if (sq) {
 		queue = &ctrl->sq[qid];
-		set_bit(NVMET_PCI_EPF_Q_IS_SQ, &queue->flags);
 	} else {
 		queue = &ctrl->cq[qid];
 		INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work);

0 commit comments

Comments
 (0)