Commit e3111d9

Merge tag 'block-6.9-20240322' of git://git.kernel.dk/linux
Pull more block updates from Jens Axboe:

 - NVMe pull request via Keith:
     - Make an informative message less ominous (Keith)
     - Enhanced trace decoding (Guixin)
     - TCP updates (Hannes, Li)
     - Fabrics connect deadlock fix (Chunguang)
     - Platform API migration update (Uwe)
     - A new device quirk (Jiawei)

 - Remove dead assignment in fd (Yufeng)

* tag 'block-6.9-20240322' of git://git.kernel.dk/linux:
  nvmet-rdma: remove NVMET_RDMA_REQ_INVALIDATE_RKEY flag
  nvme: remove redundant BUILD_BUG_ON check
  floppy: remove duplicated code in redo_fd_request()
  nvme/tcp: Add wq_unbound modparam for nvme_tcp_wq
  nvme-tcp: Export the nvme_tcp_wq to sysfs
  drivers/nvme: Add quirks for device 126f:2262
  nvme: parse format command's lbafu when tracing
  nvme: add tracing of reservation commands
  nvme: parse zns command's zsa and zrasf to string
  nvme: use nvme_disk_is_ns_head helper
  nvme: fix reconnection fail due to reserved tag allocation
  nvmet: add tracing of zns commands
  nvmet: add tracing of authentication commands
  nvme-apple: Convert to platform remove callback returning void
  nvmet-tcp: do not continue for invalid icreq
  nvme: change shutdown timeout setting message
2 parents: 19dba09 + 0760267

12 files changed: +233 -34 lines changed

drivers/block/floppy.c

Lines changed: 0 additions & 1 deletion
@@ -2787,7 +2787,6 @@ static void redo_fd_request(void)
 		pending = set_next_request();
 		spin_unlock_irq(&floppy_lock);
 		if (!pending) {
-			do_floppy = NULL;
 			unlock_fdc();
 			return;
 		}

drivers/nvme/host/apple.c

Lines changed: 2 additions & 4 deletions
@@ -1532,7 +1532,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
 	return ret;
 }
 
-static int apple_nvme_remove(struct platform_device *pdev)
+static void apple_nvme_remove(struct platform_device *pdev)
 {
 	struct apple_nvme *anv = platform_get_drvdata(pdev);
 
@@ -1547,8 +1547,6 @@ static int apple_nvme_remove(struct platform_device *pdev)
 	apple_rtkit_shutdown(anv->rtk);
 
 	apple_nvme_detach_genpd(anv);
-
-	return 0;
 }
 
 static void apple_nvme_shutdown(struct platform_device *pdev)
@@ -1598,7 +1596,7 @@ static struct platform_driver apple_nvme_driver = {
 		.pm = pm_sleep_ptr(&apple_nvme_pm_ops),
 	},
 	.probe = apple_nvme_probe,
-	.remove = apple_nvme_remove,
+	.remove_new = apple_nvme_remove,
 	.shutdown = apple_nvme_shutdown,
 };
 module_platform_driver(apple_nvme_driver);
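The change above is part of the tree-wide platform API migration from the int-returning .remove callback to the void-returning .remove_new callback, since the platform core cannot do anything useful with the return value. A minimal sketch of the same conversion for a hypothetical driver (the foo_* names and foo_teardown() helper are illustrative, not part of this commit):

/* Before: .remove returns an int the platform core cannot act on. */
static int foo_remove(struct platform_device *pdev)
{
	struct foo_dev *foo = platform_get_drvdata(pdev);

	foo_teardown(foo);	/* hypothetical cleanup helper */
	return 0;
}

/* After: .remove_new returns void, so no misleading error path exists. */
static void foo_remove(struct platform_device *pdev)
{
	struct foo_dev *foo = platform_get_drvdata(pdev);

	foo_teardown(foo);
}

static struct platform_driver foo_driver = {
	.driver		= { .name = "foo" },
	.probe		= foo_probe,
	.remove_new	= foo_remove,	/* was: .remove = foo_remove */
};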

drivers/nvme/host/core.c

Lines changed: 5 additions & 6 deletions
@@ -1807,9 +1807,6 @@ static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
 {
 	struct nvme_ctrl *ctrl = ns->ctrl;
 
-	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
-			NVME_DSM_MAX_RANGES);
-
 	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
 		lim->max_hw_discard_sectors =
 			nvme_lba_to_sect(ns->head, ctrl->dmrsl);
@@ -3237,7 +3234,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
 
 		if (ctrl->shutdown_timeout != shutdown_timeout)
 			dev_info(ctrl->device,
-				 "Shutdown timeout set to %u seconds\n",
+				 "D3 entry latency set to %u seconds\n",
 				 ctrl->shutdown_timeout);
 	} else
 		ctrl->shutdown_timeout = shutdown_timeout;
@@ -4391,7 +4388,8 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 	set->ops = ops;
 	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
 	if (ctrl->ops->flags & NVME_F_FABRICS)
-		set->reserved_tags = NVMF_RESERVED_TAGS;
+		/* Reserved for fabric connect and keep alive */
+		set->reserved_tags = 2;
 	set->numa_node = ctrl->numa_node;
 	set->flags = BLK_MQ_F_NO_SCHED;
 	if (ctrl->ops->flags & NVME_F_BLOCKING)
@@ -4460,7 +4458,8 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 	if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
 		set->reserved_tags = NVME_AQ_DEPTH;
 	else if (ctrl->ops->flags & NVME_F_FABRICS)
-		set->reserved_tags = NVMF_RESERVED_TAGS;
+		/* Reserved for fabric connect */
+		set->reserved_tags = 1;
 	set->numa_node = ctrl->numa_node;
 	set->flags = BLK_MQ_F_SHOULD_MERGE;
 	if (ctrl->ops->flags & NVME_F_BLOCKING)
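The reworded dev_info() above appears to be the "make an informative message less ominous" item from the pull request: the value being printed is derived from the controller's reported RTD3 Entry Latency, so calling it a D3 entry latency rather than a "shutdown timeout" describes what it actually is without sounding like an error (this interpretation is inferred from the message text; the RTD3E derivation is not visible in this hunk).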

drivers/nvme/host/fabrics.h

Lines changed: 0 additions & 7 deletions
@@ -18,13 +18,6 @@
 /* default is -1: the fail fast mechanism is disabled */
 #define NVMF_DEF_FAIL_FAST_TMO	-1
 
-/*
- * Reserved one command for internal usage. This command is used for sending
- * the connect command, as well as for the keep alive command on the admin
- * queue once live.
- */
-#define NVMF_RESERVED_TAGS	1
-
 /*
  * Define a host as seen by the target. We allocate one at boot, but also
  * allow the override it when creating controllers. This is both to provide
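Dropping NVMF_RESERVED_TAGS ties in with the core.c hunks above: the admin tag set now reserves two tags (fabric connect plus keep-alive) while I/O tag sets reserve one (connect only), since a single shared constant could not express both and a reconnect could otherwise stall waiting for a reserved tag. A rough, illustrative sketch of how such reserved tags are consumed (the queue pointer and opcode here are assumptions, not taken from this diff):

	struct request *rq;

	/*
	 * Internal commands such as the fabrics connect are allocated with
	 * BLK_MQ_REQ_RESERVED, so they draw from set->reserved_tags instead
	 * of competing with normal I/O for tags.
	 */
	rq = blk_mq_alloc_request(ctrl->admin_q, REQ_OP_DRV_OUT,
				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);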

drivers/nvme/host/pci.c

Lines changed: 3 additions & 0 deletions
@@ -3363,6 +3363,9 @@ static const struct pci_device_id nvme_id_table[] = {
 		NVME_QUIRK_BOGUS_NID, },
 	{ PCI_VDEVICE(REDHAT, 0x0010),	/* Qemu emulated controller */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+	{ PCI_DEVICE(0x126f, 0x2262),	/* Silicon Motion generic */
+		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+				NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x126f, 0x2263),	/* Silicon Motion unidentified */
 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
 				NVME_QUIRK_BOGUS_NID, },
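For context, NVME_QUIRK_NO_DEEPEST_PS keeps the driver from enabling the deepest APST power-saving state on this controller, and NVME_QUIRK_BOGUS_NID (also carried by the neighbouring Silicon Motion entry) makes it ignore the namespace identifiers the device reports.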

drivers/nvme/host/pr.c

Lines changed: 1 addition & 2 deletions
@@ -97,8 +97,7 @@ static int nvme_sc_to_pr_err(int nvme_sc)
 static int nvme_send_pr_command(struct block_device *bdev,
 		struct nvme_command *c, void *data, unsigned int data_len)
 {
-	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
-	    nvme_disk_is_ns_head(bdev->bd_disk))
+	if (nvme_disk_is_ns_head(bdev->bd_disk))
 		return nvme_send_ns_head_pr_command(bdev, c, data, data_len);
 
 	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,

drivers/nvme/host/sysfs.c

Lines changed: 1 addition & 2 deletions
@@ -236,8 +236,7 @@ static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
 	struct block_device *bdev = disk->part0;
 	int ret;
 
-	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
-	    bdev->bd_disk->fops == &nvme_ns_head_ops)
+	if (nvme_disk_is_ns_head(bdev->bd_disk))
 		ret = ns_head_update_nuse(head);
 	else
 		ret = ns_update_nuse(bdev->bd_disk->private_data);
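Both this hunk and the pr.c change above replace an open-coded multipath check with the nvme_disk_is_ns_head() helper; dropping the explicit IS_ENABLED(CONFIG_NVME_MULTIPATH) test implies the helper carries that guard itself. A sketch of what such a helper plausibly looks like (the real definition lives in nvme.h and is not part of this diff):

#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
{
	/* The ns_head gendisk is the one driven by nvme_ns_head_ops. */
	return disk->fops == &nvme_ns_head_ops;
}
#else
static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
{
	return false;
}
#endif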

drivers/nvme/host/tcp.c

Lines changed: 18 additions & 3 deletions
@@ -36,6 +36,14 @@ static int so_priority;
 module_param(so_priority, int, 0644);
 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
 
+/*
+ * Use the unbound workqueue for nvme_tcp_wq, then we can set the cpu affinity
+ * from sysfs.
+ */
+static bool wq_unbound;
+module_param(wq_unbound, bool, 0644);
+MODULE_PARM_DESC(wq_unbound, "Use unbound workqueue for nvme-tcp IO context (default false)");
+
 /*
  * TLS handshake timeout
  */
@@ -1546,7 +1554,10 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
 	else if (nvme_tcp_poll_queue(queue))
 		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
 				ctrl->io_queues[HCTX_TYPE_READ] - 1;
-	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+	if (wq_unbound)
+		queue->io_cpu = WORK_CPU_UNBOUND;
+	else
+		queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
 }
 
 static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
@@ -2785,6 +2796,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
 
 static int __init nvme_tcp_init_module(void)
 {
+	unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
+
 	BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
 	BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
 	BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
@@ -2794,8 +2807,10 @@ static int __init nvme_tcp_init_module(void)
 	BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
 	BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
 
-	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
-			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+	if (wq_unbound)
+		wq_flags |= WQ_UNBOUND;
+
+	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", wq_flags, 0);
 	if (!nvme_tcp_wq)
 		return -ENOMEM;
 
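With the three hunks above, loading nvme-tcp with wq_unbound=Y allocates nvme_tcp_wq as an unbound workqueue and leaves each queue's io_cpu at WORK_CPU_UNBOUND instead of pinning io_work to one CPU. Because WQ_SYSFS is now always set, the workqueue's attributes should be visible in sysfs, presumably under /sys/devices/virtual/workqueue/nvme_tcp_wq/ (path inferred from the standard WQ_SYSFS layout, not shown in this diff), where the cpumask of an unbound workqueue can be tuned from userspace.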

drivers/nvme/host/trace.c

Lines changed: 101 additions & 4 deletions
@@ -119,7 +119,10 @@ static const char *nvme_trace_get_lba_status(struct trace_seq *p,
 static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10)
 {
 	const char *ret = trace_seq_buffer_ptr(p);
-	u8 lbaf = cdw10[0] & 0xF;
+	/*
+	 * lbafu(bit 13:12) is already in the upper 4 bits, lbafl: bit 03:00.
+	 */
+	u8 lbaf = (cdw10[1] & 0x30) | (cdw10[0] & 0xF);
 	u8 mset = (cdw10[0] >> 4) & 0x1;
 	u8 pi = (cdw10[0] >> 5) & 0x7;
 	u8 pil = cdw10[1] & 0x1;
@@ -164,28 +167,114 @@ static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
 
 static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
 {
+	static const char * const zsa_strs[] = {
+		[0x01] = "close zone",
+		[0x02] = "finish zone",
+		[0x03] = "open zone",
+		[0x04] = "reset zone",
+		[0x05] = "offline zone",
+		[0x10] = "set zone descriptor extension"
+	};
 	const char *ret = trace_seq_buffer_ptr(p);
 	u64 slba = get_unaligned_le64(cdw10);
+	const char *zsa_str;
 	u8 zsa = cdw10[12];
 	u8 all = cdw10[13];
 
-	trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all);
+	if (zsa < ARRAY_SIZE(zsa_strs) && zsa_strs[zsa])
+		zsa_str = zsa_strs[zsa];
+	else
+		zsa_str = "reserved";
+
+	trace_seq_printf(p, "slba=%llu, zsa=%u:%s, all=%u",
+		slba, zsa, zsa_str, all);
 	trace_seq_putc(p, 0);
 
 	return ret;
 }
 
 static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
 {
+	static const char * const zrasf_strs[] = {
+		[0x00] = "list all zones",
+		[0x01] = "list the zones in the ZSE: Empty state",
+		[0x02] = "list the zones in the ZSIO: Implicitly Opened state",
+		[0x03] = "list the zones in the ZSEO: Explicitly Opened state",
+		[0x04] = "list the zones in the ZSC: Closed state",
+		[0x05] = "list the zones in the ZSF: Full state",
+		[0x06] = "list the zones in the ZSRO: Read Only state",
+		[0x07] = "list the zones in the ZSO: Offline state",
+		[0x09] = "list the zones that have the zone attribute"
+	};
 	const char *ret = trace_seq_buffer_ptr(p);
 	u64 slba = get_unaligned_le64(cdw10);
 	u32 numd = get_unaligned_le32(cdw10 + 8);
 	u8 zra = cdw10[12];
 	u8 zrasf = cdw10[13];
+	const char *zrasf_str;
 	u8 pr = cdw10[14];
 
-	trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u",
-			 slba, numd, zra, zrasf, pr);
+	if (zrasf < ARRAY_SIZE(zrasf_strs) && zrasf_strs[zrasf])
+		zrasf_str = zrasf_strs[zrasf];
+	else
+		zrasf_str = "reserved";
+
+	trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u:%s, pr=%u",
+		slba, numd, zra, zrasf, zrasf_str, pr);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *nvme_trace_resv_reg(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u8 rrega = cdw10[0] & 0x7;
+	u8 iekey = (cdw10[0] >> 3) & 0x1;
+	u8 ptpl = (cdw10[3] >> 6) & 0x3;
+
+	trace_seq_printf(p, "rrega=%u, iekey=%u, ptpl=%u",
+			 rrega, iekey, ptpl);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *nvme_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u8 racqa = cdw10[0] & 0x7;
+	u8 iekey = (cdw10[0] >> 3) & 0x1;
+	u8 rtype = cdw10[1];
+
+	trace_seq_printf(p, "racqa=%u, iekey=%u, rtype=%u",
+			 racqa, iekey, rtype);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *nvme_trace_resv_rel(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u8 rrela = cdw10[0] & 0x7;
+	u8 iekey = (cdw10[0] >> 3) & 0x1;
+	u8 rtype = cdw10[1];
+
+	trace_seq_printf(p, "rrela=%u, iekey=%u, rtype=%u",
+			 rrela, iekey, rtype);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *nvme_trace_resv_report(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u32 numd = get_unaligned_le32(cdw10);
+	u8 eds = cdw10[4] & 0x1;
+
+	trace_seq_printf(p, "numd=%u, eds=%u", numd, eds);
 	trace_seq_putc(p, 0);
 
 	return ret;
@@ -243,6 +332,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
 		return nvme_trace_zone_mgmt_send(p, cdw10);
 	case nvme_cmd_zone_mgmt_recv:
 		return nvme_trace_zone_mgmt_recv(p, cdw10);
+	case nvme_cmd_resv_register:
+		return nvme_trace_resv_reg(p, cdw10);
+	case nvme_cmd_resv_acquire:
+		return nvme_trace_resv_acq(p, cdw10);
+	case nvme_cmd_resv_release:
+		return nvme_trace_resv_rel(p, cdw10);
+	case nvme_cmd_resv_report:
+		return nvme_trace_resv_report(p, cdw10);
 	default:
 		return nvme_trace_common(p, cdw10);
 	}
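Taken together, these helpers let the nvme command tracepoints render zone-management and reservation fields symbolically; for example, a Zone Management Send with zsa=0x04 would now be printed as "slba=..., zsa=4:reset zone, all=..." instead of a bare numeric zsa (sample output derived from the format strings above, not from an actual trace capture).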

drivers/nvme/target/rdma.c

Lines changed: 3 additions & 5 deletions
@@ -53,7 +53,6 @@ struct nvmet_rdma_cmd {
 
 enum {
 	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
-	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
 };
 
 struct nvmet_rdma_rsp {
@@ -722,7 +721,7 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
 	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
 	struct ib_send_wr *first_wr;
 
-	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
+	if (rsp->invalidate_rkey) {
 		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
 		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
 	} else {
@@ -905,10 +904,8 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 		goto error_out;
 	rsp->n_rdma += ret;
 
-	if (invalidate) {
+	if (invalidate)
 		rsp->invalidate_rkey = key;
-		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
-	}
 
 	return 0;
 
@@ -1047,6 +1044,7 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	rsp->req.cmd = cmd->nvme_cmd;
 	rsp->req.port = queue->port;
 	rsp->n_rdma = 0;
+	rsp->invalidate_rkey = 0;
 
 	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
 		unsigned long flags;
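With the flag gone, rsp->invalidate_rkey itself is the indicator: a non-zero key makes nvmet_rdma_queue_response() use IB_WR_SEND_WITH_INV. That is also why the final hunk clears invalidate_rkey when a response is set up in nvmet_rdma_recv_done(); rsp structures are reused, and a stale key must not trigger a spurious remote invalidation.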
