Skip to content

Commit 57ed58c

Browse files
Ming Lei authored and Jens Axboe (axboe) committed
selftests: ublk: enable zero copy for stripe target
Use io_uring vectored fixed kernel buffer for handling stripe IO.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250325135155.935398-5-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 1045afa commit 57ed58c

File tree

2 files changed

+53
-17
lines changed

2 files changed

+53
-17
lines changed

tools/testing/selftests/ublk/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ TEST_PROGS += test_loop_05.sh
1717
TEST_PROGS += test_stripe_01.sh
1818
TEST_PROGS += test_stripe_02.sh
1919
TEST_PROGS += test_stripe_03.sh
20+
TEST_PROGS += test_stripe_04.sh
2021

2122
TEST_PROGS += test_stress_01.sh
2223
TEST_PROGS += test_stress_02.sh

tools/testing/selftests/ublk/stripe.c

Lines changed: 52 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -111,43 +111,67 @@ static void calculate_stripe_array(const struct stripe_conf *conf,
111111
}
112112
}
113113

114-
static inline enum io_uring_op stripe_to_uring_op(const struct ublksrv_io_desc *iod)
114+
static inline enum io_uring_op stripe_to_uring_op(
115+
const struct ublksrv_io_desc *iod, int zc)
115116
{
116117
unsigned ublk_op = ublksrv_get_op(iod);
117118

118119
if (ublk_op == UBLK_IO_OP_READ)
119-
return IORING_OP_READV;
120+
return zc ? IORING_OP_READV_FIXED : IORING_OP_READV;
120121
else if (ublk_op == UBLK_IO_OP_WRITE)
121-
return IORING_OP_WRITEV;
122+
return zc ? IORING_OP_WRITEV_FIXED : IORING_OP_WRITEV;
122123
assert(0);
123124
}
124125

125126
static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
126127
{
127128
const struct stripe_conf *conf = get_chunk_shift(q);
128-
enum io_uring_op op = stripe_to_uring_op(iod);
129+
int zc = !!(ublk_queue_use_zc(q) != 0);
130+
enum io_uring_op op = stripe_to_uring_op(iod, zc);
129131
struct io_uring_sqe *sqe[NR_STRIPE];
130132
struct stripe_array *s = alloc_stripe_array(conf, iod);
131133
struct ublk_io *io = ublk_get_io(q, tag);
132-
int i;
134+
int i, extra = zc ? 2 : 0;
133135

134136
io->private_data = s;
135137
calculate_stripe_array(conf, iod, s);
136138

137-
ublk_queue_alloc_sqes(q, sqe, s->nr);
138-
for (i = 0; i < s->nr; i++) {
139-
struct stripe *t = &s->s[i];
139+
ublk_queue_alloc_sqes(q, sqe, s->nr + extra);
140+
141+
if (zc) {
142+
io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
143+
sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
144+
sqe[0]->user_data = build_user_data(tag,
145+
ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1);
146+
}
147+
148+
for (i = zc; i < s->nr + extra - zc; i++) {
149+
struct stripe *t = &s->s[i - zc];
140150

141151
io_uring_prep_rw(op, sqe[i],
142152
t->seq + 1,
143153
(void *)t->vec,
144154
t->nr_vec,
145155
t->start << 9);
146-
io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
156+
if (zc) {
157+
sqe[i]->buf_index = tag;
158+
io_uring_sqe_set_flags(sqe[i],
159+
IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK);
160+
} else {
161+
io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
162+
}
147163
/* bit63 marks us as tgt io */
148-
sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i, 1);
164+
sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i - zc, 1);
165+
}
166+
if (zc) {
167+
struct io_uring_sqe *unreg = sqe[s->nr + 1];
168+
169+
io_uring_prep_buf_unregister(unreg, 0, tag, q->q_id, tag);
170+
unreg->user_data = build_user_data(tag, ublk_cmd_op_nr(unreg->cmd_op), 0, 1);
149171
}
150-
return s->nr;
172+
173+
/* register buffer is skip_success */
174+
return s->nr + zc;
151175
}
152176

153177
static int handle_flush(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
@@ -208,19 +232,27 @@ static void ublk_stripe_io_done(struct ublk_queue *q, int tag,
208232
struct ublk_io *io = ublk_get_io(q, tag);
209233
int res = cqe->res;
210234

211-
if (res < 0) {
235+
if (res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
212236
if (!io->result)
213237
io->result = res;
214-
ublk_err("%s: io failure %d tag %u\n", __func__, res, tag);
238+
if (res < 0)
239+
ublk_err("%s: io failure %d tag %u\n", __func__, res, tag);
215240
}
216241

242+
/* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
243+
if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
244+
io->tgt_ios += 1;
245+
217246
/* fail short READ/WRITE simply */
218247
if (op == UBLK_IO_OP_READ || op == UBLK_IO_OP_WRITE) {
219248
unsigned seq = user_data_to_tgt_data(cqe->user_data);
220249
struct stripe_array *s = io->private_data;
221250

222-
if (res < s->s[seq].vec->iov_len)
251+
if (res < s->s[seq].nr_sects << 9) {
223252
io->result = -EIO;
253+
ublk_err("%s: short rw op %u res %d exp %u tag %u\n",
254+
__func__, op, res, s->s[seq].vec->iov_len, tag);
255+
}
224256
}
225257

226258
if (ublk_completed_tgt_io(q, tag)) {
@@ -253,7 +285,7 @@ static int ublk_stripe_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
253285
struct stripe_conf *conf;
254286
unsigned chunk_shift;
255287
loff_t bytes = 0;
256-
int ret, i;
288+
int ret, i, mul = 1;
257289

258290
if ((chunk_size & (chunk_size - 1)) || !chunk_size) {
259291
ublk_err("invalid chunk size %u\n", chunk_size);
@@ -295,8 +327,11 @@ static int ublk_stripe_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
295327
dev->tgt.dev_size = bytes;
296328
p.basic.dev_sectors = bytes >> 9;
297329
dev->tgt.params = p;
298-
dev->tgt.sq_depth = dev->dev_info.queue_depth * conf->nr_files;
299-
dev->tgt.cq_depth = dev->dev_info.queue_depth * conf->nr_files;
330+
331+
if (dev->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY)
332+
mul = 2;
333+
dev->tgt.sq_depth = mul * dev->dev_info.queue_depth * conf->nr_files;
334+
dev->tgt.cq_depth = mul * dev->dev_info.queue_depth * conf->nr_files;
300335

301336
printf("%s: shift %u files %u\n", __func__, conf->shift, conf->nr_files);
302337

0 commit comments

Comments (0)