@@ -1362,6 +1362,82 @@ static int io_estimate_bvec_size(struct iovec *iov, unsigned nr_iovs,
 	return max_segs;
 }
 
+static int io_vec_fill_kern_bvec(int ddir, struct iov_iter *iter,
+				 struct io_mapped_ubuf *imu,
+				 struct iovec *iovec, unsigned nr_iovs,
+				 struct iou_vec *vec)
+{
+	const struct bio_vec *src_bvec = imu->bvec;
+	struct bio_vec *res_bvec = vec->bvec;
+	unsigned res_idx = 0;
+	size_t total_len = 0;
+	unsigned iov_idx;
+
+	for (iov_idx = 0; iov_idx < nr_iovs; iov_idx++) {
+		size_t offset = (size_t)(uintptr_t)iovec[iov_idx].iov_base;
+		size_t iov_len = iovec[iov_idx].iov_len;
+		struct bvec_iter bi = {
+			.bi_size	= offset + iov_len,
+		};
+		struct bio_vec bv;
+
+		bvec_iter_advance(src_bvec, &bi, offset);
+		for_each_mp_bvec(bv, src_bvec, bi, bi)
+			res_bvec[res_idx++] = bv;
+		total_len += iov_len;
+	}
+	iov_iter_bvec(iter, ddir, res_bvec, res_idx, total_len);
+	return 0;
+}
+
+static int iov_kern_bvec_size(const struct iovec *iov,
+			      const struct io_mapped_ubuf *imu,
+			      unsigned int *nr_seg)
+{
+	size_t offset = (size_t)(uintptr_t)iov->iov_base;
+	const struct bio_vec *bvec = imu->bvec;
+	int start = 0, i = 0;
+	size_t off = 0;
+	int ret;
+
+	ret = validate_fixed_range(offset, iov->iov_len, imu);
+	if (unlikely(ret))
+		return ret;
+
+	for (i = 0; off < offset + iov->iov_len && i < imu->nr_bvecs;
+			off += bvec[i].bv_len, i++) {
+		if (offset >= off && offset < off + bvec[i].bv_len)
+			start = i;
+	}
+	*nr_seg = i - start;
+	return 0;
+}
+
+static int io_kern_bvec_size(struct iovec *iov, unsigned nr_iovs,
+			     struct io_mapped_ubuf *imu, unsigned *nr_segs)
+{
+	unsigned max_segs = 0;
+	size_t total_len = 0;
+	unsigned i;
+	int ret;
+
+	*nr_segs = 0;
+	for (i = 0; i < nr_iovs; i++) {
+		if (unlikely(!iov[i].iov_len))
+			return -EFAULT;
+		if (unlikely(check_add_overflow(total_len, iov[i].iov_len,
+						&total_len)))
+			return -EOVERFLOW;
+		ret = iov_kern_bvec_size(&iov[i], imu, &max_segs);
+		if (unlikely(ret))
+			return ret;
+		*nr_segs += max_segs;
+	}
+	if (total_len > MAX_RW_COUNT)
+		return -EINVAL;
+	return 0;
+}
+
 int io_import_reg_vec(int ddir, struct iov_iter *iter,
 		      struct io_kiocb *req, struct iou_vec *vec,
 		      unsigned nr_iovs, unsigned issue_flags)
@@ -1376,14 +1452,20 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
 	if (!node)
 		return -EFAULT;
 	imu = node->buf;
-	if (imu->is_kbuf)
-		return -EOPNOTSUPP;
 	if (!(imu->dir & (1 << ddir)))
 		return -EFAULT;
 
 	iovec_off = vec->nr - nr_iovs;
 	iov = vec->iovec + iovec_off;
-	nr_segs = io_estimate_bvec_size(iov, nr_iovs, imu);
+
+	if (imu->is_kbuf) {
+		int ret = io_kern_bvec_size(iov, nr_iovs, imu, &nr_segs);
+
+		if (unlikely(ret))
+			return ret;
+	} else {
+		nr_segs = io_estimate_bvec_size(iov, nr_iovs, imu);
+	}
 
 	if (sizeof(struct bio_vec) > sizeof(struct iovec)) {
 		size_t bvec_bytes;
@@ -1410,6 +1492,9 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
 		req->flags |= REQ_F_NEED_CLEANUP;
 	}
 
+	if (imu->is_kbuf)
+		return io_vec_fill_kern_bvec(ddir, iter, imu, iov, nr_iovs, vec);
+
 	return io_vec_fill_bvec(ddir, iter, imu, iov, nr_iovs, vec);
 }
 
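Note on the kernel-buffer path (editorial, not part of the patch): io_import_reg_vec() previously rejected kernel-registered buffers (imu->is_kbuf) with -EOPNOTSUPP. With this change, each iovec's iov_base is treated as a byte offset into the registered buffer: iov_kern_bvec_size() counts how many bio_vec segments one (offset, len) slice spans, io_kern_bvec_size() sums those counts while rejecting zero-length entries, additive overflow, and totals above MAX_RW_COUNT, and io_vec_fill_kern_bvec() then copies exactly the overlapping bvecs, using a bvec_iter whose bi_size starts at offset + len and is advanced by offset so that for_each_mp_bvec() visits only the slice.

Below is a minimal userspace sketch of the segment-counting walk, assuming a plain array of segment lengths in place of the kernel's bvec table; count_spanned_segs() is a hypothetical name for illustration, not a kernel function:

#include <stdio.h>
#include <stddef.h>

/*
 * Count how many consecutive segments the byte range
 * [offset, offset + len) overlaps; mirrors the loop in
 * iov_kern_bvec_size() above (illustrative only).
 */
static unsigned count_spanned_segs(const size_t *seg_len, unsigned nr_segs,
				   size_t offset, size_t len)
{
	unsigned start = 0, i;
	size_t off = 0;

	for (i = 0; off < offset + len && i < nr_segs;
			off += seg_len[i], i++) {
		/* Remember the segment the range starts in. */
		if (offset >= off && offset < off + seg_len[i])
			start = i;
	}
	/* Loop exits one past the last overlapped segment. */
	return i - start;
}

int main(void)
{
	/* A buffer made of four segments: 4K, 4K, 8K, 4K. */
	const size_t segs[] = { 4096, 4096, 8192, 4096 };

	/* The range [2048, 10240) overlaps segments 0, 1 and 2. */
	printf("%u\n", count_spanned_segs(segs, 4, 2048, 8192)); /* prints 3 */
	return 0;
}

The count is computed up front so io_import_reg_vec() can grow vec->bvec once before the fill pass, rather than reallocating while copying bvecs.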