@@ -1066,7 +1066,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
 	if (ceph_inode_is_shutdown(inode))
 		return -EIO;

-	if (!len)
+	if (!len || !i_size)
 		return 0;
 	/*
 	 * flush any page cache pages in this range. this
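Note: the added !i_size test turns a read of a zero-length file into an immediate return of 0. With i_size already known to be zero there is nothing to fetch, so the function no longer builds page vectors and waits on an OSD read that can only come back as a zero-byte short read.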
@@ -1086,7 +1086,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
 		int num_pages;
 		size_t page_off;
 		bool more;
-		int idx;
+		int idx = 0;
 		size_t left;
 		struct ceph_osd_req_op *op;
 		u64 read_off = off;
@@ -1116,6 +1116,16 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
 		len = read_off + read_len - off;
 		more = len < iov_iter_count(to);

+		op = &req->r_ops[0];
+		if (sparse) {
+			extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
+			ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+			if (ret) {
+				ceph_osdc_put_request(req);
+				break;
+			}
+		}
+
 		num_pages = calc_pages_for(read_off, read_len);
 		page_off = offset_in_page(off);
 		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
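Note: moving the sparse-read extent map allocation up to this point, before calc_pages_for() and ceph_alloc_page_vector(), means the request is still the only live resource when ceph_alloc_sparse_ext_map() can fail. The error path is then a single ceph_osdc_put_request() plus break, with no page vector to unwind.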
@@ -1127,17 +1137,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,

 		osd_req_op_extent_osd_data_pages(req, 0, pages, read_len,
 						 offset_in_page(read_off),
-						 false, false);
-
-		op = &req->r_ops[0];
-		if (sparse) {
-			extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
-			ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
-			if (ret) {
-				ceph_osdc_put_request(req);
-				break;
-			}
-		}
+						 false, true);

 		ceph_osdc_start_request(osdc, req);
 		ret = ceph_osdc_wait_request(osdc, req);
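Note: the flipped final argument is own_pages. The last two booleans of osd_req_op_extent_osd_data_pages() are pages_from_pool and own_pages, so with true the OSD client frees the page vector itself when the request is destroyed. That is what lets the explicit ceph_release_page_vector() call disappear in a later hunk. A minimal user-space sketch of this ownership-transfer pattern (all names hypothetical, not the kernel API):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the request/page-vector pair. */
struct request {
	char **pages;
	int num_pages;
	bool own_pages;		/* free the pages when the request is put */
};

static void attach_pages(struct request *req, char **pages,
			 int num_pages, bool own_pages)
{
	req->pages = pages;
	req->num_pages = num_pages;
	req->own_pages = own_pages;
}

static void put_request(struct request *req)
{
	if (req->own_pages) {	/* mirrors own_pages == true above */
		for (int i = 0; i < req->num_pages; i++)
			free(req->pages[i]);
		free(req->pages);
	}
	free(req);
}

int main(void)
{
	struct request *req = calloc(1, sizeof(*req));
	char **pages = calloc(2, sizeof(*pages));

	for (int i = 0; i < 2; i++)
		pages[i] = malloc(4096);

	attach_pages(req, pages, 2, true);
	memcpy(pages[0], "data", 5);	/* consume pages before the put... */
	put_request(req);		/* ...which now frees them too */
	return 0;
}

The corollary is ordering: the request must stay alive until the data has been copied out of its pages, which is why the put moves below the copy loop further down.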
@@ -1160,7 +1160,14 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
 		else if (ret == -ENOENT)
 			ret = 0;

-		if (ret > 0 && IS_ENCRYPTED(inode)) {
+		if (ret < 0) {
+			ceph_osdc_put_request(req);
+			if (ret == -EBLOCKLISTED)
+				fsc->blocklisted = true;
+			break;
+		}
+
+		if (IS_ENCRYPTED(inode)) {
 			int fret;

 			fret = ceph_fscrypt_decrypt_extents(inode, pages,
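Note: failure is now handled in one place, immediately after ceph_osdc_wait_request(): put the request, latch -EBLOCKLISTED into fsc->blocklisted, and break out of the loop. Everything below therefore runs only when ret is a non-negative byte count, where the old code had to thread ret > 0 / ret >= 0 guards through the decryption, zeroing, and copy stages.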
@@ -1186,10 +1193,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
 			ret = min_t(ssize_t, fret, len);
 		}

-		ceph_osdc_put_request(req);
-
 		/* Short read but not EOF? Zero out the remainder. */
-		if (ret >= 0 && ret < len && (off + ret < i_size)) {
+		if (ret < len && (off + ret < i_size)) {
 			int zlen = min(len - ret, i_size - off - ret);
 			int zoff = page_off + ret;

@@ -1199,13 +1204,11 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
 			ret += zlen;
 		}

-		idx = 0;
-		if (ret <= 0)
-			left = 0;
-		else if (off + ret > i_size)
-			left = i_size - off;
+		if (off + ret > i_size)
+			left = (i_size > off) ? i_size - off : 0;
 		else
 			left = ret;
+
 		while (left > 0) {
 			size_t plen, copied;
@@ -1221,13 +1224,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
 				break;
 			}
 		}
-		ceph_release_page_vector(pages, num_pages);

-		if (ret < 0) {
-			if (ret == -EBLOCKLISTED)
-				fsc->blocklisted = true;
-			break;
-		}
+		ceph_osdc_put_request(req);

 		if (off >= i_size || !more)
 			break;
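Note: the page vector is no longer released by hand. Since the request owns the pages now (own_pages == true above), the single ceph_osdc_put_request() here frees both, and it deliberately runs only after the copy_page_to_iter() loop has drained the pages. The blocklisting check that used to live here moved up next to the wait, where the error is first seen.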
@@ -1553,6 +1551,16 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 			break;
 		}

+		op = &req->r_ops[0];
+		if (!write && sparse) {
+			extent_cnt = __ceph_sparse_read_ext_count(inode, size);
+			ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+			if (ret) {
+				ceph_osdc_put_request(req);
+				break;
+			}
+		}
+
 		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
 		if (len < 0) {
 			ceph_osdc_put_request(req);
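Note: same reordering as in __ceph_sync_read(): the extent map is allocated while the request is the only resource to unwind, before iter_get_bvecs_alloc() pins user pages; the new !write condition also skips the allocation for O_DIRECT writes, which can never be sparse reads. A small user-space sketch of the underlying acquire-in-order / release-in-reverse idiom (all names hypothetical):

#include <stdlib.h>

struct req { int id; };

/* Stand-in for ceph_alloc_sparse_ext_map(); assume it can fail. */
static int alloc_extent_map(struct req *req) { (void)req; return 0; }

static int do_io(void)
{
	struct req *req = malloc(sizeof(*req));
	void *pinned;
	int ret;

	if (!req)
		return -1;

	/* Fails while "req" is the only live resource: one-step unwind. */
	ret = alloc_extent_map(req);
	if (ret)
		goto out_put_req;

	pinned = malloc(4096);		/* iter_get_bvecs_alloc() stand-in */
	if (!pinned) {
		ret = -1;
		goto out_put_req;
	}

	/* ... issue and wait for the request ... */

	free(pinned);			/* release in reverse order */
out_put_req:
	free(req);
	return ret;
}

int main(void) { return do_io() ? 1 : 0; }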
@@ -1562,6 +1570,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 		if (len != size)
 			osd_req_op_extent_update(req, 0, len);

+		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
+
 		/*
 		 * To simplify error handling, allow AIO when IO within i_size
 		 * or IO can be satisfied by single OSD request.
@@ -1593,17 +1603,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 			req->r_mtime = mtime;
 		}

-		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
-		op = &req->r_ops[0];
-		if (sparse) {
-			extent_cnt = __ceph_sparse_read_ext_count(inode, size);
-			ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
-			if (ret) {
-				ceph_osdc_put_request(req);
-				break;
-			}
-		}
-
 		if (aio_req) {
 			aio_req->total_len += len;
 			aio_req->num_reqs++;
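Note: this deletion is the counterpart of the two insertions above. osd_req_op_extent_osd_data_bvecs() now sits right after osd_req_op_extent_update(), and the sparse-map allocation precedes the bvec setup, so a ceph_alloc_sparse_ext_map() failure would no longer appear to strand already-pinned pages on the break path. After the patch, both __ceph_sync_read() and ceph_direct_read_write() build requests in the same order: allocate the request, allocate the sparse extent map, attach the data buffer, then submit.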