@@ -59,6 +59,7 @@ struct convert_context {
 	struct bio *bio_out;
 	struct bvec_iter iter_out;
 	atomic_t cc_pending;
+	unsigned int tag_offset;
 	u64 cc_sector;
 	union {
 		struct skcipher_request *req;
@@ -1187,7 +1188,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
 
 	tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift);
 
-	bip->bip_iter.bi_sector = io->cc->start + io->sector;
+	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
 
 	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
 				     tag_len, offset_in_page(io->integrity_metadata));
@@ -1256,6 +1257,7 @@ static void crypt_convert_init(struct crypt_config *cc,
 	if (bio_out)
 		ctx->iter_out = bio_out->bi_iter;
 	ctx->cc_sector = sector + cc->iv_offset;
+	ctx->tag_offset = 0;
 	init_completion(&ctx->restart);
 }
 
@@ -1588,7 +1590,6 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
 static blk_status_t crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx, bool atomic, bool reset_pending)
 {
-	unsigned int tag_offset = 0;
 	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
 	int r;
 
@@ -1611,9 +1612,9 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
 		atomic_inc(&ctx->cc_pending);
 
 		if (crypt_integrity_aead(cc))
-			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
+			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, ctx->tag_offset);
 		else
-			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
+			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, ctx->tag_offset);
 
 		switch (r) {
 		/*
@@ -1633,8 +1634,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
					 * exit and continue processing in a workqueue
					 */
					ctx->r.req = NULL;
+					ctx->tag_offset++;
					ctx->cc_sector += sector_step;
-					tag_offset++;
					return BLK_STS_DEV_RESOURCE;
				}
			} else {
@@ -1648,16 +1649,16 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
		 */
		case -EINPROGRESS:
			ctx->r.req = NULL;
+			ctx->tag_offset++;
			ctx->cc_sector += sector_step;
-			tag_offset++;
			continue;
		/*
		 * The request was already processed (synchronously).
		 */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector += sector_step;
-			tag_offset++;
+			ctx->tag_offset++;
			if (!atomic)
				cond_resched();
			continue;
@@ -1719,6 +1720,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
 	clone->bi_ioprio = io->base_bio->bi_ioprio;
+	clone->bi_iter.bi_sector = cc->start + io->sector;
 
 	remaining_size = size;
 
@@ -1909,7 +1911,6 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
			crypt_dec_pending(io);
			return 1;
		}
-		clone->bi_iter.bi_sector = cc->start + io->sector;
		crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
		io->saved_bi_iter = clone->bi_iter;
		dm_submit_bio_remap(io->base_bio, clone);
@@ -1925,13 +1926,13 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 	clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
 	if (!clone)
 		return 1;
+
+	clone->bi_iter.bi_sector = cc->start + io->sector;
 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
 
 	crypt_inc_pending(io);
 
-	clone->bi_iter.bi_sector = cc->start + io->sector;
-
 	if (dm_crypt_integrity_io_alloc(io, clone)) {
 		crypt_dec_pending(io);
 		bio_put(clone);
@@ -2039,8 +2040,6 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	/* crypt_convert should have filled the clone bio */
 	BUG_ON(io->ctx.iter_out.bi_size);
 
-	clone->bi_iter.bi_sector = cc->start + io->sector;
-
 	if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
 	    test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
 		dm_submit_bio_remap(io->base_bio, clone);
@@ -2092,13 +2091,12 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
 	struct crypt_config *cc = io->cc;
 	struct convert_context *ctx = &io->ctx;
 	int crypt_finished;
-	sector_t sector = io->sector;
 	blk_status_t r;
 
 	wait_for_completion(&ctx->restart);
 	reinit_completion(&ctx->restart);
 
-	r = crypt_convert(cc, &io->ctx, true, false);
+	r = crypt_convert(cc, &io->ctx, false, false);
 	if (r)
 		io->error = r;
 	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@@ -2109,10 +2107,8 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
 	}
 
 	/* Encryption was already finished, submit io now */
-	if (crypt_finished) {
+	if (crypt_finished)
 		kcryptd_crypt_write_io_submit(io, 0);
-		io->sector = sector;
-	}
 
 	crypt_dec_pending(io);
 }
@@ -2123,14 +2119,13 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	struct convert_context *ctx = &io->ctx;
 	struct bio *clone;
 	int crypt_finished;
-	sector_t sector = io->sector;
 	blk_status_t r;
 
 	/*
 	 * Prevent io from disappearing until this function completes.
 	 */
 	crypt_inc_pending(io);
-	crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
+	crypt_convert_init(cc, ctx, NULL, io->base_bio, io->sector);
 
 	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
 	if (unlikely(!clone)) {
@@ -2147,8 +2142,6 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		io->ctx.iter_in = clone->bi_iter;
 	}
 
-	sector += bio_sectors(clone);
-
 	crypt_inc_pending(io);
 	r = crypt_convert(cc, ctx,
			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
@@ -2172,10 +2165,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	}
 
 	/* Encryption was already finished, submit io now */
-	if (crypt_finished) {
+	if (crypt_finished)
 		kcryptd_crypt_write_io_submit(io, 0);
-		io->sector = sector;
-	}
 
 dec:
 	crypt_dec_pending(io);
@@ -2203,7 +2194,7 @@ static void kcryptd_crypt_read_continue(struct work_struct *work)
 	wait_for_completion(&io->ctx.restart);
 	reinit_completion(&io->ctx.restart);
 
-	r = crypt_convert(cc, &io->ctx, true, false);
+	r = crypt_convert(cc, &io->ctx, false, false);
 	if (r)
 		io->error = r;
 
@@ -2221,7 +2212,6 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	crypt_inc_pending(io);
 
 	if (io->ctx.aead_recheck) {
-		io->ctx.cc_sector = io->sector + cc->iv_offset;
		r = crypt_convert(cc, &io->ctx,
				  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
	} else {
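
The hunks above move the tag offset out of a local variable in crypt_convert() and into struct convert_context, so a conversion that is requeued (BLK_STS_DEV_RESOURCE or -EINPROGRESS) resumes at the tag slot it had reached instead of restarting from 0. Below is a minimal, self-contained user-space sketch of that pattern; it is not dm-crypt code, and the struct, field, and function names in it are purely illustrative.

/*
 * Illustrative sketch only: progress state (here tag_offset) lives in the
 * per-request context, so a conversion interrupted and later resumed (as if
 * handed off to a workqueue) continues where it stopped. A local counter in
 * the conversion function would be lost across the handoff.
 */
#include <stdio.h>

struct convert_ctx {
	unsigned int tag_offset;	/* survives across resume calls */
	unsigned int sector;
	unsigned int nr_sectors;
};

/* Process sectors until the budget runs out; return 1 if a resume is needed. */
static int convert(struct convert_ctx *ctx, unsigned int budget)
{
	while (ctx->sector < ctx->nr_sectors) {
		if (budget-- == 0)
			return 1;	/* "requeue": all progress stays in ctx */
		printf("sector %u uses tag slot %u\n", ctx->sector, ctx->tag_offset);
		ctx->tag_offset++;	/* advance together with the sector */
		ctx->sector++;
	}
	return 0;
}

int main(void)
{
	struct convert_ctx ctx = { .tag_offset = 0, .sector = 0, .nr_sectors = 5 };

	/* First pass runs out of budget; the second pass resumes from ctx. */
	if (convert(&ctx, 2))
		convert(&ctx, -1u);
	return 0;
}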