Skip to content

Commit cbfac77

Browse files
committed
xen/blkback: move blkif_get_x86_*_req() into blkback.c
There is no need to have the functions blkif_get_x86_32_req() and blkif_get_x86_64_req() in a header file, as they are used in one place only. So move them into the using source file and drop the inline qualifier.

While at it, fix some style issues, and simplify the code by reusing a variable and by using min() instead of open-coding it. Instead of using barrier(), use READ_ONCE() to avoid multiple reads of nr_segments.

Signed-off-by: Juergen Gross <jgross@suse.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
1 parent e7b4c07 commit cbfac77

File tree

2 files changed

+104
-96
lines changed

2 files changed

+104
-96
lines changed

drivers/block/xen-blkback/blkback.c

Lines changed: 104 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1072,7 +1072,111 @@ static void end_block_io_op(struct bio *bio)
10721072
bio_put(bio);
10731073
}
10741074

1075+
static void blkif_get_x86_32_req(struct blkif_request *dst,
1076+
const struct blkif_x86_32_request *src)
1077+
{
1078+
unsigned int i, n;
1079+
1080+
dst->operation = READ_ONCE(src->operation);
1081+
1082+
switch (dst->operation) {
1083+
case BLKIF_OP_READ:
1084+
case BLKIF_OP_WRITE:
1085+
case BLKIF_OP_WRITE_BARRIER:
1086+
case BLKIF_OP_FLUSH_DISKCACHE:
1087+
dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
1088+
dst->u.rw.handle = src->u.rw.handle;
1089+
dst->u.rw.id = src->u.rw.id;
1090+
dst->u.rw.sector_number = src->u.rw.sector_number;
1091+
n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
1092+
dst->u.rw.nr_segments);
1093+
for (i = 0; i < n; i++)
1094+
dst->u.rw.seg[i] = src->u.rw.seg[i];
1095+
break;
1096+
1097+
case BLKIF_OP_DISCARD:
1098+
dst->u.discard.flag = src->u.discard.flag;
1099+
dst->u.discard.id = src->u.discard.id;
1100+
dst->u.discard.sector_number = src->u.discard.sector_number;
1101+
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
1102+
break;
1103+
1104+
case BLKIF_OP_INDIRECT:
1105+
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
1106+
dst->u.indirect.nr_segments =
1107+
READ_ONCE(src->u.indirect.nr_segments);
1108+
dst->u.indirect.handle = src->u.indirect.handle;
1109+
dst->u.indirect.id = src->u.indirect.id;
1110+
dst->u.indirect.sector_number = src->u.indirect.sector_number;
1111+
n = min(MAX_INDIRECT_PAGES,
1112+
INDIRECT_PAGES(dst->u.indirect.nr_segments));
1113+
for (i = 0; i < n; i++)
1114+
dst->u.indirect.indirect_grefs[i] =
1115+
src->u.indirect.indirect_grefs[i];
1116+
break;
1117+
1118+
default:
1119+
/*
1120+
* Don't know how to translate this op. Only get the
1121+
* ID so failure can be reported to the frontend.
1122+
*/
1123+
dst->u.other.id = src->u.other.id;
1124+
break;
1125+
}
1126+
}
10751127

1128+
static void blkif_get_x86_64_req(struct blkif_request *dst,
1129+
const struct blkif_x86_64_request *src)
1130+
{
1131+
unsigned int i, n;
1132+
1133+
dst->operation = READ_ONCE(src->operation);
1134+
1135+
switch (dst->operation) {
1136+
case BLKIF_OP_READ:
1137+
case BLKIF_OP_WRITE:
1138+
case BLKIF_OP_WRITE_BARRIER:
1139+
case BLKIF_OP_FLUSH_DISKCACHE:
1140+
dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
1141+
dst->u.rw.handle = src->u.rw.handle;
1142+
dst->u.rw.id = src->u.rw.id;
1143+
dst->u.rw.sector_number = src->u.rw.sector_number;
1144+
n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
1145+
dst->u.rw.nr_segments);
1146+
for (i = 0; i < n; i++)
1147+
dst->u.rw.seg[i] = src->u.rw.seg[i];
1148+
break;
1149+
1150+
case BLKIF_OP_DISCARD:
1151+
dst->u.discard.flag = src->u.discard.flag;
1152+
dst->u.discard.id = src->u.discard.id;
1153+
dst->u.discard.sector_number = src->u.discard.sector_number;
1154+
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
1155+
break;
1156+
1157+
case BLKIF_OP_INDIRECT:
1158+
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
1159+
dst->u.indirect.nr_segments =
1160+
READ_ONCE(src->u.indirect.nr_segments);
1161+
dst->u.indirect.handle = src->u.indirect.handle;
1162+
dst->u.indirect.id = src->u.indirect.id;
1163+
dst->u.indirect.sector_number = src->u.indirect.sector_number;
1164+
n = min(MAX_INDIRECT_PAGES,
1165+
INDIRECT_PAGES(dst->u.indirect.nr_segments));
1166+
for (i = 0; i < n; i++)
1167+
dst->u.indirect.indirect_grefs[i] =
1168+
src->u.indirect.indirect_grefs[i];
1169+
break;
1170+
1171+
default:
1172+
/*
1173+
* Don't know how to translate this op. Only get the
1174+
* ID so failure can be reported to the frontend.
1175+
*/
1176+
dst->u.other.id = src->u.other.id;
1177+
break;
1178+
}
1179+
}
10761180

10771181
/*
10781182
* Function to copy the 'struct blkif_request' from the ring buffer

drivers/block/xen-blkback/common.h

Lines changed: 0 additions & 96 deletions
Original file line numberDiff line numberDiff line change
@@ -394,100 +394,4 @@ int xen_blkbk_barrier(struct xenbus_transaction xbt,
394394
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
395395
void xen_blkbk_unmap_purged_grants(struct work_struct *work);
396396

397-
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
398-
struct blkif_x86_32_request *src)
399-
{
400-
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
401-
dst->operation = READ_ONCE(src->operation);
402-
switch (dst->operation) {
403-
case BLKIF_OP_READ:
404-
case BLKIF_OP_WRITE:
405-
case BLKIF_OP_WRITE_BARRIER:
406-
case BLKIF_OP_FLUSH_DISKCACHE:
407-
dst->u.rw.nr_segments = src->u.rw.nr_segments;
408-
dst->u.rw.handle = src->u.rw.handle;
409-
dst->u.rw.id = src->u.rw.id;
410-
dst->u.rw.sector_number = src->u.rw.sector_number;
411-
barrier();
412-
if (n > dst->u.rw.nr_segments)
413-
n = dst->u.rw.nr_segments;
414-
for (i = 0; i < n; i++)
415-
dst->u.rw.seg[i] = src->u.rw.seg[i];
416-
break;
417-
case BLKIF_OP_DISCARD:
418-
dst->u.discard.flag = src->u.discard.flag;
419-
dst->u.discard.id = src->u.discard.id;
420-
dst->u.discard.sector_number = src->u.discard.sector_number;
421-
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
422-
break;
423-
case BLKIF_OP_INDIRECT:
424-
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
425-
dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
426-
dst->u.indirect.handle = src->u.indirect.handle;
427-
dst->u.indirect.id = src->u.indirect.id;
428-
dst->u.indirect.sector_number = src->u.indirect.sector_number;
429-
barrier();
430-
j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
431-
for (i = 0; i < j; i++)
432-
dst->u.indirect.indirect_grefs[i] =
433-
src->u.indirect.indirect_grefs[i];
434-
break;
435-
default:
436-
/*
437-
* Don't know how to translate this op. Only get the
438-
* ID so failure can be reported to the frontend.
439-
*/
440-
dst->u.other.id = src->u.other.id;
441-
break;
442-
}
443-
}
444-
445-
static inline void blkif_get_x86_64_req(struct blkif_request *dst,
446-
struct blkif_x86_64_request *src)
447-
{
448-
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
449-
dst->operation = READ_ONCE(src->operation);
450-
switch (dst->operation) {
451-
case BLKIF_OP_READ:
452-
case BLKIF_OP_WRITE:
453-
case BLKIF_OP_WRITE_BARRIER:
454-
case BLKIF_OP_FLUSH_DISKCACHE:
455-
dst->u.rw.nr_segments = src->u.rw.nr_segments;
456-
dst->u.rw.handle = src->u.rw.handle;
457-
dst->u.rw.id = src->u.rw.id;
458-
dst->u.rw.sector_number = src->u.rw.sector_number;
459-
barrier();
460-
if (n > dst->u.rw.nr_segments)
461-
n = dst->u.rw.nr_segments;
462-
for (i = 0; i < n; i++)
463-
dst->u.rw.seg[i] = src->u.rw.seg[i];
464-
break;
465-
case BLKIF_OP_DISCARD:
466-
dst->u.discard.flag = src->u.discard.flag;
467-
dst->u.discard.id = src->u.discard.id;
468-
dst->u.discard.sector_number = src->u.discard.sector_number;
469-
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
470-
break;
471-
case BLKIF_OP_INDIRECT:
472-
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
473-
dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
474-
dst->u.indirect.handle = src->u.indirect.handle;
475-
dst->u.indirect.id = src->u.indirect.id;
476-
dst->u.indirect.sector_number = src->u.indirect.sector_number;
477-
barrier();
478-
j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
479-
for (i = 0; i < j; i++)
480-
dst->u.indirect.indirect_grefs[i] =
481-
src->u.indirect.indirect_grefs[i];
482-
break;
483-
default:
484-
/*
485-
* Don't know how to translate this op. Only get the
486-
* ID so failure can be reported to the frontend.
487-
*/
488-
dst->u.other.id = src->u.other.id;
489-
break;
490-
}
491-
}
492-
493397
#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */

0 commit comments

Comments
 (0)