 struct kvm_pgtable_walk_data {
 	struct kvm_pgtable_walker	*walker;
 
+	const u64			start;
 	u64				addr;
-	u64				end;
+	const u64			end;
 };
 
 static bool kvm_phys_is_valid(u64 phys)
@@ -201,6 +202,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
 		.old	= READ_ONCE(*ptep),
 		.arg	= data->walker->arg,
 		.mm_ops	= mm_ops,
+		.start	= data->start,
 		.addr	= data->addr,
 		.end	= data->end,
 		.level	= level,
@@ -293,6 +295,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
 			     struct kvm_pgtable_walker *walker)
 {
 	struct kvm_pgtable_walk_data walk_data = {
+		.start	= ALIGN_DOWN(addr, PAGE_SIZE),
 		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
 		.end	= PAGE_ALIGN(walk_data.addr + size),
 		.walker	= walker,
@@ -349,7 +352,7 @@ int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
 }
 
 struct hyp_map_data {
-	u64				phys;
+	const u64			phys;
 	kvm_pte_t			attr;
 };
 
@@ -407,13 +410,12 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
 static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 				    struct hyp_map_data *data)
 {
+	u64 phys = data->phys + (ctx->addr - ctx->start);
 	kvm_pte_t new;
-	u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
 
 	if (!kvm_block_mapping_supported(ctx, phys))
 		return false;
 
-	data->phys += granule;
 	new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
 	if (ctx->old == new)
 		return true;
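The hunk above changes how the hyp map walker arrives at the target PA: rather than advancing data->phys by a granule after each leaf, the PA is derived from how far the walk has progressed past its starting address. A minimal standalone sketch of that arithmetic, with hypothetical names and example addresses standing in for ctx->start, ctx->addr and data->phys:

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-ins for ctx->start, ctx->addr and data->phys. */
static uint64_t leaf_phys(uint64_t walk_start, uint64_t visit_addr,
			  uint64_t map_phys)
{
	/* Same relation as in the patch: data->phys + (ctx->addr - ctx->start). */
	return map_phys + (visit_addr - walk_start);
}

int main(void)
{
	const uint64_t start = 0x40000000, phys = 0x80000000;

	/*
	 * The PA of any visited address follows from the distance already
	 * walked, so it no longer depends on mutable state carried from one
	 * visit to the next.
	 */
	assert(leaf_phys(start, start, phys) == 0x80000000);
	assert(leaf_phys(start, start + 0x200000, phys) == 0x80200000);
	return 0;
}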
@@ -576,7 +578,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
 }
 
 struct stage2_map_data {
-	u64				phys;
+	const u64			phys;
 	kvm_pte_t			attr;
 	u8				owner_id;
 
@@ -794,20 +796,43 @@ static bool stage2_pte_executable(kvm_pte_t pte)
 	return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
 }
 
+static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
+				       const struct stage2_map_data *data)
+{
+	u64 phys = data->phys;
+
+	/*
+	 * Stage-2 walks to update ownership data are communicated to the map
+	 * walker using an invalid PA. Avoid offsetting an already invalid PA,
+	 * which could overflow and make the address valid again.
+	 */
+	if (!kvm_phys_is_valid(phys))
+		return phys;
+
+	/*
+	 * Otherwise, work out the correct PA based on how far the walk has
+	 * gotten.
+	 */
+	return phys + (ctx->addr - ctx->start);
+}
+
 static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
 					struct stage2_map_data *data)
 {
+	u64 phys = stage2_map_walker_phys_addr(ctx, data);
+
 	if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
 		return false;
 
-	return kvm_block_mapping_supported(ctx, data->phys);
+	return kvm_block_mapping_supported(ctx, phys);
 }
 
 static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 				      struct stage2_map_data *data)
 {
 	kvm_pte_t new;
-	u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
+	u64 phys = stage2_map_walker_phys_addr(ctx, data);
+	u64 granule = kvm_granule_size(ctx->level);
 	struct kvm_pgtable *pgt = data->mmu->pgt;
 	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
 
@@ -841,8 +866,6 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 
 	stage2_make_pte(ctx, new);
 
-	if (kvm_phys_is_valid(phys))
-		data->phys += granule;
 	return 0;
 }
 
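For the stage-2 walker, stage2_map_walker_phys_addr() applies the same offset but first bails out on an invalid PA, since ownership-update walks hand the map walker a sentinel PA and offsetting it could wrap it back into the valid range. A rough sketch of that guard follows; kvm_phys_is_valid() really checks against the system's supported PA range, so the 48-bit limit, the all-ones sentinel and the names below are assumptions for illustration only:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SENTINEL_INVALID_PHYS	(~(uint64_t)0)		/* assumed all-ones sentinel */
#define PHYS_RANGE_END		((uint64_t)1 << 48)	/* assumed 48-bit PA space */

static bool phys_is_valid(uint64_t phys)
{
	return phys < PHYS_RANGE_END;
}

/*
 * Mirrors the shape of stage2_map_walker_phys_addr(): never offset an
 * invalid PA, because the addition could make it look valid again.
 */
static uint64_t walker_phys_addr(uint64_t start, uint64_t addr, uint64_t phys)
{
	if (!phys_is_valid(phys))
		return phys;

	return phys + (addr - start);
}

int main(void)
{
	/* Naively offsetting the sentinel wraps: ~0 + 0x1000 == 0xfff, "valid". */
	assert(phys_is_valid(SENTINEL_INVALID_PHYS + 0x1000));

	/* The guarded helper leaves the sentinel untouched. */
	assert(walker_phys_addr(0x40000000, 0x40001000,
				SENTINEL_INVALID_PHYS) == SENTINEL_INVALID_PHYS);
	return 0;
}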