|
/*
 * Per-walk bookkeeping threaded through the page-table walker.
 * A copy of the relevant fields is handed to each visitor via
 * struct kvm_pgtable_visit_ctx.
 */
struct kvm_pgtable_walk_data {
	struct kvm_pgtable_walker	*walker;

	/*
	 * IPA at which this walk began; set to ALIGN_DOWN(addr, PAGE_SIZE)
	 * in kvm_pgtable_walk() and never advanced, so visitors can compute
	 * how far the walk has progressed (addr - start).
	 */
	u64				start;
	/* Current IPA the walk is visiting (page-aligned at the outset). */
	u64				addr;
	/* Exclusive, page-aligned end of the range being walked. */
	u64				end;
};
|
@@ -201,6 +202,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
|
201 | 202 | .old = READ_ONCE(*ptep),
|
202 | 203 | .arg = data->walker->arg,
|
203 | 204 | .mm_ops = mm_ops,
|
| 205 | + .start = data->start, |
204 | 206 | .addr = data->addr,
|
205 | 207 | .end = data->end,
|
206 | 208 | .level = level,
|
@@ -293,6 +295,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
|
293 | 295 | struct kvm_pgtable_walker *walker)
|
294 | 296 | {
|
295 | 297 | struct kvm_pgtable_walk_data walk_data = {
|
| 298 | + .start = ALIGN_DOWN(addr, PAGE_SIZE), |
296 | 299 | .addr = ALIGN_DOWN(addr, PAGE_SIZE),
|
297 | 300 | .end = PAGE_ALIGN(walk_data.addr + size),
|
298 | 301 | .walker = walker,
|
@@ -794,20 +797,43 @@ static bool stage2_pte_executable(kvm_pte_t pte)
|
794 | 797 | return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
|
795 | 798 | }
|
796 | 799 |
|
| 800 | +static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx, |
| 801 | + const struct stage2_map_data *data) |
| 802 | +{ |
| 803 | + u64 phys = data->phys; |
| 804 | + |
| 805 | + /* |
| 806 | + * Stage-2 walks to update ownership data are communicated to the map |
| 807 | + * walker using an invalid PA. Avoid offsetting an already invalid PA, |
| 808 | + * which could overflow and make the address valid again. |
| 809 | + */ |
| 810 | + if (!kvm_phys_is_valid(phys)) |
| 811 | + return phys; |
| 812 | + |
| 813 | + /* |
| 814 | + * Otherwise, work out the correct PA based on how far the walk has |
| 815 | + * gotten. |
| 816 | + */ |
| 817 | + return phys + (ctx->addr - ctx->start); |
| 818 | +} |
| 819 | + |
797 | 820 | static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
|
798 | 821 | struct stage2_map_data *data)
|
799 | 822 | {
|
| 823 | + u64 phys = stage2_map_walker_phys_addr(ctx, data); |
| 824 | + |
800 | 825 | if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
|
801 | 826 | return false;
|
802 | 827 |
|
803 |
| - return kvm_block_mapping_supported(ctx, data->phys); |
| 828 | + return kvm_block_mapping_supported(ctx, phys); |
804 | 829 | }
|
805 | 830 |
|
806 | 831 | static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
|
807 | 832 | struct stage2_map_data *data)
|
808 | 833 | {
|
809 | 834 | kvm_pte_t new;
|
810 |
| - u64 granule = kvm_granule_size(ctx->level), phys = data->phys; |
| 835 | + u64 phys = stage2_map_walker_phys_addr(ctx, data); |
| 836 | + u64 granule = kvm_granule_size(ctx->level); |
811 | 837 | struct kvm_pgtable *pgt = data->mmu->pgt;
|
812 | 838 | struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
|
813 | 839 |
|
@@ -841,8 +867,6 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
|
841 | 867 |
|
842 | 868 | stage2_make_pte(ctx, new);
|
843 | 869 |
|
844 |
| - if (kvm_phys_is_valid(phys)) |
845 |
| - data->phys += granule; |
846 | 870 | return 0;
|
847 | 871 | }
|
848 | 872 |
|
|
0 commit comments