|
34 | 34 | #define PT_HAVE_ACCESSED_DIRTY(mmu) true
|
35 | 35 | #ifdef CONFIG_X86_64
|
36 | 36 | #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
|
37 |
| - #define CMPXCHG cmpxchg |
| 37 | + #define CMPXCHG "cmpxchgq" |
38 | 38 | #else
|
39 |
| - #define CMPXCHG cmpxchg64 |
40 | 39 | #define PT_MAX_FULL_LEVELS 2
|
41 | 40 | #endif
|
42 | 41 | #elif PTTYPE == 32
|
|
52 | 51 | #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
|
53 | 52 | #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
|
54 | 53 | #define PT_HAVE_ACCESSED_DIRTY(mmu) true
|
55 |
| - #define CMPXCHG cmpxchg |
| 54 | + #define CMPXCHG "cmpxchgl" |
56 | 55 | #elif PTTYPE == PTTYPE_EPT
|
57 | 56 | #define pt_element_t u64
|
58 | 57 | #define guest_walker guest_walkerEPT
|
|
65 | 64 | #define PT_GUEST_DIRTY_SHIFT 9
|
66 | 65 | #define PT_GUEST_ACCESSED_SHIFT 8
|
67 | 66 | #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
|
68 |
| - #define CMPXCHG cmpxchg64 |
| 67 | + #ifdef CONFIG_X86_64 |
| 68 | + #define CMPXCHG "cmpxchgq" |
| 69 | + #endif |
69 | 70 | #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
|
70 | 71 | #else
|
71 | 72 | #error Invalid PTTYPE value
|
/*
 * Atomically compare-and-exchange a guest PTE directly through its user
 * (HVA) mapping, instead of pinning the page with get_user_pages() and
 * kmapping it.
 *
 * Returns 0 if *ptep_user held orig_pte and was replaced with new_pte,
 * a positive value if the CMPXCHG failed (current value != orig_pte),
 * or -EFAULT if the user address is inaccessible or the access faults.
 *
 * NOTE(review): @vcpu, @mmu and @index are not referenced in this body —
 * presumably retained so the signature matches the other FNAME() helpers
 * and their callers; confirm before removing.
 */
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	/*
	 * signed char is wide enough for all three results: 0, 1 (from
	 * SETNZ), and -EFAULT (-14) planted by the exception fixup.
	 */
	signed char r;

	/* Open a user-access window (STAC/SMAP); bail if the range is bad. */
	if (!user_access_begin(ptep_user, sizeof(pt_element_t)))
		return -EFAULT;

#ifdef CMPXCHG
	/*
	 * Native-width path: CMPXCHG is "cmpxchgq" (64-bit PTEs on x86_64)
	 * or "cmpxchgl" (32-bit PTEs).  If the user access at label 1
	 * faults, the extable entry jumps to label 2 after writing -EFAULT
	 * into the full register %k[r]; otherwise SETNZ leaves 0 on a
	 * successful exchange (ZF set) or 1 on a value mismatch.
	 */
	asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n"
		     "setnz %b[r]\n"
		     "2:"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
		     : [ptr] "+m" (*ptep_user),
		       [old] "+a" (orig_pte),
		       [r] "=q" (r)
		     : [new] "r" (new_pte)
		     : "memory");
#else
	/*
	 * 64-bit PTE on a 32-bit kernel (CMPXCHG undefined): use CMPXCHG8B,
	 * which implicitly takes the comparand in EDX:EAX ("+A") and the
	 * new value in ECX:EBX — hence the fixed "b"/"c" constraints on the
	 * split halves of new_pte.  Fault handling is identical to above.
	 */
	asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n"
		     "setnz %b[r]\n"
		     "2:"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
		     : [ptr] "+m" (*ptep_user),
		       [old] "+A" (orig_pte),
		       [r] "=q" (r)
		     : [new_lo] "b" ((u32)new_pte),
		       [new_hi] "c" ((u32)(new_pte >> 32))
		     : "memory");
#endif

	/* Close the user-access window opened above (CLAC). */
	user_access_end();
	return r;
}
|
188 | 182 |
|
189 | 183 | static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
|
|
0 commit comments