#include <asm/sections.h>
#include <asm/uv.h>

-#if !IS_ENABLED(CONFIG_KVM)
-unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
-{
-        return 0;
-}
-
-int gmap_fault(struct gmap *gmap, unsigned long gaddr,
-               unsigned int fault_flags)
-{
-        return 0;
-}
-#endif
-
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);
@@ -159,6 +146,7 @@ int uv_destroy_folio(struct folio *folio)
        folio_put(folio);
        return rc;
}
+EXPORT_SYMBOL(uv_destroy_folio);

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
@@ -175,7 +163,7 @@ int uv_destroy_pte(pte_t pte)
 *
 * @paddr: Absolute host address of page to be exported
 */
-static int uv_convert_from_secure(unsigned long paddr)
+int uv_convert_from_secure(unsigned long paddr)
{
        struct uv_cb_cfs uvcb = {
                .header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
@@ -187,11 +175,12 @@ static int uv_convert_from_secure(unsigned long paddr)
                return -EINVAL;
        return 0;
}
+EXPORT_SYMBOL_GPL(uv_convert_from_secure);

/*
 * The caller must already hold a reference to the folio.
 */
-static int uv_convert_from_secure_folio(struct folio *folio)
+int uv_convert_from_secure_folio(struct folio *folio)
{
        int rc;

@@ -206,6 +195,7 @@ static int uv_convert_from_secure_folio(struct folio *folio)
        folio_put(folio);
        return rc;
}
+EXPORT_SYMBOL_GPL(uv_convert_from_secure_folio);

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
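/*
 * Reviewer note, not part of the diff: with uv_destroy_folio() and
 * uv_convert_from_secure_folio() both exported above, code outside this file
 * could reproduce the destroy-then-export fallback used by the removed
 * gmap_destroy_page() further down. This is only a minimal sketch; the
 * helper name uv_destroy_or_export_folio() is made up for illustration, and
 * the caller is assumed to already hold a reference to the folio, as both
 * exported functions require.
 */
static int uv_destroy_or_export_folio(struct folio *folio)
{
        int rc;

        /* Try to destroy the secure page first. */
        rc = uv_destroy_folio(folio);
        /* If that fails (e.g. the page was re-imported), fall back to an export. */
        if (rc)
                rc = uv_convert_from_secure_folio(folio);
        return rc;
}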
@@ -237,13 +227,33 @@ static int expected_folio_refs(struct folio *folio)
        return res;
}

-static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
+/**
+ * make_folio_secure() - make a folio secure
+ * @folio: the folio to make secure
+ * @uvcb: the uvcb that describes the UVC to be used
+ *
+ * The folio @folio will be made secure if possible, @uvcb will be passed
+ * as-is to the UVC.
+ *
+ * Return: 0 on success;
+ *         -EBUSY if the folio is in writeback or has too many references;
+ *         -E2BIG if the folio is large;
+ *         -EAGAIN if the UVC needs to be attempted again;
+ *         -ENXIO if the address is not mapped;
+ *         -EINVAL if the UVC failed for other reasons.
+ *
+ * Context: The caller must hold exactly one extra reference on the folio
+ *          (it's the same logic as split_folio())
+ */
+int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
        int expected, cc = 0;

+        if (folio_test_large(folio))
+                return -E2BIG;
        if (folio_test_writeback(folio))
-                return -EAGAIN;
-        expected = expected_folio_refs(folio);
+                return -EBUSY;
+        expected = expected_folio_refs(folio) + 1;
        if (!folio_ref_freeze(folio, expected))
                return -EBUSY;
        set_bit(PG_arch_1, &folio->flags);
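/*
 * Reviewer note, not part of the diff: a minimal sketch of how a caller might
 * use the newly exported make_folio_secure() and react to its documented
 * return codes, based only on the kernel-doc above and on the retry logic of
 * the removed gmap_make_secure() below. The helper name
 * try_make_folio_secure() is made up for illustration; the caller is assumed
 * to hold exactly one extra folio reference, as the Context section requires,
 * and the folio is locked around the call because the removed
 * gmap_make_secure() does the same.
 */
static int try_make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
        int rc;

        if (!folio_trylock(folio))
                return -EAGAIN;
        rc = make_folio_secure(folio, uvcb);
        folio_unlock(folio);

        switch (rc) {
        case -E2BIG:
                /* Large folio: try to split it, then let the caller retry. */
                folio_lock(folio);
                rc = split_folio(folio);
                folio_unlock(folio);
                return rc ? rc : -EAGAIN;
        case -EBUSY:
                /* Extra references, typically LRU pagevecs: drain and retry. */
                lru_add_drain();
                return -EAGAIN;
        case -EAGAIN:
                /* UVC busy or folio under writeback: wait, then retry. */
                folio_wait_writeback(folio);
                return -EAGAIN;
        default:
                return rc;
        }
}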
@@ -267,251 +277,7 @@ static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
                return -EAGAIN;
        return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}
-
-/**
- * should_export_before_import - Determine whether an export is needed
- *                               before an import-like operation
- * @uvcb: the Ultravisor control block of the UVC to be performed
- * @mm: the mm of the process
- *
- * Returns whether an export is needed before every import-like operation.
- * This is needed for shared pages, which don't trigger a secure storage
- * exception when accessed from a different guest.
- *
- * Although considered as one, the Unpin Page UVC is not an actual import,
- * so it is not affected.
- *
- * No export is needed also when there is only one protected VM, because the
- * page cannot belong to the wrong VM in that case (there is no "other VM"
- * it can belong to).
- *
- * Return: true if an export is needed before every import, otherwise false.
- */
-static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
-{
-        /*
-         * The misc feature indicates, among other things, that importing a
-         * shared page from a different protected VM will automatically also
-         * transfer its ownership.
-         */
-        if (uv_has_feature(BIT_UV_FEAT_MISC))
-                return false;
-        if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
-                return false;
-        return atomic_read(&mm->context.protected_count) > 1;
-}
-
-/*
- * Drain LRU caches: the local one on first invocation and the ones of all
- * CPUs on successive invocations. Returns "true" on the first invocation.
- */
-static bool drain_lru(bool *drain_lru_called)
-{
-        /*
-         * If we have tried a local drain and the folio refcount
-         * still does not match our expected safe value, try with a
-         * system wide drain. This is needed if the pagevecs holding
-         * the page are on a different CPU.
-         */
-        if (*drain_lru_called) {
-                lru_add_drain_all();
-                /* We give up here, don't retry immediately. */
-                return false;
-        }
-        /*
-         * We are here if the folio refcount does not match the
-         * expected safe value. The main culprits are usually
-         * pagevecs. With lru_add_drain() we drain the pagevecs
-         * on the local CPU so that hopefully the refcount will
-         * reach the expected safe value.
-         */
-        lru_add_drain();
-        *drain_lru_called = true;
-        /* The caller should try again immediately */
-        return true;
-}
-
-/*
- * Requests the Ultravisor to make a page accessible to a guest.
- * If it's brought in the first time, it will be cleared. If
- * it has been exported before, it will be decrypted and integrity
- * checked.
- */
-int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
-{
-        struct vm_area_struct *vma;
-        bool drain_lru_called = false;
-        spinlock_t *ptelock;
-        unsigned long uaddr;
-        struct folio *folio;
-        pte_t *ptep;
-        int rc;
-
-again:
-        rc = -EFAULT;
-        mmap_read_lock(gmap->mm);
-
-        uaddr = __gmap_translate(gmap, gaddr);
-        if (IS_ERR_VALUE(uaddr))
-                goto out;
-        vma = vma_lookup(gmap->mm, uaddr);
-        if (!vma)
-                goto out;
-        /*
-         * Secure pages cannot be huge and userspace should not combine both.
-         * In case userspace does it anyway this will result in an -EFAULT for
-         * the unpack. The guest is thus never reaching secure mode. If
-         * userspace is playing dirty tricky with mapping huge pages later
-         * on this will result in a segmentation fault.
-         */
-        if (is_vm_hugetlb_page(vma))
-                goto out;
-
-        rc = -ENXIO;
-        ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
-        if (!ptep)
-                goto out;
-        if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
-                folio = page_folio(pte_page(*ptep));
-                rc = -EAGAIN;
-                if (folio_test_large(folio)) {
-                        rc = -E2BIG;
-                } else if (folio_trylock(folio)) {
-                        if (should_export_before_import(uvcb, gmap->mm))
-                                uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
-                        rc = make_folio_secure(folio, uvcb);
-                        folio_unlock(folio);
-                }
-
-                /*
-                 * Once we drop the PTL, the folio may get unmapped and
-                 * freed immediately. We need a temporary reference.
-                 */
-                if (rc == -EAGAIN || rc == -E2BIG)
-                        folio_get(folio);
-        }
-        pte_unmap_unlock(ptep, ptelock);
-out:
-        mmap_read_unlock(gmap->mm);
-
-        switch (rc) {
-        case -E2BIG:
-                folio_lock(folio);
-                rc = split_folio(folio);
-                folio_unlock(folio);
-                folio_put(folio);
-
-                switch (rc) {
-                case 0:
-                        /* Splitting succeeded, try again immediately. */
-                        goto again;
-                case -EAGAIN:
-                        /* Additional folio references. */
-                        if (drain_lru(&drain_lru_called))
-                                goto again;
-                        return -EAGAIN;
-                case -EBUSY:
-                        /* Unexpected race. */
-                        return -EAGAIN;
-                }
-                WARN_ON_ONCE(1);
-                return -ENXIO;
-        case -EAGAIN:
-                /*
-                 * If we are here because the UVC returned busy or partial
-                 * completion, this is just a useless check, but it is safe.
-                 */
-                folio_wait_writeback(folio);
-                folio_put(folio);
-                return -EAGAIN;
-        case -EBUSY:
-                /* Additional folio references. */
-                if (drain_lru(&drain_lru_called))
-                        goto again;
-                return -EAGAIN;
-        case -ENXIO:
-                if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
-                        return -EFAULT;
-                return -EAGAIN;
-        }
-        return rc;
-}
-EXPORT_SYMBOL_GPL(gmap_make_secure);
-
-int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
-{
-        struct uv_cb_cts uvcb = {
-                .header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
-                .header.len = sizeof(uvcb),
-                .guest_handle = gmap->guest_handle,
-                .gaddr = gaddr,
-        };
-
-        return gmap_make_secure(gmap, gaddr, &uvcb);
-}
-EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
-
-/**
- * gmap_destroy_page - Destroy a guest page.
- * @gmap: the gmap of the guest
- * @gaddr: the guest address to destroy
- *
- * An attempt will be made to destroy the given guest page. If the attempt
- * fails, an attempt is made to export the page. If both attempts fail, an
- * appropriate error is returned.
- */
-int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
-{
-        struct vm_area_struct *vma;
-        struct folio_walk fw;
-        unsigned long uaddr;
-        struct folio *folio;
-        int rc;
-
-        rc = -EFAULT;
-        mmap_read_lock(gmap->mm);
-
-        uaddr = __gmap_translate(gmap, gaddr);
-        if (IS_ERR_VALUE(uaddr))
-                goto out;
-        vma = vma_lookup(gmap->mm, uaddr);
-        if (!vma)
-                goto out;
-        /*
-         * Huge pages should not be able to become secure
-         */
-        if (is_vm_hugetlb_page(vma))
-                goto out;
-
-        rc = 0;
-        folio = folio_walk_start(&fw, vma, uaddr, 0);
-        if (!folio)
-                goto out;
-        /*
-         * See gmap_make_secure(): large folios cannot be secure. Small
-         * folio implies FW_LEVEL_PTE.
-         */
-        if (folio_test_large(folio) || !pte_write(fw.pte))
-                goto out_walk_end;
-        rc = uv_destroy_folio(folio);
-        /*
-         * Fault handlers can race; it is possible that two CPUs will fault
-         * on the same secure page. One CPU can destroy the page, reboot,
-         * re-enter secure mode and import it, while the second CPU was
-         * stuck at the beginning of the handler. At some point the second
-         * CPU will be able to progress, and it will not be able to destroy
-         * the page. In that case we do not want to terminate the process,
-         * we instead try to export the page.
-         */
-        if (rc)
-                rc = uv_convert_from_secure_folio(folio);
-out_walk_end:
-        folio_walk_end(&fw, vma);
-out:
-        mmap_read_unlock(gmap->mm);
-        return rc;
-}
-EXPORT_SYMBOL_GPL(gmap_destroy_page);
+EXPORT_SYMBOL_GPL(make_folio_secure);

/*
 * To be called with the folio locked or with an extra reference! This will