@@ -190,9 +190,10 @@ static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
190
190
int ret ;
191
191
192
192
/* Check VF state */
193
- if (unlikely (hisi_qm_wait_mb_ready (qm ))) {
193
+ ret = hisi_qm_wait_mb_ready (qm );
194
+ if (unlikely (ret )) {
194
195
dev_err (& qm -> pdev -> dev , "QM device is not ready to write\n" );
195
- return - EBUSY ;
196
+ return ret ;
196
197
}
197
198
198
199
ret = qm_write_regs (qm , QM_VF_AEQ_INT_MASK , & vf_data -> aeq_int_mask , 1 );
@@ -325,13 +326,15 @@ static void qm_dev_cmd_init(struct hisi_qm *qm)
325
326
static int vf_qm_cache_wb (struct hisi_qm * qm )
326
327
{
327
328
unsigned int val ;
329
+ int ret ;
328
330
329
331
writel (0x1 , qm -> io_base + QM_CACHE_WB_START );
330
- if ( readl_relaxed_poll_timeout (qm -> io_base + QM_CACHE_WB_DONE ,
332
+ ret = readl_relaxed_poll_timeout (qm -> io_base + QM_CACHE_WB_DONE ,
331
333
val , val & BIT (0 ), MB_POLL_PERIOD_US ,
332
- MB_POLL_TIMEOUT_US )) {
334
+ MB_POLL_TIMEOUT_US );
335
+ if (ret ) {
333
336
dev_err (& qm -> pdev -> dev , "vf QM writeback sqc cache fail\n" );
334
- return - EINVAL ;
337
+ return ret ;
335
338
}
336
339
337
340
return 0 ;
@@ -350,6 +353,32 @@ static int vf_qm_func_stop(struct hisi_qm *qm)
350
353
return hisi_qm_mb (qm , QM_MB_CMD_PAUSE_QM , 0 , 0 , 0 );
351
354
}
352
355
356
/*
 * Validate the magic/version carried in migration data from the source side
 * and, for the legacy (V1) layout, fix up the DMA addresses in place.
 *
 * Returns 0 when the data is acceptable, -EINVAL on a magic or major-version
 * mismatch.
 */
static int vf_qm_version_check(struct acc_vf_data *vf_data, struct device *dev)
{
	switch (vf_data->acc_magic) {
	case ACC_DEV_MAGIC_V2:
		/*
		 * Only the major version must match; a differing minor
		 * version is reported in the log message but the check
		 * below rejects solely on major_ver.
		 */
		if (vf_data->major_ver != ACC_DRV_MAJOR_VER) {
			dev_info(dev, "migration driver version<%u.%u> not match!\n",
				 vf_data->major_ver, vf_data->minor_ver);
			return -EINVAL;
		}
		break;
	case ACC_DEV_MAGIC_V1:
		/* Correct dma address */
		/*
		 * V1 data does not carry pre-assembled 64-bit queue DMA
		 * addresses; rebuild them here from the 32-bit EQC/AEQC
		 * register dwords (high word shifted up, low word OR'ed in).
		 */
		vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
		vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
		vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW];
		vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH];
		vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
		vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];
		break;
	default:
		/* Unknown magic: migration data is from an incompatible driver. */
		return -EINVAL;
	}

	return 0;
}
381
+
353
382
static int vf_qm_check_match (struct hisi_acc_vf_core_device * hisi_acc_vdev ,
354
383
struct hisi_acc_vf_migration_file * migf )
355
384
{
@@ -363,9 +392,10 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
363
392
if (migf -> total_length < QM_MATCH_SIZE || hisi_acc_vdev -> match_done )
364
393
return 0 ;
365
394
366
- if (vf_data -> acc_magic != ACC_DEV_MAGIC ) {
395
+ ret = vf_qm_version_check (vf_data , dev );
396
+ if (ret ) {
367
397
dev_err (dev , "failed to match ACC_DEV_MAGIC\n" );
368
- return - EINVAL ;
398
+ return ret ;
369
399
}
370
400
371
401
if (vf_data -> dev_id != hisi_acc_vdev -> vf_dev -> device ) {
@@ -377,7 +407,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
377
407
ret = qm_get_vft (vf_qm , & vf_qm -> qp_base );
378
408
if (ret <= 0 ) {
379
409
dev_err (dev , "failed to get vft qp nums\n" );
380
- return - EINVAL ;
410
+ return ret ;
381
411
}
382
412
383
413
if (ret != vf_data -> qp_num ) {
@@ -399,13 +429,6 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
399
429
return - EINVAL ;
400
430
}
401
431
402
- ret = qm_write_regs (vf_qm , QM_VF_STATE , & vf_data -> vf_qm_state , 1 );
403
- if (ret ) {
404
- dev_err (dev , "failed to write QM_VF_STATE\n" );
405
- return ret ;
406
- }
407
-
408
- hisi_acc_vdev -> vf_qm_state = vf_data -> vf_qm_state ;
409
432
hisi_acc_vdev -> match_done = true;
410
433
return 0 ;
411
434
}
@@ -418,7 +441,9 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
418
441
int vf_id = hisi_acc_vdev -> vf_id ;
419
442
int ret ;
420
443
421
- vf_data -> acc_magic = ACC_DEV_MAGIC ;
444
+ vf_data -> acc_magic = ACC_DEV_MAGIC_V2 ;
445
+ vf_data -> major_ver = ACC_DRV_MAJOR_VER ;
446
+ vf_data -> minor_ver = ACC_DRV_MINOR_VER ;
422
447
/* Save device id */
423
448
vf_data -> dev_id = hisi_acc_vdev -> vf_dev -> device ;
424
449
@@ -441,6 +466,19 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
441
466
return 0 ;
442
467
}
443
468
469
/*
 * Ring the EQ and AEQ doorbells with the head indices captured in the
 * migration data, so the event-queue interrupt state is preserved across
 * the save.
 *
 * NOTE(review): the low 16 bits of qm_eqc_dw[0]/qm_aeqc_dw[0] are taken as
 * the queue head indices — presumably per the QM EQC/AEQC register layout;
 * confirm against the hardware spec.
 */
static void vf_qm_xeqc_save(struct hisi_qm *qm,
			    struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	u16 eq_head, aeq_head;

	/* Replay the EQ head via its doorbell. */
	eq_head = vf_data->qm_eqc_dw[0] & 0xFFFF;
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, eq_head, 0);

	/* Replay the AEQ head via its doorbell. */
	aeq_head = vf_data->qm_aeqc_dw[0] & 0xFFFF;
	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, aeq_head, 0);
}
481
+
444
482
static int vf_qm_load_data (struct hisi_acc_vf_core_device * hisi_acc_vdev ,
445
483
struct hisi_acc_vf_migration_file * migf )
446
484
{
@@ -456,6 +494,20 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
456
494
if (migf -> total_length < sizeof (struct acc_vf_data ))
457
495
return - EINVAL ;
458
496
497
+ if (!vf_data -> eqe_dma || !vf_data -> aeqe_dma ||
498
+ !vf_data -> sqc_dma || !vf_data -> cqc_dma ) {
499
+ dev_info (dev , "resume dma addr is NULL!\n" );
500
+ hisi_acc_vdev -> vf_qm_state = QM_NOT_READY ;
501
+ return 0 ;
502
+ }
503
+
504
+ ret = qm_write_regs (qm , QM_VF_STATE , & vf_data -> vf_qm_state , 1 );
505
+ if (ret ) {
506
+ dev_err (dev , "failed to write QM_VF_STATE\n" );
507
+ return ret ;
508
+ }
509
+ hisi_acc_vdev -> vf_qm_state = vf_data -> vf_qm_state ;
510
+
459
511
qm -> eqe_dma = vf_data -> eqe_dma ;
460
512
qm -> aeqe_dma = vf_data -> aeqe_dma ;
461
513
qm -> sqc_dma = vf_data -> sqc_dma ;
@@ -493,27 +545,27 @@ static int vf_qm_read_data(struct hisi_qm *vf_qm, struct acc_vf_data *vf_data)
493
545
494
546
ret = qm_get_regs (vf_qm , vf_data );
495
547
if (ret )
496
- return - EINVAL ;
548
+ return ret ;
497
549
498
550
/* Every reg is 32 bit, the dma address is 64 bit. */
499
- vf_data -> eqe_dma = vf_data -> qm_eqc_dw [1 ];
551
+ vf_data -> eqe_dma = vf_data -> qm_eqc_dw [QM_XQC_ADDR_HIGH ];
500
552
vf_data -> eqe_dma <<= QM_XQC_ADDR_OFFSET ;
501
- vf_data -> eqe_dma |= vf_data -> qm_eqc_dw [0 ];
502
- vf_data -> aeqe_dma = vf_data -> qm_aeqc_dw [1 ];
553
+ vf_data -> eqe_dma |= vf_data -> qm_eqc_dw [QM_XQC_ADDR_LOW ];
554
+ vf_data -> aeqe_dma = vf_data -> qm_aeqc_dw [QM_XQC_ADDR_HIGH ];
503
555
vf_data -> aeqe_dma <<= QM_XQC_ADDR_OFFSET ;
504
- vf_data -> aeqe_dma |= vf_data -> qm_aeqc_dw [0 ];
556
+ vf_data -> aeqe_dma |= vf_data -> qm_aeqc_dw [QM_XQC_ADDR_LOW ];
505
557
506
558
/* Through SQC_BT/CQC_BT to get sqc and cqc address */
507
559
ret = qm_get_sqc (vf_qm , & vf_data -> sqc_dma );
508
560
if (ret ) {
509
561
dev_err (dev , "failed to read SQC addr!\n" );
510
- return - EINVAL ;
562
+ return ret ;
511
563
}
512
564
513
565
ret = qm_get_cqc (vf_qm , & vf_data -> cqc_dma );
514
566
if (ret ) {
515
567
dev_err (dev , "failed to read CQC addr!\n" );
516
- return - EINVAL ;
568
+ return ret ;
517
569
}
518
570
519
571
return 0 ;
@@ -524,7 +576,6 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
524
576
{
525
577
struct acc_vf_data * vf_data = & migf -> vf_data ;
526
578
struct hisi_qm * vf_qm = & hisi_acc_vdev -> vf_qm ;
527
- struct device * dev = & vf_qm -> pdev -> dev ;
528
579
int ret ;
529
580
530
581
if (unlikely (qm_wait_dev_not_ready (vf_qm ))) {
@@ -538,17 +589,14 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
538
589
vf_data -> vf_qm_state = QM_READY ;
539
590
hisi_acc_vdev -> vf_qm_state = vf_data -> vf_qm_state ;
540
591
541
- ret = vf_qm_cache_wb (vf_qm );
542
- if (ret ) {
543
- dev_err (dev , "failed to writeback QM Cache!\n" );
544
- return ret ;
545
- }
546
-
547
592
ret = vf_qm_read_data (vf_qm , vf_data );
548
593
if (ret )
549
- return - EINVAL ;
594
+ return ret ;
550
595
551
596
migf -> total_length = sizeof (struct acc_vf_data );
597
+ /* Save eqc and aeqc interrupt information */
598
+ vf_qm_xeqc_save (vf_qm , migf );
599
+
552
600
return 0 ;
553
601
}
554
602
@@ -967,6 +1015,13 @@ static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev
967
1015
dev_err (dev , "failed to check QM INT state!\n" );
968
1016
return ret ;
969
1017
}
1018
+
1019
+ ret = vf_qm_cache_wb (vf_qm );
1020
+ if (ret ) {
1021
+ dev_err (dev , "failed to writeback QM cache!\n" );
1022
+ return ret ;
1023
+ }
1024
+
970
1025
return 0 ;
971
1026
}
972
1027
@@ -1327,7 +1382,7 @@ static int hisi_acc_vf_debug_check(struct seq_file *seq, struct vfio_device *vde
1327
1382
ret = qm_wait_dev_not_ready (vf_qm );
1328
1383
if (ret ) {
1329
1384
seq_puts (seq , "VF device not ready!\n" );
1330
- return - EBUSY ;
1385
+ return ret ;
1331
1386
}
1332
1387
1333
1388
return 0 ;
@@ -1463,6 +1518,7 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
1463
1518
struct hisi_acc_vf_core_device * hisi_acc_vdev = hisi_acc_get_vf_dev (core_vdev );
1464
1519
struct hisi_qm * vf_qm = & hisi_acc_vdev -> vf_qm ;
1465
1520
1521
+ hisi_acc_vf_disable_fds (hisi_acc_vdev );
1466
1522
mutex_lock (& hisi_acc_vdev -> open_mutex );
1467
1523
hisi_acc_vdev -> dev_opened = false;
1468
1524
iounmap (vf_qm -> io_base );
@@ -1485,6 +1541,7 @@ static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
1485
1541
hisi_acc_vdev -> vf_id = pci_iov_vf_id (pdev ) + 1 ;
1486
1542
hisi_acc_vdev -> pf_qm = pf_qm ;
1487
1543
hisi_acc_vdev -> vf_dev = pdev ;
1544
+ hisi_acc_vdev -> vf_qm_state = QM_NOT_READY ;
1488
1545
mutex_init (& hisi_acc_vdev -> state_mutex );
1489
1546
mutex_init (& hisi_acc_vdev -> open_mutex );
1490
1547
0 commit comments