@@ -349,40 +349,39 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
 	return true;
 }
 
-static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
-			     struct cxl_memdev_state *mds, u16 opcode,
+static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox_cmd,
+			     struct cxl_mailbox *cxl_mbox, u16 opcode,
 			     size_t in_size, size_t out_size, u64 in_payload)
 {
-	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
-	*mbox = (struct cxl_mbox_cmd) {
+	*mbox_cmd = (struct cxl_mbox_cmd) {
 		.opcode = opcode,
 		.size_in = in_size,
 	};
 
 	if (in_size) {
-		mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
-						in_size);
-		if (IS_ERR(mbox->payload_in))
-			return PTR_ERR(mbox->payload_in);
+		mbox_cmd->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
+						    in_size);
+		if (IS_ERR(mbox_cmd->payload_in))
+			return PTR_ERR(mbox_cmd->payload_in);
 
-		if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
-			dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n",
+		if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in)) {
+			dev_dbg(cxl_mbox->host, "%s: input payload not allowed\n",
 				cxl_mem_opcode_to_name(opcode));
-			kvfree(mbox->payload_in);
+			kvfree(mbox_cmd->payload_in);
 			return -EBUSY;
 		}
 	}
 
 	/* Prepare to handle a full payload for variable sized output */
 	if (out_size == CXL_VARIABLE_PAYLOAD)
-		mbox->size_out = cxl_mbox->payload_size;
+		mbox_cmd->size_out = cxl_mbox->payload_size;
 	else
-		mbox->size_out = out_size;
+		mbox_cmd->size_out = out_size;
 
-	if (mbox->size_out) {
-		mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
-		if (!mbox->payload_out) {
-			kvfree(mbox->payload_in);
+	if (mbox_cmd->size_out) {
+		mbox_cmd->payload_out = kvzalloc(mbox_cmd->size_out, GFP_KERNEL);
+		if (!mbox_cmd->payload_out) {
+			kvfree(mbox_cmd->payload_in);
 			return -ENOMEM;
 		}
 	}
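Context note (not part of this patch): the next hunk's header references cxl_mbox_cmd_dtor(), the teardown paired with the constructor above. Assuming that helper is untouched by this series, it presumably just releases the two payload buffers allocated here; a minimal sketch:

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
	/* sketch of the assumed existing helper: free what the ctor allocated */
	kvfree(mbox->payload_in);
	kvfree(mbox->payload_out);
}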
@@ -397,10 +396,8 @@ static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
 
 static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
 			      const struct cxl_send_command *send_cmd,
-			      struct cxl_memdev_state *mds)
+			      struct cxl_mailbox *cxl_mbox)
 {
-	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
-
 	if (send_cmd->raw.rsvd)
 		return -EINVAL;
 
@@ -415,7 +412,7 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
 	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
 		return -EPERM;
 
-	dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n");
+	dev_WARN_ONCE(cxl_mbox->host, true, "raw command path used\n");
 
 	*mem_cmd = (struct cxl_mem_command) {
 		.info = {
@@ -431,7 +428,7 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
 
 static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
 			  const struct cxl_send_command *send_cmd,
-			  struct cxl_memdev_state *mds)
+			  struct cxl_mailbox *cxl_mbox)
 {
 	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
 	const struct cxl_command_info *info = &c->info;
@@ -446,11 +443,11 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
 		return -EINVAL;
 
 	/* Check that the command is enabled for hardware */
-	if (!test_bit(info->id, mds->enabled_cmds))
+	if (!test_bit(info->id, cxl_mbox->enabled_cmds))
 		return -ENOTTY;
 
 	/* Check that the command is not claimed for exclusive kernel use */
-	if (test_bit(info->id, mds->exclusive_cmds))
+	if (test_bit(info->id, cxl_mbox->exclusive_cmds))
 		return -EBUSY;
 
 	/* Check the input buffer is the expected size */
@@ -479,7 +476,7 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
 /**
  * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
  * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
- * @mds: The driver data for the operation
+ * @cxl_mbox: CXL mailbox context
  * @send_cmd: &struct cxl_send_command copied in from userspace.
  *
  * Return:
@@ -494,10 +491,9 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
  * safe to send to the hardware.
  */
 static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
-				      struct cxl_memdev_state *mds,
+				      struct cxl_mailbox *cxl_mbox,
 				      const struct cxl_send_command *send_cmd)
 {
-	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 	struct cxl_mem_command mem_cmd;
 	int rc;
 
@@ -514,24 +510,23 @@ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
 
 	/* Sanitize and construct a cxl_mem_command */
 	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
-		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds);
+		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxl_mbox);
 	else
-		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds);
+		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxl_mbox);
 
 	if (rc)
 		return rc;
 
 	/* Sanitize and construct a cxl_mbox_cmd */
-	return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode,
+	return cxl_mbox_cmd_ctor(mbox_cmd, cxl_mbox, mem_cmd.opcode,
 				 mem_cmd.info.size_in, mem_cmd.info.size_out,
 				 send_cmd->in.payload);
 }
 
-int cxl_query_cmd(struct cxl_memdev *cxlmd,
+int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
 		  struct cxl_mem_query_commands __user *q)
 {
-	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
-	struct device *dev = &cxlmd->dev;
+	struct device *dev = cxl_mbox->host;
 	struct cxl_mem_command *cmd;
 	u32 n_commands;
 	int j = 0;
@@ -552,9 +547,9 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
 	cxl_for_each_cmd(cmd) {
 		struct cxl_command_info info = cmd->info;
 
-		if (test_bit(info.id, mds->enabled_cmds))
+		if (test_bit(info.id, cxl_mbox->enabled_cmds))
 			info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
-		if (test_bit(info.id, mds->exclusive_cmds))
+		if (test_bit(info.id, cxl_mbox->exclusive_cmds))
 			info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;
 
 		if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
@@ -569,7 +564,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
 
 /**
  * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
- * @mds: The driver data for the operation
+ * @cxl_mbox: The mailbox context for the operation.
  * @mbox_cmd: The validated mailbox command.
  * @out_payload: Pointer to userspace's output payload.
  * @size_out: (Input) Max payload size to copy out.
@@ -590,13 +585,12 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
  *
  * See cxl_send_cmd().
  */
-static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
+static int handle_mailbox_cmd_from_user(struct cxl_mailbox *cxl_mbox,
 					struct cxl_mbox_cmd *mbox_cmd,
 					u64 out_payload, s32 *size_out,
 					u32 *retval)
 {
-	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
-	struct device *dev = mds->cxlds.dev;
+	struct device *dev = cxl_mbox->host;
 	int rc;
 
 	dev_dbg(dev,
@@ -633,10 +627,9 @@ static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
 	return rc;
 }
 
-int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
+int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s)
 {
-	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
-	struct device *dev = &cxlmd->dev;
+	struct device *dev = cxl_mbox->host;
 	struct cxl_send_command send;
 	struct cxl_mbox_cmd mbox_cmd;
 	int rc;
@@ -646,11 +639,11 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
 	if (copy_from_user(&send, s, sizeof(send)))
 		return -EFAULT;
 
-	rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send);
+	rc = cxl_validate_cmd_from_user(&mbox_cmd, cxl_mbox, &send);
 	if (rc)
 		return rc;
 
-	rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload,
+	rc = handle_mailbox_cmd_from_user(cxl_mbox, &mbox_cmd, send.out.payload,
 					  &send.out.size, &send.retval);
 	if (rc)
 		return rc;
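Caller-side sketch (not part of the hunks shown): with cxl_query_cmd() and cxl_send_cmd() now taking a struct cxl_mailbox, the memdev ioctl dispatcher is assumed to resolve the embedded mailbox before calling in, roughly as below; the exact shape of the dispatcher in drivers/cxl/core/memdev.c is an assumption here.

static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
			       unsigned long arg)
{
	/* assumed shape: pick up the mailbox embedded in the device state */
	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;

	switch (cmd) {
	case CXL_MEM_QUERY_COMMANDS:
		return cxl_query_cmd(cxl_mbox, (void __user *)arg);
	case CXL_MEM_SEND_COMMAND:
		return cxl_send_cmd(cxl_mbox, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}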
@@ -724,6 +717,7 @@ static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
  */
 static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
 {
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 	struct cxl_cel_entry *cel_entry;
 	const int cel_entries = size / sizeof(*cel_entry);
 	struct device *dev = mds->cxlds.dev;
@@ -737,7 +731,7 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
 		int enabled = 0;
 
 		if (cmd) {
-			set_bit(cmd->info.id, mds->enabled_cmds);
+			set_bit(cmd->info.id, cxl_mbox->enabled_cmds);
 			enabled++;
 		}
 
@@ -807,6 +801,7 @@ static const uuid_t log_uuid[] = {
  */
 int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
 {
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 	struct cxl_mbox_get_supported_logs *gsl;
 	struct device *dev = mds->cxlds.dev;
 	struct cxl_mem_command *cmd;
@@ -845,7 +840,7 @@ int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
 	/* In case CEL was bogus, enable some default commands. */
 	cxl_for_each_cmd(cmd)
 		if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
-			set_bit(cmd->info.id, mds->enabled_cmds);
+			set_bit(cmd->info.id, cxl_mbox->enabled_cmds);
 
 	/* Found the required CEL */
 	rc = 0;
@@ -1448,6 +1443,7 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
 	mutex_init(&mds->event.log_lock);
 	mds->cxlds.dev = dev;
 	mds->cxlds.reg_map.host = dev;
+	mds->cxlds.cxl_mbox.host = dev;
 	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
 	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
 	mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
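For reference, the struct cxl_mailbox members these hunks rely on are the host device (used for dev_dbg()/dev_WARN_ONCE()), the payload size cap, and the enabled/exclusive command bitmaps. A minimal sketch of that subset follows; the authoritative definition lives in the mailbox header and carries additional members (locking, the transport send hook, etc.), so treat this layout as an assumption.

struct cxl_mailbox {
	struct device *host;		/* logging context, set in cxl_memdev_state_create() above */
	size_t payload_size;		/* caps CXL_VARIABLE_PAYLOAD output sizing */
	DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);		/* commands the device advertises */
	DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);	/* commands claimed for kernel use */
	/* ... plus mailbox locking and the hardware send callback ... */
};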