 /* RX mailbox client buffer max length */
 #define MBOX_CLIENT_BUF_MAX	(IPI_BUF_LEN_MAX + \
				 sizeof(struct zynqmp_ipi_message))
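+
+/*
+ * ASCII "xamp" packed into a 32-bit word. The remote publishes both the
+ * magic and its bitwise complement so that stale or uninitialized memory
+ * is not mistaken for a valid struct rsc_tbl_data.
+ */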
+#define RSC_TBL_XLNX_MAGIC	((uint32_t)'x' << 24 | (uint32_t)'a' << 16 | \
+				 (uint32_t)'m' << 8 | (uint32_t)'p')
+
 /*
  * settings for RPU cluster mode which
  * reflects possible values of xlnx,cluster-mode dt-property
@@ -73,6 +77,26 @@ struct mbox_info {
 	struct mbox_chan *rx_chan;
 };
 
+/**
+ * struct rsc_tbl_data - resource table metadata published by remote firmware
+ *
+ * Platform-specific data structure used to sync the resource table address.
+ * The order and size of each field must match on the remote side.
+ *
+ * @version: version of the data structure
+ * @magic_num: 32-bit magic number
+ * @comp_magic_num: complement of the magic number above
+ * @rsc_tbl_size: resource table size
+ * @rsc_tbl: resource table address
+ */
+struct rsc_tbl_data {
+	const int version;
+	const u32 magic_num;
+	const u32 comp_magic_num;
+	const u32 rsc_tbl_size;
+	const uintptr_t rsc_tbl;
+} __packed;
+
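+/*
+ * Layout the remote is expected to place at the start of the first
+ * memory-region entry (the 0x3ed20000 carveout base is illustrative only):
+ *
+ *   0x3ed20000: version         e.g. 1
+ *   0x3ed20004: magic_num       0x78616d70 ("xamp")
+ *   0x3ed20008: comp_magic_num  0x879e928f (~0x78616d70)
+ *   0x3ed2000c: rsc_tbl_size    size of the resource table in bytes
+ *   0x3ed20010: rsc_tbl         physical address of struct resource_table
+ */
+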
 /*
  * Hardcoded TCM bank values. This will stay in driver to maintain backward
  * compatibility with device-tree that does not have TCM information.
@@ -95,20 +119,24 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
 /**
  * struct zynqmp_r5_core
  *
+ * @rsc_tbl_va: resource table virtual address
  * @dev: device of RPU instance
  * @np: device node of RPU instance
  * @tcm_bank_count: number TCM banks accessible to this RPU
  * @tcm_banks: array of each TCM bank data
  * @rproc: rproc handle
+ * @rsc_tbl_size: resource table size retrieved from remote
  * @pm_domain_id: RPU CPU power domain id
  * @ipi: pointer to mailbox information
  */
 struct zynqmp_r5_core {
+	void __iomem *rsc_tbl_va;
 	struct device *dev;
 	struct device_node *np;
 	int tcm_bank_count;
 	struct mem_bank_data **tcm_banks;
 	struct rproc *rproc;
+	u32 rsc_tbl_size;
 	u32 pm_domain_id;
 	struct mbox_info *ipi;
 };
@@ -557,6 +585,14 @@ static int add_tcm_banks(struct rproc *rproc)
 		dev_dbg(dev, "TCM carveout %s addr=%llx, da=0x%x, size=0x%lx",
 			bank_name, bank_addr, da, bank_size);
 
+		/*
+		 * In the DETACHED state the firmware is already running, so no
+		 * TCM carveout is registered. The TCM PD node is still requested
+		 * so platform management firmware knows the TCM is in use.
+		 */
+		if (rproc->state == RPROC_DETACHED)
+			continue;
+
 		rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
 						 bank_size, da,
 						 tcm_mem_map, tcm_mem_unmap,
@@ -662,6 +698,107 @@ static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
 	return 0;
 }
 
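+/*
+ * Called by the remoteproc core when attaching to a remote that is already
+ * running; returns the resource table located at probe time by
+ * zynqmp_r5_get_rsc_table_va().
+ */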
+static struct resource_table *zynqmp_r5_get_loaded_rsc_table(struct rproc *rproc,
+							     size_t *size)
+{
+	struct zynqmp_r5_core *r5_core;
+
+	r5_core = rproc->priv;
+
+	*size = r5_core->rsc_tbl_size;
+
+	return (struct resource_table *)r5_core->rsc_tbl_va;
+}
+
+static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
+{
+	struct resource_table *rsc_tbl_addr;
+	struct device *dev = r5_core->dev;
+	struct rsc_tbl_data *rsc_data_va;
+	struct resource res_mem;
+	struct device_node *np;
+	int ret;
+
+	/*
+	 * The remote processor firmware is expected to provide the resource
+	 * table address via a struct rsc_tbl_data. The start address of the
+	 * first entry in the "memory-region" property list holds that
+	 * structure, which carries the resource table address, its size and
+	 * a magic number used to validate the entry.
+	 */
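+	/*
+	 * Hypothetical device-tree sketch (node and label names are
+	 * illustrative only):
+	 *
+	 *   r5f@0 {
+	 *       memory-region = <&rproc_0_fw_image>, <&rpu0vdev0buffer>;
+	 *   };
+	 *
+	 * The first phandle (&rproc_0_fw_image here) is the region whose
+	 * start address the remote fills with struct rsc_tbl_data.
+	 */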
+	np = of_parse_phandle(r5_core->np, "memory-region", 0);
+	if (!np) {
+		dev_err(dev, "failed to get memory region dev node\n");
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(np, 0, &res_mem);
+	of_node_put(np);
+	if (ret) {
+		dev_err(dev, "failed to get memory-region resource addr\n");
+		return -EINVAL;
+	}
+
+	rsc_data_va = (struct rsc_tbl_data *)ioremap_wc(res_mem.start,
+							sizeof(struct rsc_tbl_data));
+	if (!rsc_data_va) {
+		dev_err(dev, "failed to map resource table data address\n");
+		return -EIO;
+	}
+
+	/*
+	 * If the RSC_TBL_XLNX_MAGIC number and its complement aren't found,
+	 * don't consider the resource table address valid and don't attach.
+	 */
+	if (rsc_data_va->magic_num != RSC_TBL_XLNX_MAGIC ||
+	    rsc_data_va->comp_magic_num != ~RSC_TBL_XLNX_MAGIC) {
+		dev_dbg(dev, "invalid magic number, won't attach\n");
+		iounmap((void __iomem *)rsc_data_va);
+		return -EINVAL;
+	}
+
+	r5_core->rsc_tbl_va = ioremap_wc(rsc_data_va->rsc_tbl,
+					 rsc_data_va->rsc_tbl_size);
+	if (!r5_core->rsc_tbl_va) {
+		dev_err(dev, "failed to get resource table va\n");
+		iounmap((void __iomem *)rsc_data_va);
+		return -EINVAL;
+	}
+
+	rsc_tbl_addr = (struct resource_table *)r5_core->rsc_tbl_va;
+
+	/*
+	 * As of now resource table version 1 is expected. Don't fail to attach
+	 * but warn users about it.
+	 */
+	if (rsc_tbl_addr->ver != 1)
+		dev_warn(dev, "unexpected resource table version %d\n",
+			 rsc_tbl_addr->ver);
+
+	r5_core->rsc_tbl_size = rsc_data_va->rsc_tbl_size;
+
+	iounmap((void __iomem *)rsc_data_va);
+
+	return 0;
+}
+
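+/* The firmware is already running and configured; attach only logs it. */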
+static int zynqmp_r5_attach(struct rproc *rproc)
+{
+	dev_dbg(&rproc->dev, "rproc %d attached\n", rproc->index);
+
+	return 0;
+}
+
+static int zynqmp_r5_detach(struct rproc *rproc)
+{
+	/*
+	 * Generate a last notification to the remote after the host clears
+	 * the virtio reset flag. The remote can then check the flag from its
+	 * kick interrupt handler instead of polling for it.
+	 */
+	zynqmp_r5_rproc_kick(rproc, 0);
+
+	return 0;
+}
+
 
 static const struct rproc_ops zynqmp_r5_rproc_ops = {
 	.prepare	= zynqmp_r5_rproc_prepare,
 	.unprepare	= zynqmp_r5_rproc_unprepare,
@@ -673,6 +810,9 @@ static const struct rproc_ops zynqmp_r5_rproc_ops = {
 	.sanity_check	= rproc_elf_sanity_check,
 	.get_boot_addr	= rproc_elf_get_boot_addr,
 	.kick		= zynqmp_r5_rproc_kick,
+	.get_loaded_rsc_table = zynqmp_r5_get_loaded_rsc_table,
+	.attach		= zynqmp_r5_attach,
+	.detach		= zynqmp_r5_detach,
 };
 
 /**
@@ -723,6 +863,16 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
 		goto free_rproc;
 	}
 
+	/*
+	 * If firmware is already available in memory, move the rproc state to
+	 * DETACHED. Firmware can be preloaded via a debugger or by any other
+	 * agent (processor) in the system.
+	 * If firmware isn't available in memory and no resource table is
+	 * found, the rproc state remains OFFLINE.
+	 */
+	if (!zynqmp_r5_get_rsc_table_va(r5_core))
+		r5_rproc->state = RPROC_DETACHED;
+
 	r5_core->rproc = r5_rproc;
 	return r5_core;
@@ -1134,6 +1284,7 @@ static void zynqmp_r5_cluster_exit(void *data)
 	for (i = 0; i < cluster->core_count; i++) {
 		r5_core = cluster->r5_cores[i];
 		zynqmp_r5_free_mbox(r5_core->ipi);
+		iounmap(r5_core->rsc_tbl_va);
 		of_reserved_mem_device_release(r5_core->dev);
 		put_device(r5_core->dev);
 		rproc_del(r5_core->rproc);