9
9
#include <linux/errno.h>
10
10
#include <linux/export.h>
11
11
#include <linux/io.h>
12
+ #include <linux/iopoll.h>
12
13
#include <linux/slab.h>
13
14
14
15
#include "clk.h"
15
16
17
+ #define TIMEOUT_US 500U
18
+
16
19
#define CCM_DIV_SHIFT 0
17
20
#define CCM_DIV_WIDTH 8
18
21
#define CCM_MUX_SHIFT 8
19
22
#define CCM_MUX_MASK 3
20
23
#define CCM_OFF_SHIFT 24
24
+ #define CCM_BUSY_SHIFT 28
21
25
26
+ #define STAT_OFFSET 0x4
22
27
#define AUTHEN_OFFSET 0x30
23
28
#define TZ_NS_SHIFT 9
24
29
#define TZ_NS_MASK BIT(9)
25
30
31
+ #define WHITE_LIST_SHIFT 16
32
+
33
+ static int imx93_clk_composite_wait_ready (struct clk_hw * hw , void __iomem * reg )
34
+ {
35
+ int ret ;
36
+ u32 val ;
37
+
38
+ ret = readl_poll_timeout_atomic (reg + STAT_OFFSET , val , !(val & BIT (CCM_BUSY_SHIFT )),
39
+ 0 , TIMEOUT_US );
40
+ if (ret )
41
+ pr_err ("Slice[%s] busy timeout\n" , clk_hw_get_name (hw ));
42
+
43
+ return ret ;
44
+ }
45
+
46
+ static void imx93_clk_composite_gate_endisable (struct clk_hw * hw , int enable )
47
+ {
48
+ struct clk_gate * gate = to_clk_gate (hw );
49
+ unsigned long flags ;
50
+ u32 reg ;
51
+
52
+ if (gate -> lock )
53
+ spin_lock_irqsave (gate -> lock , flags );
54
+
55
+ reg = readl (gate -> reg );
56
+
57
+ if (enable )
58
+ reg &= ~BIT (gate -> bit_idx );
59
+ else
60
+ reg |= BIT (gate -> bit_idx );
61
+
62
+ writel (reg , gate -> reg );
63
+
64
+ imx93_clk_composite_wait_ready (hw , gate -> reg );
65
+
66
+ if (gate -> lock )
67
+ spin_unlock_irqrestore (gate -> lock , flags );
68
+ }
69
+
70
/* clk_ops .enable hook: ungate the slice. Always reports success. */
static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
{
	imx93_clk_composite_gate_endisable(hw, 1);
	return 0;
}
76
+
77
/* clk_ops .disable hook: gate the slice off. */
static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
{
	imx93_clk_composite_gate_endisable(hw, 0);
}
81
+
82
/*
 * Gate ops for the composite slice: enable/disable go through the
 * busy-wait path above; is_enabled reuses the generic clk_gate helper.
 */
static const struct clk_ops imx93_clk_composite_gate_ops = {
	.enable = imx93_clk_composite_gate_enable,
	.disable = imx93_clk_composite_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};
87
+
88
+ static unsigned long
89
+ imx93_clk_composite_divider_recalc_rate (struct clk_hw * hw , unsigned long parent_rate )
90
+ {
91
+ return clk_divider_ops .recalc_rate (hw , parent_rate );
92
+ }
93
+
94
+ static long
95
+ imx93_clk_composite_divider_round_rate (struct clk_hw * hw , unsigned long rate , unsigned long * prate )
96
+ {
97
+ return clk_divider_ops .round_rate (hw , rate , prate );
98
+ }
99
+
100
+ static int
101
+ imx93_clk_composite_divider_determine_rate (struct clk_hw * hw , struct clk_rate_request * req )
102
+ {
103
+ return clk_divider_ops .determine_rate (hw , req );
104
+ }
105
+
106
+ static int imx93_clk_composite_divider_set_rate (struct clk_hw * hw , unsigned long rate ,
107
+ unsigned long parent_rate )
108
+ {
109
+ struct clk_divider * divider = to_clk_divider (hw );
110
+ int value ;
111
+ unsigned long flags = 0 ;
112
+ u32 val ;
113
+ int ret ;
114
+
115
+ value = divider_get_val (rate , parent_rate , divider -> table , divider -> width , divider -> flags );
116
+ if (value < 0 )
117
+ return value ;
118
+
119
+ if (divider -> lock )
120
+ spin_lock_irqsave (divider -> lock , flags );
121
+
122
+ val = readl (divider -> reg );
123
+ val &= ~(clk_div_mask (divider -> width ) << divider -> shift );
124
+ val |= (u32 )value << divider -> shift ;
125
+ writel (val , divider -> reg );
126
+
127
+ ret = imx93_clk_composite_wait_ready (hw , divider -> reg );
128
+
129
+ if (divider -> lock )
130
+ spin_unlock_irqrestore (divider -> lock , flags );
131
+
132
+ return ret ;
133
+ }
134
+
135
/*
 * Divider ops: rate math reuses the generic divider helpers; set_rate
 * additionally waits for the slice busy bit to clear.
 */
static const struct clk_ops imx93_clk_composite_divider_ops = {
	.recalc_rate = imx93_clk_composite_divider_recalc_rate,
	.round_rate = imx93_clk_composite_divider_round_rate,
	.determine_rate = imx93_clk_composite_divider_determine_rate,
	.set_rate = imx93_clk_composite_divider_set_rate,
};
141
+
142
+ static u8 imx93_clk_composite_mux_get_parent (struct clk_hw * hw )
143
+ {
144
+ return clk_mux_ops .get_parent (hw );
145
+ }
146
+
147
+ static int imx93_clk_composite_mux_set_parent (struct clk_hw * hw , u8 index )
148
+ {
149
+ struct clk_mux * mux = to_clk_mux (hw );
150
+ u32 val = clk_mux_index_to_val (mux -> table , mux -> flags , index );
151
+ unsigned long flags = 0 ;
152
+ u32 reg ;
153
+ int ret ;
154
+
155
+ if (mux -> lock )
156
+ spin_lock_irqsave (mux -> lock , flags );
157
+
158
+ reg = readl (mux -> reg );
159
+ reg &= ~(mux -> mask << mux -> shift );
160
+ val = val << mux -> shift ;
161
+ reg |= val ;
162
+ writel (reg , mux -> reg );
163
+
164
+ ret = imx93_clk_composite_wait_ready (hw , mux -> reg );
165
+
166
+ if (mux -> lock )
167
+ spin_unlock_irqrestore (mux -> lock , flags );
168
+
169
+ return ret ;
170
+ }
171
+
172
+ static int
173
+ imx93_clk_composite_mux_determine_rate (struct clk_hw * hw , struct clk_rate_request * req )
174
+ {
175
+ return clk_mux_ops .determine_rate (hw , req );
176
+ }
177
+
178
/*
 * Mux ops: parent selection reuses the generic mux helpers; set_parent
 * additionally waits for the slice busy bit to clear.
 */
static const struct clk_ops imx93_clk_composite_mux_ops = {
	.get_parent = imx93_clk_composite_mux_get_parent,
	.set_parent = imx93_clk_composite_mux_set_parent,
	.determine_rate = imx93_clk_composite_mux_determine_rate,
};
183
+
26
184
struct clk_hw * imx93_clk_composite_flags (const char * name , const char * const * parent_names ,
27
- int num_parents , void __iomem * reg ,
185
+ int num_parents , void __iomem * reg , u32 domain_id ,
28
186
unsigned long flags )
29
187
{
30
188
struct clk_hw * hw = ERR_PTR (- ENOMEM ), * mux_hw ;
@@ -33,6 +191,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
33
191
struct clk_gate * gate = NULL ;
34
192
struct clk_mux * mux = NULL ;
35
193
bool clk_ro = false;
194
+ u32 authen ;
36
195
37
196
mux = kzalloc (sizeof (* mux ), GFP_KERNEL );
38
197
if (!mux )
@@ -55,7 +214,8 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
55
214
div -> lock = & imx_ccm_lock ;
56
215
div -> flags = CLK_DIVIDER_ROUND_CLOSEST ;
57
216
58
- if (!(readl (reg + AUTHEN_OFFSET ) & TZ_NS_MASK ))
217
+ authen = readl (reg + AUTHEN_OFFSET );
218
+ if (!(authen & TZ_NS_MASK ) || !(authen & BIT (WHITE_LIST_SHIFT + domain_id )))
59
219
clk_ro = true;
60
220
61
221
if (clk_ro ) {
@@ -74,9 +234,10 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
74
234
gate -> flags = CLK_GATE_SET_TO_DISABLE ;
75
235
76
236
hw = clk_hw_register_composite (NULL , name , parent_names , num_parents ,
77
- mux_hw , & clk_mux_ops , div_hw ,
78
- & clk_divider_ops , gate_hw ,
79
- & clk_gate_ops , flags | CLK_SET_RATE_NO_REPARENT );
237
+ mux_hw , & imx93_clk_composite_mux_ops , div_hw ,
238
+ & imx93_clk_composite_divider_ops , gate_hw ,
239
+ & imx93_clk_composite_gate_ops ,
240
+ flags | CLK_SET_RATE_NO_REPARENT );
80
241
}
81
242
82
243
if (IS_ERR (hw ))
0 commit comments