@@ -19,6 +19,7 @@
 
 enum scmi_clk_feats {
 	SCMI_CLK_ATOMIC_SUPPORTED,
+	SCMI_CLK_STATE_CTRL_SUPPORTED,
 	SCMI_CLK_FEATS_COUNT
 };
 
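Each enumerator is a bit position in feats_key, so every feature combination maps to a distinct key and scmi_clk_ops_alloc() builds one clk_ops variant per combination on demand. A minimal sketch of that lookup scheme, assuming a cache array sized BIT(SCMI_CLK_FEATS_COUNT) (the array name and helper below are illustrative, not the driver's exact code):

#include <linux/bits.h>
#include <linux/clk-provider.h>

/* One clk_ops variant per feature combination, allocated on first use. */
static const struct clk_ops *clk_ops_db[BIT(SCMI_CLK_FEATS_COUNT)];

static const struct clk_ops *example_lookup_ops(struct device *dev,
						unsigned long feats_key)
{
	/* Reuse the ops already built for this exact combination. */
	if (!clk_ops_db[feats_key])
		clk_ops_db[feats_key] = scmi_clk_ops_alloc(dev, feats_key);

	return clk_ops_db[feats_key];
}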
@@ -230,15 +231,19 @@ scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
 	 * only the prepare/unprepare API, as allowed by the clock framework
 	 * when atomic calls are not available.
 	 */
-	if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED)) {
-		ops->enable = scmi_clk_atomic_enable;
-		ops->disable = scmi_clk_atomic_disable;
-		ops->is_enabled = scmi_clk_atomic_is_enabled;
-	} else {
-		ops->prepare = scmi_clk_enable;
-		ops->unprepare = scmi_clk_disable;
+	if (feats_key & BIT(SCMI_CLK_STATE_CTRL_SUPPORTED)) {
+		if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED)) {
+			ops->enable = scmi_clk_atomic_enable;
+			ops->disable = scmi_clk_atomic_disable;
+		} else {
+			ops->prepare = scmi_clk_enable;
+			ops->unprepare = scmi_clk_disable;
+		}
 	}
 
+	if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED))
+		ops->is_enabled = scmi_clk_atomic_is_enabled;
+
 	/* Rate ops */
 	ops->recalc_rate = scmi_clk_recalc_rate;
 	ops->round_rate = scmi_clk_round_rate;
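The net effect: state-control callbacks are wired only when the firmware permits state changes, while is_enabled is populated independently, so a restricted clock can still be queried atomically even though it cannot be switched. A self-contained userspace demo of the same decision table (the bit positions here are stand-ins, not the kernel values):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the two feature bits. */
#define ATOMIC_BIT	(1UL << 0)
#define STATE_CTRL_BIT	(1UL << 1)

/* Mirror which state callbacks scmi_clk_ops_alloc() would populate. */
static void show_state_ops(unsigned long feats_key)
{
	bool ctrl = feats_key & STATE_CTRL_BIT;
	bool atomic = feats_key & ATOMIC_BIT;

	printf("state_ctrl=%d atomic=%d -> %s%s\n", ctrl, atomic,
	       ctrl ? (atomic ? "enable/disable" : "prepare/unprepare")
		    : "no state control",
	       atomic ? " + is_enabled" : "");
}

int main(void)
{
	for (unsigned long key = 0; key < 4; key++)
		show_state_ops(key);
	return 0;
}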
@@ -294,6 +299,9 @@ scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
 	if (atomic_capable && ci->enable_latency <= atomic_threshold_us)
 		feats_key |= BIT(SCMI_CLK_ATOMIC_SUPPORTED);
 
+	if (!ci->state_ctrl_forbidden)
+		feats_key |= BIT(SCMI_CLK_STATE_CTRL_SUPPORTED);
+
 	if (WARN_ON(feats_key >= db_size))
 		return NULL;
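Consumers need no changes: when state control is forbidden, neither enable/disable nor prepare/unprepare ends up in the ops, and the clk core treats the missing callbacks as no-ops that return success, leaving the gate state under firmware control. A hypothetical consumer sketch (the probe function and "func" clock id are made up):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_probe(struct device *dev)
{
	struct clk *clk;

	/* "func" is a made-up consumer clock id. */
	clk = devm_clk_get(dev, "func");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/*
	 * With no prepare/enable callbacks registered for a
	 * state-control-forbidden SCMI clock, this is a no-op that
	 * returns 0; the firmware keeps the clock's gate state.
	 */
	return clk_prepare_enable(clk);
}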