@@ -12,6 +12,9 @@ LOG_MODULE_REGISTER(dma_cc23x0, CONFIG_DMA_LOG_LEVEL);
12
12
#include <zephyr/device.h>
13
13
#include <zephyr/drivers/dma.h>
14
14
#include <zephyr/irq.h>
15
+ #include <zephyr/pm/device.h>
16
+ #include <zephyr/pm/policy.h>
17
+ #include <zephyr/sys/atomic.h>
15
18
#include <zephyr/sys/util.h>
16
19
17
20
#include <driverlib/clkctl.h>
@@ -45,17 +48,68 @@ LOG_MODULE_REGISTER(dma_cc23x0, CONFIG_DMA_LOG_LEVEL);
45
48
#define DMA_CC23_IPID_MASK GENMASK(2, 0)
46
49
#define DMA_CC23_CHXSEL_REG (ch ) HWREG(EVTSVT_BASE + EVTSVT_O_DMACH0SEL + sizeof(uint32_t) * (ch))
47
50
51
+ #ifdef CONFIG_PM_DEVICE
52
+ /*
53
+ * For MEMORY_TO_MEMORY transfers, the transfer completion is signaled on
54
+ * the DMA interrupt i.e. dma_cc23x0_isr(). So, this driver can take care
55
+ * of PM lock/unlock for SW channels.
56
+ *
57
+ * For MEMORY_TO_PERIPHERAL and PERIPHERAL_TO_MEMORY transfers, we assume
58
+ * that DMA clients (peripheral drivers) should take care of PM lock/unlock.
59
+ * This assumption is made for that SoC because if a peripheral channel
60
+ * is used, then the transfer completion is signaled on the peripheral's
61
+ * interrupt, which is handled in the DMA client driver. This operating
62
+ * mode is specific to this SoC.
63
+ *
64
+ * As a result, PM locks are defined in this driver for MEMORY_TO_MEMORY
65
+ * channels only.
66
+ */
67
+ #define DMA_CC23X0_PM_LOCK_COUNT (DMA_CC23_SW_CH_MAX - DMA_CC23_SW_CH_MIN + 1)
68
+
69
+ #define DMA_CC23_PERIPH_CH_MASK GENMASK(DMA_CC23_PERIPH_CH_MAX, 0)
70
+ #endif
71
+
48
72
struct dma_cc23x0_channel {
49
73
uint8_t data_size ;
50
74
dma_callback_t cb ;
51
75
void * user_data ;
76
+ #ifdef CONFIG_PM_DEVICE
77
+ bool configured ;
78
+ struct dma_block_config dma_blk_cfg ;
79
+ struct dma_config dma_cfg ;
80
+ #endif
52
81
};
53
82
54
83
struct dma_cc23x0_data {
55
84
__aligned (1024 ) uDMAControlTableEntry desc [UDMA_NUM_CHANNELS ];
56
85
struct dma_cc23x0_channel channels [UDMA_NUM_CHANNELS ];
86
+ #ifdef CONFIG_PM_DEVICE
87
+ ATOMIC_DEFINE (pm_lock , DMA_CC23X0_PM_LOCK_COUNT );
88
+ #endif
57
89
};
58
90
91
+ static inline void dma_cc23x0_pm_policy_state_lock_get (struct dma_cc23x0_data * data ,
92
+ uint32_t pm_lock_id )
93
+ {
94
+ #ifdef CONFIG_PM_DEVICE
95
+ if (!atomic_test_and_set_bit (data -> pm_lock , pm_lock_id - DMA_CC23_SW_CH_MIN )) {
96
+ pm_policy_state_lock_get (PM_STATE_RUNTIME_IDLE , PM_ALL_SUBSTATES );
97
+ pm_policy_state_lock_get (PM_STATE_STANDBY , PM_ALL_SUBSTATES );
98
+ }
99
+ #endif
100
+ }
101
+
102
+ static inline void dma_cc23x0_pm_policy_state_lock_put (struct dma_cc23x0_data * data ,
103
+ uint32_t pm_lock_id )
104
+ {
105
+ #ifdef CONFIG_PM_DEVICE
106
+ if (atomic_test_and_clear_bit (data -> pm_lock , pm_lock_id - DMA_CC23_SW_CH_MIN )) {
107
+ pm_policy_state_lock_put (PM_STATE_STANDBY , PM_ALL_SUBSTATES );
108
+ pm_policy_state_lock_put (PM_STATE_RUNTIME_IDLE , PM_ALL_SUBSTATES );
109
+ }
110
+ #endif
111
+ }
112
+
59
113
/*
60
114
* If the channel is a software channel, then the completion will be signaled
61
115
* on this DMA dedicated interrupt.
@@ -84,6 +138,8 @@ static void dma_cc23x0_isr(const struct device *dev)
84
138
}
85
139
86
140
uDMAClearInt (done_flags & BIT (i ));
141
+
142
+ dma_cc23x0_pm_policy_state_lock_put (data , i );
87
143
}
88
144
}
89
145
@@ -117,6 +173,9 @@ static int dma_cc23x0_config(const struct device *dev, uint32_t channel,
117
173
uint32_t xfer_size ;
118
174
uint32_t burst_len ;
119
175
int ret ;
176
+ #ifdef CONFIG_PM_DEVICE
177
+ enum pm_device_state pm_state ;
178
+ #endif
120
179
121
180
if (channel >= UDMA_NUM_CHANNELS ) {
122
181
LOG_ERR ("Invalid channel (%u)" , channel );
@@ -233,24 +292,62 @@ static int dma_cc23x0_config(const struct device *dev, uint32_t channel,
233
292
(void * )block -> dest_address ,
234
293
xfer_size );
235
294
295
+ #ifdef CONFIG_PM_DEVICE
296
+ pm_device_state_get (dev , & pm_state );
297
+
298
+ /*
299
+ * Save context only if current function is not being called
300
+ * from resume operation for restoring channel configuration
301
+ */
302
+ if (pm_state == PM_DEVICE_STATE_ACTIVE ) {
303
+ ch_data -> configured = true;
304
+
305
+ ch_data -> dma_blk_cfg .source_address = block -> source_address ;
306
+ ch_data -> dma_blk_cfg .dest_address = block -> dest_address ;
307
+ ch_data -> dma_blk_cfg .source_addr_adj = block -> source_addr_adj ;
308
+ ch_data -> dma_blk_cfg .dest_addr_adj = block -> dest_addr_adj ;
309
+ ch_data -> dma_blk_cfg .block_size = block -> block_size ;
310
+
311
+ ch_data -> dma_cfg .dma_slot = config -> dma_slot ;
312
+ ch_data -> dma_cfg .channel_direction = config -> channel_direction ;
313
+ ch_data -> dma_cfg .block_count = config -> block_count ;
314
+ ch_data -> dma_cfg .head_block = & ch_data -> dma_blk_cfg ;
315
+ ch_data -> dma_cfg .source_data_size = config -> source_data_size ;
316
+ ch_data -> dma_cfg .dest_data_size = config -> dest_data_size ;
317
+ ch_data -> dma_cfg .source_burst_length = config -> source_burst_length ;
318
+ ch_data -> dma_cfg .dma_callback = config -> dma_callback ;
319
+ ch_data -> dma_cfg .user_data = config -> user_data ;
320
+
321
+ LOG_DBG ("Configured channel %u for %08x to %08x (%u bytes)" ,
322
+ channel ,
323
+ block -> source_address ,
324
+ block -> dest_address ,
325
+ block -> block_size );
326
+ }
327
+ #else
236
328
LOG_DBG ("Configured channel %u for %08x to %08x (%u bytes)" ,
237
329
channel ,
238
330
block -> source_address ,
239
331
block -> dest_address ,
240
332
block -> block_size );
333
+ #endif
241
334
242
335
return 0 ;
243
336
}
244
337
245
338
static int dma_cc23x0_start (const struct device * dev , uint32_t channel )
246
339
{
340
+ struct dma_cc23x0_data * data = dev -> data ;
341
+
247
342
if (uDMAIsChannelEnabled (BIT (channel ))) {
248
343
return 0 ;
249
344
}
250
345
251
346
uDMAEnableChannel (BIT (channel ));
252
347
253
348
if (DMA_CC23_IS_SW_CH (channel )) {
349
+ dma_cc23x0_pm_policy_state_lock_get (data , channel );
350
+
254
351
/* Request DMA channel to start a memory to memory transfer */
255
352
uDMARequestChannel (BIT (channel ));
256
353
}
@@ -260,8 +357,14 @@ static int dma_cc23x0_start(const struct device *dev, uint32_t channel)
260
357
261
358
/* Stop a transfer on the given channel and, for SW (memory-to-memory)
 * channels, release the PM locks taken when the transfer was started.
 */
static int dma_cc23x0_stop(const struct device *dev, uint32_t channel)
{
	uDMADisableChannel(BIT(channel));

	if (DMA_CC23_IS_SW_CH(channel)) {
		struct dma_cc23x0_data *data = dev->data;

		dma_cc23x0_pm_policy_state_lock_put(data, channel);
	}

	return 0;
}
267
370
@@ -279,6 +382,13 @@ static int dma_cc23x0_reload(const struct device *dev, uint32_t channel,
279
382
uDMASetChannelTransfer (& data -> desc [channel ], DMA_CC23_MODE (channel ),
280
383
(void * )src , (void * )dst , xfer_size );
281
384
385
+ #ifdef CONFIG_PM_DEVICE
386
+ /* Save context */
387
+ ch_data -> dma_blk_cfg .source_address = src ;
388
+ ch_data -> dma_blk_cfg .dest_address = dst ;
389
+ ch_data -> dma_blk_cfg .block_size = size ;
390
+ #endif
391
+
282
392
LOG_DBG ("Reloaded channel %u for %08x to %08x (%u bytes)" ,
283
393
channel , src , dst , size );
284
394
@@ -343,29 +453,70 @@ static int dma_cc23x0_get_status(const struct device *dev, uint32_t channel,
343
453
return 0 ;
344
454
}
345
455
346
- static int dma_cc23x0_init ( const struct device * dev )
456
+ static int dma_cc23x0_enable ( struct dma_cc23x0_data * data )
347
457
{
348
- struct dma_cc23x0_data * data = dev -> data ;
458
+ CLKCTLEnable ( CLKCTL_BASE , CLKCTL_DMA ) ;
349
459
460
+ uDMAEnable ();
461
+
462
+ /* Set base address for channel control table (descriptors) */
463
+ uDMASetControlBase (data -> desc );
464
+
465
+ return 0 ;
466
+ }
467
+
468
+ static int dma_cc23x0_init (const struct device * dev )
469
+ {
350
470
IRQ_CONNECT (DT_INST_IRQN (0 ),
351
471
DT_INST_IRQ (0 , priority ),
352
472
dma_cc23x0_isr ,
353
473
DEVICE_DT_INST_GET (0 ),
354
474
0 );
355
475
irq_enable (DT_INST_IRQN (0 ));
356
476
357
- /* Enable clock */
358
- CLKCTLEnable ( CLKCTL_BASE , CLKCTL_DMA );
477
+ return dma_cc23x0_enable ( dev -> data );
478
+ }
359
479
360
- /* Enable DMA */
361
- uDMAEnable ();
480
+ #ifdef CONFIG_PM_DEVICE
362
481
363
- /* Set base address for channel control table (descriptors) */
364
- uDMASetControlBase (data -> desc );
482
+ static int dma_cc23x0_pm_action (const struct device * dev , enum pm_device_action action )
483
+ {
484
+ struct dma_cc23x0_data * data = dev -> data ;
485
+ int i = 0 ;
486
+
487
+ switch (action ) {
488
+ case PM_DEVICE_ACTION_SUSPEND :
489
+ /*
490
+ * Ensure that none transfer is ongoing in case PM state lock was not
491
+ * properly handled by DMA clients.
492
+ */
493
+ if (uDMAIsChannelEnabled (DMA_CC23_PERIPH_CH_MASK )) {
494
+ return - EBUSY ;
495
+ }
365
496
366
- return 0 ;
497
+ uDMADisable ();
498
+ CLKCTLDisable (CLKCTL_BASE , CLKCTL_DMA );
499
+
500
+ return 0 ;
501
+ case PM_DEVICE_ACTION_RESUME :
502
+ dma_cc23x0_enable (data );
503
+
504
+ /* Restore context for the channels that were configured before */
505
+ ARRAY_FOR_EACH_PTR (data -> channels , ch_data ) {
506
+ if (ch_data -> configured ) {
507
+ dma_cc23x0_config (dev , i , & ch_data -> dma_cfg );
508
+ }
509
+ i ++ ;
510
+ }
511
+
512
+ return 0 ;
513
+ default :
514
+ return - ENOTSUP ;
515
+ }
367
516
}
368
517
518
+ #endif /* CONFIG_PM_DEVICE */
519
+
369
520
static struct dma_cc23x0_data cc23x0_data ;
370
521
371
522
static DEVICE_API (dma , dma_cc23x0_api ) = {
@@ -376,7 +527,10 @@ static DEVICE_API(dma, dma_cc23x0_api) = {
376
527
.get_status = dma_cc23x0_get_status ,
377
528
};
378
529
379
- DEVICE_DT_INST_DEFINE (0 , dma_cc23x0_init , NULL ,
530
+ PM_DEVICE_DT_INST_DEFINE (0 , dma_cc23x0_pm_action );
531
+
532
+ DEVICE_DT_INST_DEFINE (0 , dma_cc23x0_init ,
533
+ PM_DEVICE_DT_INST_GET (0 ),
380
534
& cc23x0_data , NULL ,
381
535
PRE_KERNEL_1 , CONFIG_DMA_INIT_PRIORITY ,
382
536
& dma_cc23x0_api );
0 commit comments