@@ -12,6 +12,7 @@ LOG_MODULE_REGISTER(dma_cc23x0, CONFIG_DMA_LOG_LEVEL);
12
12
#include <zephyr/device.h>
13
13
#include <zephyr/drivers/dma.h>
14
14
#include <zephyr/irq.h>
15
+ #include <zephyr/pm/device.h>
15
16
#include <zephyr/sys/util.h>
16
17
17
18
#include <driverlib/clkctl.h>
@@ -45,10 +46,19 @@ LOG_MODULE_REGISTER(dma_cc23x0, CONFIG_DMA_LOG_LEVEL);
45
46
/* NOTE(review): presumably masks the 3-bit IP ID field of a peripheral trigger — confirm against TRM */
#define DMA_CC23_IPID_MASK	GENMASK(2, 0)
/* Event source selection register for DMA channel <ch> (EVTSVT DMACHxSEL, one 32-bit reg per channel) */
#define DMA_CC23_CHXSEL_REG(ch)	HWREG(EVTSVT_BASE + EVTSVT_O_DMACH0SEL + sizeof(uint32_t) * (ch))

#ifdef CONFIG_PM_DEVICE
/* Mask covering channels 0..DMA_CC23_SW_CH_MAX; used on suspend to detect ongoing transfers */
#define DMA_CC23_ALL_CH_MASK	GENMASK(DMA_CC23_SW_CH_MAX, 0)
#endif
48
53
/* Per-channel runtime state */
struct dma_cc23x0_channel {
	uint8_t data_size;	/* Transfer element size used to compute transfer counts */
	dma_callback_t cb;	/* Completion callback, invoked from the DMA ISR */
	void *user_data;	/* Opaque pointer passed back to cb */
#ifdef CONFIG_PM_DEVICE
	/* Set once dma_cc23x0_config() has run for this channel, so PM resume
	 * knows which channels to reconfigure.
	 */
	bool configured;
	/* Saved copies of the last applied block/channel configuration, replayed
	 * through dma_cc23x0_config() when resuming from suspend.
	 */
	struct dma_block_config dma_blk_cfg;
	struct dma_config dma_cfg;
#endif
};
53
63
54
64
struct dma_cc23x0_data {
@@ -117,6 +127,9 @@ static int dma_cc23x0_config(const struct device *dev, uint32_t channel,
117
127
uint32_t xfer_size ;
118
128
uint32_t burst_len ;
119
129
int ret ;
130
+ #ifdef CONFIG_PM_DEVICE
131
+ enum pm_device_state pm_state ;
132
+ #endif
120
133
121
134
if (channel >= UDMA_NUM_CHANNELS ) {
122
135
LOG_ERR ("Invalid channel (%u)" , channel );
@@ -233,11 +246,45 @@ static int dma_cc23x0_config(const struct device *dev, uint32_t channel,
233
246
(void * )block -> dest_address ,
234
247
xfer_size );
235
248
249
+ #ifdef CONFIG_PM_DEVICE
250
+ pm_device_state_get (dev , & pm_state );
251
+
252
+ /*
253
+ * Save context only if current function is not being called
254
+ * from resume operation for restoring channel configuration
255
+ */
256
+ if (pm_state == PM_DEVICE_STATE_ACTIVE ) {
257
+ ch_data -> configured = true;
258
+
259
+ ch_data -> dma_blk_cfg .source_address = block -> source_address ;
260
+ ch_data -> dma_blk_cfg .dest_address = block -> dest_address ;
261
+ ch_data -> dma_blk_cfg .source_addr_adj = block -> source_addr_adj ;
262
+ ch_data -> dma_blk_cfg .dest_addr_adj = block -> dest_addr_adj ;
263
+ ch_data -> dma_blk_cfg .block_size = block -> block_size ;
264
+
265
+ ch_data -> dma_cfg .dma_slot = config -> dma_slot ;
266
+ ch_data -> dma_cfg .channel_direction = config -> channel_direction ;
267
+ ch_data -> dma_cfg .block_count = config -> block_count ;
268
+ ch_data -> dma_cfg .head_block = & ch_data -> dma_blk_cfg ;
269
+ ch_data -> dma_cfg .source_data_size = config -> source_data_size ;
270
+ ch_data -> dma_cfg .dest_data_size = config -> dest_data_size ;
271
+ ch_data -> dma_cfg .source_burst_length = config -> source_burst_length ;
272
+ ch_data -> dma_cfg .dma_callback = config -> dma_callback ;
273
+ ch_data -> dma_cfg .user_data = config -> user_data ;
274
+
275
+ LOG_DBG ("Configured channel %u for %08x to %08x (%u bytes)" ,
276
+ channel ,
277
+ block -> source_address ,
278
+ block -> dest_address ,
279
+ block -> block_size );
280
+ }
281
+ #else
236
282
LOG_DBG ("Configured channel %u for %08x to %08x (%u bytes)" ,
237
283
channel ,
238
284
block -> source_address ,
239
285
block -> dest_address ,
240
286
block -> block_size );
287
+ #endif
241
288
242
289
return 0 ;
243
290
}
@@ -279,6 +326,13 @@ static int dma_cc23x0_reload(const struct device *dev, uint32_t channel,
279
326
uDMASetChannelTransfer (& data -> desc [channel ], DMA_CC23_MODE (channel ),
280
327
(void * )src , (void * )dst , xfer_size );
281
328
329
+ #ifdef CONFIG_PM_DEVICE
330
+ /* Save context */
331
+ ch_data -> dma_blk_cfg .source_address = src ;
332
+ ch_data -> dma_blk_cfg .dest_address = dst ;
333
+ ch_data -> dma_blk_cfg .block_size = size ;
334
+ #endif
335
+
282
336
LOG_DBG ("Reloaded channel %u for %08x to %08x (%u bytes)" ,
283
337
channel , src , dst , size );
284
338
@@ -343,29 +397,83 @@ static int dma_cc23x0_get_status(const struct device *dev, uint32_t channel,
343
397
return 0 ;
344
398
}
345
399
346
- static int dma_cc23x0_init ( const struct device * dev )
400
/*
 * Power up the DMA controller: enable its clock, turn the uDMA block on and
 * point the hardware at the channel control table (descriptors).
 * Shared by driver init and PM resume.
 *
 * @param data Driver data holding the descriptor table.
 * @return Always 0.
 */
static int dma_cc23x0_enable(struct dma_cc23x0_data *data)
{
	/* Clock must be running before any uDMA register access */
	CLKCTLEnable(CLKCTL_BASE, CLKCTL_DMA);

	uDMAEnable();

	/* Set base address for channel control table (descriptors) */
	uDMASetControlBase(data->desc);

	return 0;
}
349
411
412
/*
 * Driver init: connect and enable the DMA controller interrupt, then power
 * the controller up via dma_cc23x0_enable().
 *
 * @param dev DMA controller device instance.
 * @return 0 on success (dma_cc23x0_enable() currently cannot fail).
 */
static int dma_cc23x0_init(const struct device *dev)
{
	IRQ_CONNECT(DT_INST_IRQN(0),
		    DT_INST_IRQ(0, priority),
		    dma_cc23x0_isr,
		    DEVICE_DT_INST_GET(0),
		    0);
	irq_enable(DT_INST_IRQN(0));

	/* Clock/enable/descriptor-table setup is shared with PM resume */
	return dma_cc23x0_enable(dev->data);
}
359
423
360
- /* Enable DMA */
361
- uDMAEnable ();
424
#ifdef CONFIG_PM_DEVICE

/*
 * Device power-management hook.
 *
 * SUSPEND: refuses with -EBUSY while any channel is still enabled, otherwise
 * disables the uDMA block and gates its clock.
 * RESUME: re-enables the controller and replays the saved configuration of
 * every channel that was configured before suspend.
 *
 * @param dev    DMA controller device instance.
 * @param action Requested PM transition.
 * @return 0 on success, -EBUSY if a transfer is ongoing, -ENOTSUP for
 *         unsupported actions.
 */
static int dma_cc23x0_pm_action(const struct device *dev, enum pm_device_action action)
{
	struct dma_cc23x0_data *data = dev->data;
	uint32_t ch;

	switch (action) {
	case PM_DEVICE_ACTION_SUSPEND:
		/*
		 * DMA clients (peripheral drivers or applications) are expected
		 * to hold a PM policy lock (pm_policy_state_lock_get/put) while
		 * a transfer is in flight, because on this SoC:
		 * - a peripheral channel signals completion on the peripheral's
		 *   own interrupt, handled in the DMA client driver;
		 * - a software channel (memory-to-memory) can signal completion
		 *   to the application through a callback.
		 * In both cases the client can release the lock at the right
		 * time, so no transfer should be ongoing when we get here.
		 *
		 * Still, guard against clients that mishandled the PM lock:
		 * refuse to suspend while any channel is enabled.
		 */
		if (uDMAIsChannelEnabled(DMA_CC23_ALL_CH_MASK)) {
			return -EBUSY;
		}

		uDMADisable();
		CLKCTLDisable(CLKCTL_BASE, CLKCTL_DMA);

		return 0;
	case PM_DEVICE_ACTION_RESUME:
		dma_cc23x0_enable(data);

		/* Restore context for the channels that were configured before */
		for (ch = 0; ch < ARRAY_SIZE(data->channels); ch++) {
			if (data->channels[ch].configured) {
				dma_cc23x0_config(dev, ch, &data->channels[ch].dma_cfg);
			}
		}

		return 0;
	default:
		return -ENOTSUP;
	}
}

#endif /* CONFIG_PM_DEVICE */
476
+
369
477
static struct dma_cc23x0_data cc23x0_data ;
370
478
371
479
static DEVICE_API (dma , dma_cc23x0_api ) = {
@@ -376,7 +484,10 @@ static DEVICE_API(dma, dma_cc23x0_api) = {
376
484
.get_status = dma_cc23x0_get_status ,
377
485
};
378
486
379
- DEVICE_DT_INST_DEFINE (0 , dma_cc23x0_init , NULL ,
487
/* Register the PM action hook for this instance */
PM_DEVICE_DT_INST_DEFINE(0, dma_cc23x0_pm_action);

DEVICE_DT_INST_DEFINE(0, dma_cc23x0_init,
		      PM_DEVICE_DT_INST_GET(0),
		      &cc23x0_data, NULL,
		      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,
		      &dma_cc23x0_api);
0 commit comments