Skip to content

Commit 894371d

Browse files
committed
drivers: dma: cc23x0: Add power management
Add PM support to the cc23x0 DMA module.

Signed-off-by: Julien Panis <jpanis@baylibre.com>
1 parent 179045e commit 894371d

File tree

1 file changed

+164
-10
lines changed

1 file changed

+164
-10
lines changed

drivers/dma/dma_ti_cc23x0.c

Lines changed: 164 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,9 @@ LOG_MODULE_REGISTER(dma_cc23x0, CONFIG_DMA_LOG_LEVEL);
1212
#include <zephyr/device.h>
1313
#include <zephyr/drivers/dma.h>
1414
#include <zephyr/irq.h>
15+
#include <zephyr/pm/device.h>
16+
#include <zephyr/pm/policy.h>
17+
#include <zephyr/sys/atomic.h>
1518
#include <zephyr/sys/util.h>
1619

1720
#include <driverlib/clkctl.h>
@@ -45,17 +48,68 @@ LOG_MODULE_REGISTER(dma_cc23x0, CONFIG_DMA_LOG_LEVEL);
4548
#define DMA_CC23_IPID_MASK GENMASK(2, 0)
4649
#define DMA_CC23_CHXSEL_REG(ch) HWREG(EVTSVT_BASE + EVTSVT_O_DMACH0SEL + sizeof(uint32_t) * (ch))
4750

51+
#ifdef CONFIG_PM_DEVICE
52+
/*
53+
* For MEMORY_TO_MEMORY transfers, the transfer completion is signaled on
54+
* the DMA interrupt i.e. dma_cc23x0_isr(). So, this driver can take care
55+
* of PM lock/unlock for SW channels.
56+
*
57+
* For MEMORY_TO_PERIPHERAL and PERIPHERAL_TO_MEMORY transfers, we assume
58+
* that DMA clients (peripheral drivers) should take care of PM lock/unlock.
59+
* This assumption is made for that SoC because if a peripheral channel
60+
* is used, then the transfer completion is signaled on the peripheral's
61+
* interrupt, which is handled in the DMA client driver. This operating
62+
* mode is specific to this SoC.
63+
*
64+
* As a result, PM locks are defined in this driver for MEMORY_TO_MEMORY
65+
* channels only.
66+
*/
67+
#define DMA_CC23X0_PM_LOCK_COUNT (DMA_CC23_SW_CH_MAX - DMA_CC23_SW_CH_MIN + 1)
68+
69+
#define DMA_CC23_PERIPH_CH_MASK GENMASK(DMA_CC23_PERIPH_CH_MAX, 0)
70+
#endif
71+
4872
struct dma_cc23x0_channel {
4973
uint8_t data_size;
5074
dma_callback_t cb;
5175
void *user_data;
76+
#ifdef CONFIG_PM_DEVICE
77+
bool configured;
78+
struct dma_block_config dma_blk_cfg;
79+
struct dma_config dma_cfg;
80+
#endif
5281
};
5382

5483
struct dma_cc23x0_data {
5584
__aligned(1024) uDMAControlTableEntry desc[UDMA_NUM_CHANNELS];
5685
struct dma_cc23x0_channel channels[UDMA_NUM_CHANNELS];
86+
#ifdef CONFIG_PM_DEVICE
87+
ATOMIC_DEFINE(pm_lock, DMA_CC23X0_PM_LOCK_COUNT);
88+
#endif
5789
};
5890

91+
/*
 * Take the PM policy state locks for a SW (memory-to-memory) channel so the
 * SoC cannot enter RUNTIME_IDLE/STANDBY while the transfer is in flight.
 * The per-channel atomic bit guarantees the locks are taken at most once
 * until the matching _put() releases them.
 */
static inline void dma_cc23x0_pm_policy_state_lock_get(struct dma_cc23x0_data *data,
						       uint32_t pm_lock_id)
{
#ifdef CONFIG_PM_DEVICE
	uint32_t bit = pm_lock_id - DMA_CC23_SW_CH_MIN;

	/* Already locked for this channel: nothing to do */
	if (atomic_test_and_set_bit(data->pm_lock, bit)) {
		return;
	}

	pm_policy_state_lock_get(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
	pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
#endif
}
101+
102+
/*
 * Release the PM policy state locks taken by
 * dma_cc23x0_pm_policy_state_lock_get(). Safe to call when the locks are
 * not held: the atomic bit makes the release idempotent.
 */
static inline void dma_cc23x0_pm_policy_state_lock_put(struct dma_cc23x0_data *data,
						       uint32_t pm_lock_id)
{
#ifdef CONFIG_PM_DEVICE
	uint32_t bit = pm_lock_id - DMA_CC23_SW_CH_MIN;

	/* Not locked for this channel: nothing to release */
	if (!atomic_test_and_clear_bit(data->pm_lock, bit)) {
		return;
	}

	/* Release in reverse order of acquisition */
	pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
	pm_policy_state_lock_put(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
#endif
}
112+
59113
/*
60114
* If the channel is a software channel, then the completion will be signaled
61115
* on this DMA dedicated interrupt.
@@ -84,6 +138,8 @@ static void dma_cc23x0_isr(const struct device *dev)
84138
}
85139

86140
uDMAClearInt(done_flags & BIT(i));
141+
142+
dma_cc23x0_pm_policy_state_lock_put(data, i);
87143
}
88144
}
89145

@@ -117,6 +173,9 @@ static int dma_cc23x0_config(const struct device *dev, uint32_t channel,
117173
uint32_t xfer_size;
118174
uint32_t burst_len;
119175
int ret;
176+
#ifdef CONFIG_PM_DEVICE
177+
enum pm_device_state pm_state;
178+
#endif
120179

121180
if (channel >= UDMA_NUM_CHANNELS) {
122181
LOG_ERR("Invalid channel (%u)", channel);
@@ -233,24 +292,62 @@ static int dma_cc23x0_config(const struct device *dev, uint32_t channel,
233292
(void *)block->dest_address,
234293
xfer_size);
235294

295+
#ifdef CONFIG_PM_DEVICE
296+
pm_device_state_get(dev, &pm_state);
297+
298+
/*
299+
* Save context only if current function is not being called
300+
* from resume operation for restoring channel configuration
301+
*/
302+
if (pm_state == PM_DEVICE_STATE_ACTIVE) {
303+
ch_data->configured = true;
304+
305+
ch_data->dma_blk_cfg.source_address = block->source_address;
306+
ch_data->dma_blk_cfg.dest_address = block->dest_address;
307+
ch_data->dma_blk_cfg.source_addr_adj = block->source_addr_adj;
308+
ch_data->dma_blk_cfg.dest_addr_adj = block->dest_addr_adj;
309+
ch_data->dma_blk_cfg.block_size = block->block_size;
310+
311+
ch_data->dma_cfg.dma_slot = config->dma_slot;
312+
ch_data->dma_cfg.channel_direction = config->channel_direction;
313+
ch_data->dma_cfg.block_count = config->block_count;
314+
ch_data->dma_cfg.head_block = &ch_data->dma_blk_cfg;
315+
ch_data->dma_cfg.source_data_size = config->source_data_size;
316+
ch_data->dma_cfg.dest_data_size = config->dest_data_size;
317+
ch_data->dma_cfg.source_burst_length = config->source_burst_length;
318+
ch_data->dma_cfg.dma_callback = config->dma_callback;
319+
ch_data->dma_cfg.user_data = config->user_data;
320+
321+
LOG_DBG("Configured channel %u for %08x to %08x (%u bytes)",
322+
channel,
323+
block->source_address,
324+
block->dest_address,
325+
block->block_size);
326+
}
327+
#else
236328
LOG_DBG("Configured channel %u for %08x to %08x (%u bytes)",
237329
channel,
238330
block->source_address,
239331
block->dest_address,
240332
block->block_size);
333+
#endif
241334

242335
return 0;
243336
}
244337

245338
static int dma_cc23x0_start(const struct device *dev, uint32_t channel)
246339
{
340+
struct dma_cc23x0_data *data = dev->data;
341+
247342
if (uDMAIsChannelEnabled(BIT(channel))) {
248343
return 0;
249344
}
250345

251346
uDMAEnableChannel(BIT(channel));
252347

253348
if (DMA_CC23_IS_SW_CH(channel)) {
349+
dma_cc23x0_pm_policy_state_lock_get(data, channel);
350+
254351
/* Request DMA channel to start a memory to memory transfer */
255352
uDMARequestChannel(BIT(channel));
256353
}
@@ -260,8 +357,14 @@ static int dma_cc23x0_start(const struct device *dev, uint32_t channel)
260357

261358
static int dma_cc23x0_stop(const struct device *dev, uint32_t channel)
262359
{
360+
struct dma_cc23x0_data *data = dev->data;
361+
263362
uDMADisableChannel(BIT(channel));
264363

364+
if (DMA_CC23_IS_SW_CH(channel)) {
365+
dma_cc23x0_pm_policy_state_lock_put(data, channel);
366+
}
367+
265368
return 0;
266369
}
267370

@@ -279,6 +382,13 @@ static int dma_cc23x0_reload(const struct device *dev, uint32_t channel,
279382
uDMASetChannelTransfer(&data->desc[channel], DMA_CC23_MODE(channel),
280383
(void *)src, (void *)dst, xfer_size);
281384

385+
#ifdef CONFIG_PM_DEVICE
386+
/* Save context */
387+
ch_data->dma_blk_cfg.source_address = src;
388+
ch_data->dma_blk_cfg.dest_address = dst;
389+
ch_data->dma_blk_cfg.block_size = size;
390+
#endif
391+
282392
LOG_DBG("Reloaded channel %u for %08x to %08x (%u bytes)",
283393
channel, src, dst, size);
284394

@@ -343,29 +453,70 @@ static int dma_cc23x0_get_status(const struct device *dev, uint32_t channel,
343453
return 0;
344454
}
345455

346-
static int dma_cc23x0_init(const struct device *dev)
456+
static int dma_cc23x0_enable(struct dma_cc23x0_data *data)
347457
{
348-
struct dma_cc23x0_data *data = dev->data;
458+
CLKCTLEnable(CLKCTL_BASE, CLKCTL_DMA);
349459

460+
uDMAEnable();
461+
462+
/* Set base address for channel control table (descriptors) */
463+
uDMASetControlBase(data->desc);
464+
465+
return 0;
466+
}
467+
468+
static int dma_cc23x0_init(const struct device *dev)
469+
{
350470
IRQ_CONNECT(DT_INST_IRQN(0),
351471
DT_INST_IRQ(0, priority),
352472
dma_cc23x0_isr,
353473
DEVICE_DT_INST_GET(0),
354474
0);
355475
irq_enable(DT_INST_IRQN(0));
356476

357-
/* Enable clock */
358-
CLKCTLEnable(CLKCTL_BASE, CLKCTL_DMA);
477+
return dma_cc23x0_enable(dev->data);
478+
}
359479

360-
/* Enable DMA */
361-
uDMAEnable();
480+
#ifdef CONFIG_PM_DEVICE
362481

363-
/* Set base address for channel control table (descriptors) */
364-
uDMASetControlBase(data->desc);
482+
static int dma_cc23x0_pm_action(const struct device *dev, enum pm_device_action action)
483+
{
484+
struct dma_cc23x0_data *data = dev->data;
485+
int i = 0;
486+
487+
switch (action) {
488+
case PM_DEVICE_ACTION_SUSPEND:
489+
/*
490+
* Ensure that none transfer is ongoing in case PM state lock was not
491+
* properly handled by DMA clients.
492+
*/
493+
if (uDMAIsChannelEnabled(DMA_CC23_PERIPH_CH_MASK)) {
494+
return -EBUSY;
495+
}
365496

366-
return 0;
497+
uDMADisable();
498+
CLKCTLDisable(CLKCTL_BASE, CLKCTL_DMA);
499+
500+
return 0;
501+
case PM_DEVICE_ACTION_RESUME:
502+
dma_cc23x0_enable(data);
503+
504+
/* Restore context for the channels that were configured before */
505+
ARRAY_FOR_EACH_PTR(data->channels, ch_data) {
506+
if (ch_data->configured) {
507+
dma_cc23x0_config(dev, i, &ch_data->dma_cfg);
508+
}
509+
i++;
510+
}
511+
512+
return 0;
513+
default:
514+
return -ENOTSUP;
515+
}
367516
}
368517

518+
#endif /* CONFIG_PM_DEVICE */
519+
369520
static struct dma_cc23x0_data cc23x0_data;
370521

371522
static DEVICE_API(dma, dma_cc23x0_api) = {
@@ -376,7 +527,10 @@ static DEVICE_API(dma, dma_cc23x0_api) = {
376527
.get_status = dma_cc23x0_get_status,
377528
};
378529

379-
DEVICE_DT_INST_DEFINE(0, dma_cc23x0_init, NULL,
530+
PM_DEVICE_DT_INST_DEFINE(0, dma_cc23x0_pm_action);
531+
532+
DEVICE_DT_INST_DEFINE(0, dma_cc23x0_init,
533+
PM_DEVICE_DT_INST_GET(0),
380534
&cc23x0_data, NULL,
381535
PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,
382536
&dma_cc23x0_api);

0 commit comments

Comments
 (0)