Skip to content

Commit 8ae0e97

Browse files
justin-he authored and Christoph Hellwig committed
dma-mapping: move dma_addressing_limited() out of line
This patch moves dma_addressing_limited() out of line, serving as a preliminary step to prevent the introduction of a new publicly accessible low-level helper when validating whether all system RAM is mapped within the DMA mapping range. Suggested-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jia He <justin.he@arm.com> Signed-off-by: Christoph Hellwig <hch@lst.de>
1 parent a5e3b12 commit 8ae0e97

File tree

2 files changed

+20
-14
lines changed

2 files changed

+20
-14
lines changed

include/linux/dma-mapping.h

Lines changed: 5 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -144,6 +144,7 @@ bool dma_pci_p2pdma_supported(struct device *dev);
144144
int dma_set_mask(struct device *dev, u64 mask);
145145
int dma_set_coherent_mask(struct device *dev, u64 mask);
146146
u64 dma_get_required_mask(struct device *dev);
147+
bool dma_addressing_limited(struct device *dev);
147148
size_t dma_max_mapping_size(struct device *dev);
148149
size_t dma_opt_mapping_size(struct device *dev);
149150
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
@@ -264,6 +265,10 @@ static inline u64 dma_get_required_mask(struct device *dev)
264265
{
265266
return 0;
266267
}
268+
static inline bool dma_addressing_limited(struct device *dev)
269+
{
270+
return false;
271+
}
267272
static inline size_t dma_max_mapping_size(struct device *dev)
268273
{
269274
return 0;
@@ -465,20 +470,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
465470
return dma_set_mask_and_coherent(dev, mask);
466471
}
467472

468-
/**
469-
* dma_addressing_limited - return if the device is addressing limited
470-
* @dev: device to check
471-
*
472-
* Return %true if the devices DMA mask is too small to address all memory in
473-
* the system, else %false. Lack of addressing bits is the prime reason for
474-
* bounce buffering, but might not be the only one.
475-
*/
476-
static inline bool dma_addressing_limited(struct device *dev)
477-
{
478-
return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
479-
dma_get_required_mask(dev);
480-
}
481-
482473
static inline unsigned int dma_get_max_seg_size(struct device *dev)
483474
{
484475
if (dev->dma_parms && dev->dma_parms->max_segment_size)

kernel/dma/mapping.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -793,6 +793,21 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
793793
}
794794
EXPORT_SYMBOL(dma_set_coherent_mask);
795795

796+
/**
797+
* dma_addressing_limited - return if the device is addressing limited
798+
* @dev: device to check
799+
*
800+
* Return %true if the devices DMA mask is too small to address all memory in
801+
* the system, else %false. Lack of addressing bits is the prime reason for
802+
* bounce buffering, but might not be the only one.
803+
*/
804+
bool dma_addressing_limited(struct device *dev)
805+
{
806+
return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
807+
dma_get_required_mask(dev);
808+
}
809+
EXPORT_SYMBOL_GPL(dma_addressing_limited);
810+
796811
size_t dma_max_mapping_size(struct device *dev)
797812
{
798813
const struct dma_map_ops *ops = get_dma_ops(dev);

0 commit comments

Comments (0)