
Commit fea1877

Petr Tesarik authored and Christoph Hellwig committed
swiotlb: add documentation and rename swiotlb_do_find_slots()
Add some kernel-doc comments and move the existing documentation of struct io_tlb_slot to its correct location. The latter was forgotten in commit 942a818 ("swiotlb: move struct io_tlb_slot to swiotlb.c").

Use the opportunity to give swiotlb_do_find_slots() a more descriptive name and make it clear how it differs from swiotlb_find_slots().

Signed-off-by: Petr Tesarik <petr.tesarik.ext@huawei.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
1 parent 05ee774 commit fea1877

File tree

include/linux/swiotlb.h
kernel/dma/swiotlb.c

2 files changed: +66 additions, −10 deletions


include/linux/swiotlb.h

Lines changed: 11 additions & 4 deletions
@@ -76,10 +76,6 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
  * @nslabs:	The number of IO TLB blocks (in groups of 64) between @start and
  *		@end. For default swiotlb, this is command line adjustable via
  *		setup_io_tlb_npages.
- * @list:	The free list describing the number of free entries available
- *		from each index.
- * @orig_addr:	The original address corresponding to a mapped entry.
- * @alloc_size:	Size of the allocated buffer.
  * @debugfs:	The dentry to debugfs.
  * @late_alloc:	%true if allocated using the page allocator
  * @force_bounce: %true if swiotlb bouncing is forced
@@ -111,6 +107,17 @@ struct io_tlb_mem {
 #endif
 };
 
+/**
+ * is_swiotlb_buffer() - check if a physical address belongs to a swiotlb
+ * @dev:	Device which has mapped the buffer.
+ * @paddr:	Physical address within the DMA buffer.
+ *
+ * Check if @paddr points into a bounce buffer.
+ *
+ * Return:
+ * * %true if @paddr points into a bounce buffer
+ * * %false otherwise
+ */
 static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
 {
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
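As a point of reference for the new kernel-doc, is_swiotlb_buffer() is the check a DMA sync/unmap path makes before copying data back out of a bounce buffer. A minimal, hypothetical caller sketch (not part of this patch; example_sync_for_cpu() is a made-up name, while swiotlb_sync_single_for_cpu() is an existing swiotlb API):

#include <linux/device.h>
#include <linux/swiotlb.h>
#include <linux/dma-direction.h>

/* Hypothetical sketch: bounce data back only if @paddr lies inside the swiotlb. */
static void example_sync_for_cpu(struct device *dev, phys_addr_t paddr,
				 size_t size, enum dma_data_direction dir)
{
	if (is_swiotlb_buffer(dev, paddr))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}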

kernel/dma/swiotlb.c

Lines changed: 55 additions & 6 deletions
@@ -62,6 +62,13 @@
 
 #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
 
+/**
+ * struct io_tlb_slot - IO TLB slot descriptor
+ * @orig_addr:	The original address corresponding to a mapped entry.
+ * @alloc_size:	Size of the allocated buffer.
+ * @list:	The free list describing the number of free entries available
+ *		from each index.
+ */
 struct io_tlb_slot {
 	phys_addr_t orig_addr;
 	size_t alloc_size;
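After this hunk the descriptor and its documentation finally sit side by side in swiotlb.c. The diff context is cut off before the last member; as a sketch, the struct reads roughly as below (the unsigned int type of @list is taken from the surrounding file, not from this hunk):

/* Sketch of the full descriptor as documented above. */
struct io_tlb_slot {
	phys_addr_t orig_addr;	/* original (non-bounced) address */
	size_t alloc_size;	/* size of the allocated buffer */
	unsigned int list;	/* free entries available from this index */
};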
@@ -635,11 +642,22 @@ static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
 }
 #endif /* CONFIG_DEBUG_FS */
 
-/*
- * Find a suitable number of IO TLB entries size that will fit this request and
- * allocate a buffer from that IO TLB pool.
+/**
+ * swiotlb_area_find_slots() - search for slots in one IO TLB memory area
+ * @dev:	Device which maps the buffer.
+ * @area_index:	Index of the IO TLB memory area to be searched.
+ * @orig_addr:	Original (non-bounced) IO buffer address.
+ * @alloc_size:	Total requested size of the bounce buffer,
+ *		including initial alignment padding.
+ * @alloc_align_mask:	Required alignment of the allocated buffer.
+ *
+ * Find a suitable sequence of IO TLB entries for the request and allocate
+ * a buffer from the given IO TLB memory area.
+ * This function takes care of locking.
+ *
+ * Return: Index of the first allocated slot, or -1 on error.
  */
-static int swiotlb_do_find_slots(struct device *dev, int area_index,
+static int swiotlb_area_find_slots(struct device *dev, int area_index,
 		phys_addr_t orig_addr, size_t alloc_size,
 		unsigned int alloc_align_mask)
 {
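The kernel-doc's "This function takes care of locking" refers to the per-area spinlock. A heavily condensed sketch of the renamed function's shape (the actual slot-search logic is elided, and the io_tlb_area details come from the surrounding file rather than this hunk):

/* Condensed sketch only, not the full allocator logic. */
static int swiotlb_area_find_slots(struct device *dev, int area_index,
		phys_addr_t orig_addr, size_t alloc_size,
		unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	struct io_tlb_area *area = mem->areas + area_index;
	unsigned long flags;
	int slot_index = -1;

	spin_lock_irqsave(&area->lock, flags);
	/* ... scan this area for a run of slots satisfying size and alignment ... */
	spin_unlock_irqrestore(&area->lock, flags);

	return slot_index;	/* index of the first allocated slot, or -1 */
}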
@@ -734,6 +752,19 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
 	return slot_index;
 }
 
+/**
+ * swiotlb_find_slots() - search for slots in the whole swiotlb
+ * @dev:	Device which maps the buffer.
+ * @orig_addr:	Original (non-bounced) IO buffer address.
+ * @alloc_size:	Total requested size of the bounce buffer,
+ *		including initial alignment padding.
+ * @alloc_align_mask:	Required alignment of the allocated buffer.
+ *
+ * Search through the whole software IO TLB to find a sequence of slots that
+ * match the allocation constraints.
+ *
+ * Return: Index of the first allocated slot, or -1 on error.
+ */
 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 		size_t alloc_size, unsigned int alloc_align_mask)
 {
@@ -742,8 +773,8 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 	int i = start, index;
 
 	do {
-		index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
-				alloc_align_mask);
+		index = swiotlb_area_find_slots(dev, i, orig_addr, alloc_size,
+				alloc_align_mask);
 		if (index >= 0)
 			return index;
 		if (++i >= mem->nareas)
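Read together with the context lines above, the caller's round-robin over areas looks roughly as follows after the rename (a reconstruction, not part of this hunk; the starting-area computation is taken from the surrounding file):

static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int start = raw_smp_processor_id() & (mem->nareas - 1);
	int i = start, index;

	do {
		index = swiotlb_area_find_slots(dev, i, orig_addr, alloc_size,
						alloc_align_mask);
		if (index >= 0)
			return index;
		if (++i >= mem->nareas)
			i = 0;	/* wrap around to the first area */
	} while (i != start);

	return -1;	/* no area could satisfy the request */
}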
@@ -755,13 +786,31 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 
 #ifdef CONFIG_DEBUG_FS
 
+/**
+ * mem_used() - get number of used slots in an allocator
+ * @mem:	Software IO TLB allocator.
+ *
+ * The result is accurate in this version of the function, because an atomic
+ * counter is available if CONFIG_DEBUG_FS is set.
+ *
+ * Return: Number of used slots.
+ */
 static unsigned long mem_used(struct io_tlb_mem *mem)
 {
 	return atomic_long_read(&mem->total_used);
 }
 
 #else /* !CONFIG_DEBUG_FS */
 
+/**
+ * mem_used() - get number of used slots in an allocator
+ * @mem:	Software IO TLB allocator.
+ *
+ * The result is not accurate, because there is no locking of individual
+ * areas.
+ *
+ * Return: Approximate number of used slots.
+ */
 static unsigned long mem_used(struct io_tlb_mem *mem)
 {
 	int i;
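For the !CONFIG_DEBUG_FS variant, "approximate" means the per-area counters are summed without taking any area lock, so concurrent allocations can be missed or double-counted. Roughly (a sketch; the field names come from the surrounding file, not from this hunk):

/* Sketch of the unlocked per-area summation (!CONFIG_DEBUG_FS case). */
static unsigned long mem_used(struct io_tlb_mem *mem)
{
	unsigned long used = 0;
	int i;

	for (i = 0; i < mem->nareas; i++)
		used += mem->areas[i].used;	/* unlocked read of each area */

	return used;
}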
