@@ -96,6 +96,78 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha
 
 static DEVICE_ATTR_RO(pools);
 
+#ifdef DMAPOOL_DEBUG
+static void pool_check_block(struct dma_pool *pool, void *retval,
+			     unsigned int offset, gfp_t mem_flags)
+{
+	int i;
+	u8 *data = retval;
+	/* page->offset is stored in first 4 bytes */
+	for (i = sizeof(offset); i < pool->size; i++) {
+		if (data[i] == POOL_POISON_FREED)
+			continue;
+		dev_err(pool->dev, "%s %s, %p (corrupted)\n",
+			__func__, pool->name, retval);
+
+		/*
+		 * Dump the first 4 bytes even if they are not
+		 * POOL_POISON_FREED
+		 */
+		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
+			       data, pool->size, 1);
+		break;
+	}
+	if (!want_init_on_alloc(mem_flags))
+		memset(retval, POOL_POISON_ALLOCATED, pool->size);
+}
+
+static bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
+			  void *vaddr, dma_addr_t dma)
+{
+	unsigned int offset = vaddr - page->vaddr;
+	unsigned int chain = page->offset;
+
+	if ((dma - page->dma) != offset) {
+		dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
+			__func__, pool->name, vaddr, &dma);
+		return true;
+	}
+
+	while (chain < pool->allocation) {
+		if (chain != offset) {
+			chain = *(int *)(page->vaddr + chain);
+			continue;
+		}
+		dev_err(pool->dev, "%s %s, dma %pad already free\n",
+			__func__, pool->name, &dma);
+		return true;
+	}
+	memset(vaddr, POOL_POISON_FREED, pool->size);
+	return false;
+}
+
+static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
+{
+	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
+}
+#else
+static void pool_check_block(struct dma_pool *pool, void *retval,
+			     unsigned int offset, gfp_t mem_flags)
+
+{
+}
+
+static bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
+			  void *vaddr, dma_addr_t dma)
+{
+	return false;
+}
+
+static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
+{
+}
+#endif
+
 /**
  * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
  * @name: name of pool, for diagnostics
@@ -223,9 +295,7 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
 					 &page->dma, mem_flags);
 	if (page->vaddr) {
-#ifdef DMAPOOL_DEBUG
-		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
+		pool_init_page(pool, page);
 		pool_initialise_page(pool, page);
 		page->in_use = 0;
 		page->offset = 0;
@@ -245,9 +315,7 @@ static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
 {
 	dma_addr_t dma = page->dma;
 
-#ifdef DMAPOOL_DEBUG
-	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
+	pool_init_page(pool, page);
 	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
 	list_del(&page->page_list);
 	kfree(page);
@@ -336,29 +404,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 	page->offset = *(int *)(page->vaddr + offset);
 	retval = offset + page->vaddr;
 	*handle = offset + page->dma;
-#ifdef DMAPOOL_DEBUG
-	{
-		int i;
-		u8 *data = retval;
-		/* page->offset is stored in first 4 bytes */
-		for (i = sizeof(page->offset); i < pool->size; i++) {
-			if (data[i] == POOL_POISON_FREED)
-				continue;
-			dev_err(pool->dev, "%s %s, %p (corrupted)\n",
-				__func__, pool->name, retval);
-
-			/*
-			 * Dump the first 4 bytes even if they are not
-			 * POOL_POISON_FREED
-			 */
-			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
-				       data, pool->size, 1);
-			break;
-		}
-	}
-	if (!want_init_on_alloc(mem_flags))
-		memset(retval, POOL_POISON_ALLOCATED, pool->size);
-#endif
+	pool_check_block(pool, retval, offset, mem_flags);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	if (want_init_on_alloc(mem_flags))
@@ -394,7 +440,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 {
 	struct dma_page *page;
 	unsigned long flags;
-	unsigned int offset;
 
 	spin_lock_irqsave(&pool->lock, flags);
 	page = pool_find_page(pool, dma);
@@ -405,35 +450,16 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 		return;
 	}
 
-	offset = vaddr - page->vaddr;
 	if (want_init_on_free())
 		memset(vaddr, 0, pool->size);
-#ifdef DMAPOOL_DEBUG
-	if ((dma - page->dma) != offset) {
+	if (pool_page_err(pool, page, vaddr, dma)) {
 		spin_unlock_irqrestore(&pool->lock, flags);
-		dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
-			__func__, pool->name, vaddr, &dma);
 		return;
 	}
-	{
-		unsigned int chain = page->offset;
-		while (chain < pool->allocation) {
-			if (chain != offset) {
-				chain = *(int *)(page->vaddr + chain);
-				continue;
-			}
-			spin_unlock_irqrestore(&pool->lock, flags);
-			dev_err(pool->dev, "%s %s, dma %pad already free\n",
-				__func__, pool->name, &dma);
-			return;
-		}
-	}
-	memset(vaddr, POOL_POISON_FREED, pool->size);
-#endif
 
 	page->in_use--;
 	*(int *)vaddr = page->offset;
-	page->offset = offset;
+	page->offset = vaddr - page->vaddr;
 	/*
 	 * Resist a temptation to do
 	 *    if (!is_page_busy(page)) pool_free_page(pool, page);