dma-debug: add checks for sync_single_sg_*
lib/dma-debug.c
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
};

struct dma_debug_entry {
        struct list_head list;
        struct device    *dev;
        int              type;
        phys_addr_t      paddr;
        u64              dev_addr;
        u64              size;
        int              direction;
        int              sg_call_ents;
        int              sg_mapped_ents;
};

struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentries for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

static const char *type2name[4] = { "single", "page",
                                    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
#define err_printk(dev, format, arg...) do {                    \
                error_count += 1;                               \
                if (show_all_errors || show_num_errors > 0) {   \
                        WARN(1, "%s %s: " format,               \
                             dev_driver_string(dev),            \
                             dev_name(dev) , ## arg);           \
                }                                               \
                if (!show_all_errors && show_num_errors > 0)    \
                        show_num_errors -= 1;                   \
        } while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address.
         * We use bits 13-22 of the dma address as the index into the hash.
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
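
/*
 * Worked example for hash_fn() above (arbitrary address): for a dev_addr
 * of 0x12345000, 0x12345000 >> HASH_FN_SHIFT(13) = 0x91a2 and
 * 0x91a2 & HASH_FN_MASK(0x3ff) = 0x1a2, so the entry goes into hash
 * bucket 418.
 */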

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
                                                struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;

        list_for_each_entry(entry, &bucket->list, list) {
                if ((entry->dev_addr == ref->dev_addr) &&
                    (entry->dev == ref->dev))
                        return entry;
        }

        return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);
}

/* struct dma_debug_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entry.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry = NULL;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                printk(KERN_ERR "DMA-API: debugging out of memory "
                                "- disabling\n");
                global_disable = true;
                goto out;
        }

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

out:
        spin_unlock_irqrestore(&free_entries_lock, flags);

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
                        num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                        dma_debug_dent,
                        (u32 *)&global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                        dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                        dma_debug_dent,
                        &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                        dma_debug_dent,
                        &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                        dma_debug_dent,
                        &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                        dma_debug_dent,
                        &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                spin_lock_init(&dma_entry_hash[i].lock);
        }

        if (dma_debug_fs_init() != 0) {
                printk(KERN_ERR "DMA-API: error creating debugfs entries "
                                "- disabling\n");
                global_disable = true;

                return;
        }

        if (req_entries)
                num_entries = req_entries;

        if (prealloc_memory(num_entries) != 0) {
                printk(KERN_ERR "DMA-API: debugging out of memory error "
                                "- disabled\n");
                global_disable = true;

                return;
        }

        printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                printk(KERN_INFO "DMA-API: debugging disabled on kernel "
                                 "command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        int res;

        if (!str)
                return -EINVAL;

        res = get_option(&str, &req_entries);

        if (!res)
                req_entries = 0;

        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

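/*
 * Look up the dma_debug_entry that was stored at map time and compare it
 * against the unmap parameters. Reports frees of memory that was never
 * mapped as well as mismatched size, mapping type, CPU address (for
 * coherent allocations), scatter-gather entry count and DMA direction.
 * If an entry is found it is removed from the hash and returned to the
 * free list.
 */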
static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        if (dma_mapping_error(ref->dev, ref->dev_addr))
                return;

        bucket = get_hash_bucket(ref, &flags);
        entry = hash_bucket_find(bucket, ref);

        if (!entry) {
                err_printk(ref->dev, "DMA-API: device driver tries "
                           "to free DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (ref->paddr != entry->paddr)) {
                err_printk(ref->dev, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=%p] [cpu free address=%p]",
                           ref->dev_addr, ref->size,
                           (void *)entry->paddr, (void *)ref->paddr);
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may be no bug in reality - but most implementations of the
         * DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

out:
        put_hash_bucket(bucket, &flags);
}

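/*
 * Warn if a driver hands memory that lives on a kernel stack to the
 * DMA-API - DMA to or from the stack is almost always a driver bug.
 */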
static void check_for_stack(struct device *dev, void *addr)
{
        if (object_is_on_stack(addr))
                err_printk(dev, "DMA-API: device driver maps memory from stack"
                                " [addr=%p]\n", addr);
}

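/*
 * Common checker behind the dma_sync_* debug hooks. @offset is the offset
 * of the synced range within the original mapping, @to_cpu distinguishes
 * the sync_*_for_cpu (true) from the sync_*_for_device (false) variants.
 * Warns when a driver syncs memory it never mapped, syncs outside the
 * mapped range, or syncs with a direction that does not match the one
 * given at map time.
 */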
static void check_sync(struct device *dev, dma_addr_t addr,
                       u64 size, u64 offset, int direction, bool to_cpu)
{
        struct dma_debug_entry ref = {
                .dev            = dev,
                .dev_addr       = addr,
                .size           = size,
                .direction      = direction,
        };
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(&ref, &flags);

        entry = hash_bucket_find(bucket, &ref);

        if (!entry) {
                err_printk(dev, "DMA-API: device driver tries "
                                "to sync DMA memory it has not allocated "
                                "[device address=0x%016llx] [size=%llu bytes]\n",
                                addr, size);
                goto out;
        }

        if ((offset + size) > entry->size) {
                err_printk(dev, "DMA-API: device driver syncs"
                                " DMA memory outside allocated range "
                                "[device address=0x%016llx] "
                                "[allocation size=%llu bytes] [sync offset=%llu] "
                                "[sync size=%llu]\n", entry->dev_addr, entry->size,
                                offset, size);
        }

        if (direction != entry->direction) {
                err_printk(dev, "DMA-API: device driver syncs "
                                "DMA memory with different direction "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[direction]);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
                      !(direction == DMA_TO_DEVICE))
                err_printk(dev, "DMA-API: device driver syncs "
                                "device read-only DMA memory for cpu "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
                       !(direction == DMA_FROM_DEVICE))
                err_printk(dev, "DMA-API: device driver syncs "
                                "device write-only DMA memory to device "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[direction]);

out:
        put_hash_bucket(bucket, &flags);
}

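/*
 * Debug hook to be called from the dma_map_page()/dma_map_single()
 * implementations after a successful mapping; records the mapping in the
 * hash so that later unmap and sync calls can be checked against it.
 */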
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                        size_t size, int direction, dma_addr_t dma_addr,
                        bool map_single)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(dma_mapping_error(dev, dma_addr)))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->dev       = dev;
        entry->type      = dma_debug_page;
        entry->paddr     = page_to_phys(page) + offset;
        entry->dev_addr  = dma_addr;
        entry->size      = size;
        entry->direction = direction;

        if (map_single) {
                entry->type = dma_debug_single;
                check_for_stack(dev, page_address(page) + offset);
        }

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_page,
                .dev            = dev,
                .dev_addr       = addr,
                .size           = size,
                .direction      = direction,
        };

        if (unlikely(global_disable))
                return;

        if (map_single)
                ref.type = dma_debug_single;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                      int nents, int mapped_ents, int direction)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
                        return;

                entry->type           = dma_debug_sg;
                entry->dev            = dev;
                entry->paddr          = sg_phys(s);
                entry->size           = s->length;
                entry->dev_addr       = s->dma_address;
                entry->direction      = direction;
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;

                check_for_stack(dev, sg_virt(s));

                add_dma_entry(entry);
        }
}
EXPORT_SYMBOL(debug_dma_map_sg);

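/*
 * On unmap we only know how many entries the driver passed to dma_map_sg(),
 * not how many of them the implementation actually mapped. The mapped count
 * is therefore looked up from the first recorded entry and the loop stops
 * once that many entries have been checked.
 */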
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                        int nelems, int dir)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int mapped_ents = 0, i;
        unsigned long flags;

        if (unlikely(global_disable))
                return;

        for_each_sg(sglist, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type           = dma_debug_sg,
                        .dev            = dev,
                        .paddr          = sg_phys(s),
                        .dev_addr       = s->dma_address,
                        .size           = s->length,
                        .direction      = dir,
                        .sg_call_ents   = 0,
                };

                if (mapped_ents && i >= mapped_ents)
                        break;

                if (mapped_ents == 0) {
                        struct hash_bucket *bucket;
                        ref.sg_call_ents = nelems;
                        bucket = get_hash_bucket(&ref, &flags);
                        entry = hash_bucket_find(bucket, &ref);
                        if (entry)
                                mapped_ents = entry->sg_mapped_ents;
                        put_hash_bucket(bucket, &flags);
                }

                check_unmap(&ref);
        }
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t dma_addr, void *virt)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(virt == NULL))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type      = dma_debug_coherent;
        entry->dev       = dev;
        entry->paddr     = virt_to_phys(virt);
        entry->size      = size;
        entry->dev_addr  = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
                             void *virt, dma_addr_t addr)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_coherent,
                .dev            = dev,
                .paddr          = virt_to_phys(virt),
                .dev_addr       = addr,
                .size           = size,
                .direction      = DMA_BIDIRECTIONAL,
        };

        if (unlikely(global_disable))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
                                            dma_addr_t dma_handle,
                                            unsigned long offset,
                                            size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                                direction, true);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                  int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                                direction, false);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

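For reference, a minimal sketch of how an architecture's dma_sync_single_for_cpu() wrapper could invoke the debug hook exported above. The get_dma_ops() accessor and the dma_map_ops callback shown here are illustrative assumptions, not part of this file:

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                           size_t size,
                                           enum dma_data_direction dir)
{
        /* hypothetical ops lookup - the exact name differs per architecture */
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);

        /* let dma-debug check the sync against the recorded mapping */
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}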