/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
20 #include <linux/dma-debug.h>
21 #include <linux/spinlock.h>
22 #include <linux/types.h>
23 #include <linux/list.h>
24 #include <linux/slab.h>
/* Number of buckets in dma_entry_hash; must stay a power of two so that
 * HASH_FN_MASK below is a valid bit mask. */
#define HASH_SIZE 1024ULL
/* Low-order dma-address bits ignored by the hash function */
#define HASH_FN_SHIFT 13
/* Selects a bucket index in [0, HASH_SIZE); valid only for power-of-two sizes */
#define HASH_FN_MASK (HASH_SIZE - 1)
/*
 * One tracked DMA mapping.
 * NOTE(review): the remaining members (device, dma address, size, type, ...)
 * are elided in this excerpt — confirm against the full file.
 */
struct dma_debug_entry {
	struct list_head list;	/* links entry into a hash bucket or free_entries */

/*
 * NOTE(review): the "struct hash_bucket {" opening line and its spinlock
 * member are elided here; the two lines below are the tail of that struct.
 */
	struct list_head list;	/* head of this bucket's dma_debug_entry list */
} __cacheline_aligned_in_smp;	/* pad to a cache line to avoid false sharing */
/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Current length of free_entries; protected by free_entries_lock */
static u32 num_free_entries;
/* Low-water mark of num_free_entries, for diagnostics */
static u32 min_free_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;
71 * Hash related functions
73 * Every DMA-API request is saved into a struct dma_debug_entry. To
74 * have quick access to these structs they are stored into a hash.
76 static int hash_fn(struct dma_debug_entry *entry)
79 * Hash function is based on the dma address.
80 * We use bits 20-27 here as the index into the hash
82 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
86 * Request exclusive access to a hash bucket for a given dma_debug_entry.
88 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
91 int idx = hash_fn(entry);
92 unsigned long __flags;
94 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
96 return &dma_entry_hash[idx];
100 * Give up exclusive access to the hash bucket
102 static void put_hash_bucket(struct hash_bucket *bucket,
103 unsigned long *flags)
105 unsigned long __flags = *flags;
107 spin_unlock_irqrestore(&bucket->lock, __flags);
111 * Search a given entry in the hash bucket list
113 static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
114 struct dma_debug_entry *ref)
116 struct dma_debug_entry *entry;
118 list_for_each_entry(entry, &bucket->list, list) {
119 if ((entry->dev_addr == ref->dev_addr) &&
120 (entry->dev == ref->dev))
128 * Add an entry to a hash bucket
130 static void hash_bucket_add(struct hash_bucket *bucket,
131 struct dma_debug_entry *entry)
133 list_add_tail(&entry->list, &bucket->list);
137 * Remove entry from a hash bucket list
139 static void hash_bucket_del(struct dma_debug_entry *entry)
141 list_del(&entry->list);
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}
158 /* struct dma_entry allocator
160 * The next two functions implement the allocator for
161 * struct dma_debug_entries.
163 static struct dma_debug_entry *dma_entry_alloc(void)
165 struct dma_debug_entry *entry = NULL;
168 spin_lock_irqsave(&free_entries_lock, flags);
170 if (list_empty(&free_entries)) {
171 printk(KERN_ERR "DMA-API: debugging out of memory "
173 global_disable = true;
177 entry = list_entry(free_entries.next, struct dma_debug_entry, list);
178 list_del(&entry->list);
179 memset(entry, 0, sizeof(*entry));
181 num_free_entries -= 1;
182 if (num_free_entries < min_free_entries)
183 min_free_entries = num_free_entries;
186 spin_unlock_irqrestore(&free_entries_lock, flags);
191 static void dma_entry_free(struct dma_debug_entry *entry)
196 * add to beginning of the list - this way the entries are
197 * more likely cache hot when they are reallocated.
199 spin_lock_irqsave(&free_entries_lock, flags);
200 list_add(&entry->list, &free_entries);
201 num_free_entries += 1;
202 spin_unlock_irqrestore(&free_entries_lock, flags);
206 * DMA-API debugging init code
208 * The init code does two things:
209 * 1. Initialize core data structures
210 * 2. Preallocate a given number of dma_debug_entry structs
213 static int prealloc_memory(u32 num_entries)
215 struct dma_debug_entry *entry, *next_entry;
218 for (i = 0; i < num_entries; ++i) {
219 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
223 list_add_tail(&entry->list, &free_entries);
226 num_free_entries = num_entries;
227 min_free_entries = num_entries;
229 printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
236 list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
237 list_del(&entry->list);
245 * Let the architectures decide how many entries should be preallocated.
247 void dma_debug_init(u32 num_entries)
254 for (i = 0; i < HASH_SIZE; ++i) {
255 INIT_LIST_HEAD(&dma_entry_hash[i].list);
256 dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
260 num_entries = req_entries;
262 if (prealloc_memory(num_entries) != 0) {
263 printk(KERN_ERR "DMA-API: debugging out of memory error "
265 global_disable = true;
270 printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
273 static __init int dma_debug_cmdline(char *str)
278 if (strncmp(str, "off", 3) == 0) {
279 printk(KERN_INFO "DMA-API: debugging disabled on kernel "
281 global_disable = true;
287 static __init int dma_debug_entries_cmdline(char *str)
294 res = get_option(&str, &req_entries);
/* Register the early-boot command line handlers defined above */
__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);