dma-debug: add initialization code
lib/dma-debug.c
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
};
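
/*
 * The entry types above are meant to distinguish the DMA-API operations
 * being tracked: mappings made with dma_map_single(), dma_map_page(),
 * dma_map_sg() and dma_alloc_coherent() respectively.
 */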

struct dma_debug_entry {
        struct list_head list;
        struct device    *dev;
        int              type;
        phys_addr_t      paddr;
        u64              dev_addr;
        u64              size;
        int              direction;
        int              sg_call_ents;
        int              sg_mapped_ents;
};
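
/*
 * One dma_debug_entry describes a single DMA-API mapping: dev is the
 * device that created the mapping, type is one of the dma_debug_* values
 * above, paddr is the CPU physical address, dev_addr is the address
 * handed to the device, size and direction describe the mapping, and the
 * sg_*_ents counters record how many scatterlist entries were requested
 * and how many were actually mapped.
 */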

struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} __cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

static u32 num_free_entries;
static u32 min_free_entries;

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address.
         * We use bits 13-22 here as the index into the hash
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
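
/*
 * Worked example (illustrative, not part of the original code): with
 * HASH_FN_SHIFT == 13 and HASH_FN_MASK == 0x3ff, a dev_addr of 0x12345678
 * ends up in bucket (0x12345678 >> 13) & 0x3ff = 0x91a2 & 0x3ff = 0x1a2,
 * i.e. dma_entry_hash[418].
 */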

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
                                                struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;

        list_for_each_entry(entry, &bucket->list, list) {
                if ((entry->dev_addr == ref->dev_addr) &&
                    (entry->dev == ref->dev))
                        return entry;
        }

        return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);
}
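
/*
 * Illustrative sketch (not part of this patch): the lookup side of the
 * hash would be used the same way, e.g. by the checking code that later
 * patches add for the unmap paths. The function name and its use below
 * are assumptions for illustration only:
 *
 *      static void remove_dma_entry_sketch(struct dma_debug_entry *ref)
 *      {
 *              struct hash_bucket *bucket;
 *              struct dma_debug_entry *entry;
 *              unsigned long flags;
 *
 *              bucket = get_hash_bucket(ref, &flags);
 *              entry  = hash_bucket_find(bucket, ref);
 *              if (entry)
 *                      hash_bucket_del(entry);
 *              put_hash_bucket(bucket, &flags);
 *
 *              if (entry)
 *                      dma_entry_free(entry);
 *      }
 */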

/* struct dma_debug_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry = NULL;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                printk(KERN_ERR "DMA-API: debugging out of memory "
                                "- disabling\n");
                global_disable = true;
                goto out;
        }

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

out:
        spin_unlock_irqrestore(&free_entries_lock, flags);

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}
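
/*
 * Illustrative sketch (not part of this patch): how a mapping hook added
 * by later patches could combine the allocator with add_dma_entry(). The
 * function name and its arguments are assumptions for illustration only:
 *
 *      void debug_dma_map_single_sketch(struct device *dev, void *virt,
 *                                       size_t size, int direction,
 *                                       dma_addr_t dev_addr)
 *      {
 *              struct dma_debug_entry *entry;
 *
 *              if (global_disable)
 *                      return;
 *
 *              entry = dma_entry_alloc();
 *              if (!entry)
 *                      return;
 *
 *              entry->dev       = dev;
 *              entry->type      = dma_debug_single;
 *              entry->paddr     = virt_to_phys(virt);
 *              entry->dev_addr  = dev_addr;
 *              entry->size      = size;
 *              entry->direction = direction;
 *
 *              add_dma_entry(entry);
 *      }
 */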

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
                        num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                spin_lock_init(&dma_entry_hash[i].lock);
        }

        if (prealloc_memory(num_entries) != 0) {
                printk(KERN_ERR "DMA-API: debugging out of memory error "
                                "- disabled\n");
                global_disable = true;

                return;
        }

        printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}
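
/*
 * Illustrative sketch (not part of this file): an architecture enables the
 * checks by calling dma_debug_init() from its own init code and picking the
 * preallocation count. The constant and the initcall below are assumptions
 * for illustration only:
 *
 *      #define PREALLOC_DMA_DEBUG_ENTRIES      (1 << 16)
 *
 *      static int __init arch_dma_debug_init(void)
 *      {
 *              dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 *              return 0;
 *      }
 *      fs_initcall(arch_dma_debug_init);
 */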