drm/nv50: fix iommu errors caused by device reading from address 0
drivers/gpu/drm/drm_mm.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

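/*
 * A rough usage sketch (illustrative, not part of this file; it assumes
 * the drm_mm_get_block() inline wrapper from drm_mm.h, which calls
 * drm_mm_get_block_generic() with atomic == 0):
 *
 *      struct drm_mm mm;
 *      struct drm_mm_node *node;
 *
 *      drm_mm_init(&mm, 0, 1024 * 1024);
 *      node = drm_mm_search_free(&mm, 4096, 0, 0);
 *      if (node)
 *              node = drm_mm_get_block(node, 4096, 0);
 *      ...
 *      drm_mm_put_block(node);
 *      drm_mm_takedown(&mm);
 */
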
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

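/*
 * Return the size of the trailing free block, or 0 if the block at the
 * end of the managed range is in use.
 */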
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
        struct list_head *tail_node;
        struct drm_mm_node *entry;

        tail_node = mm->ml_entry.prev;
        entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
        if (!entry->free)
                return 0;

        return entry->size;
}

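/*
 * Shrink the trailing free block by 'size' bytes.  Fails unless the tail
 * block is free and strictly larger than 'size'.
 */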
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
        struct list_head *tail_node;
        struct drm_mm_node *entry;

        tail_node = mm->ml_entry.prev;
        entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
        if (!entry->free)
                return -ENOMEM;

        if (entry->size <= size)
                return -ENOMEM;

        entry->size -= size;
        return 0;
}

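/*
 * Allocate a node, falling back to the pre-allocated pool filled by
 * drm_mm_pre_get() when kmalloc() fails.  The fallback matters most for
 * atomic callers, which cannot sleep to reclaim memory.
 */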
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kmalloc(sizeof(*child), GFP_ATOMIC);
        else
                child = kmalloc(sizeof(*child), GFP_KERNEL);

        if (unlikely(child == NULL)) {
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
                                       struct drm_mm_node, fl_entry);
                        list_del(&child->fl_entry);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                spin_unlock(&mm->unused_lock);
                node = kmalloc(sizeof(*node), GFP_KERNEL);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->fl_entry, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
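
/*
 * A likely calling pattern (an assumption based on the code above, not
 * something this file spells out): a driver that must allocate while
 * holding a spinlock calls drm_mm_pre_get() beforehand from sleepable
 * context, then uses the atomic == 1 paths, e.g. the
 * drm_mm_get_block_atomic() wrapper declared in drm_mm.h, so that
 * drm_mm_kmalloc() can fall back to the pre-allocated nodes.
 */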

static int drm_mm_create_tail_node(struct drm_mm *mm,
                                   unsigned long start,
                                   unsigned long size, int atomic)
{
        struct drm_mm_node *child;

        child = drm_mm_kmalloc(mm, atomic);
        if (unlikely(child == NULL))
                return -ENOMEM;

        child->free = 1;
        child->size = size;
        child->start = start;
        child->mm = mm;

        list_add_tail(&child->ml_entry, &mm->ml_entry);
        list_add_tail(&child->fl_entry, &mm->fl_entry);

        return 0;
}

int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
        struct list_head *tail_node;
        struct drm_mm_node *entry;

        tail_node = mm->ml_entry.prev;
        entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
        if (!entry->free) {
                return drm_mm_create_tail_node(mm, entry->start + entry->size,
                                               size, atomic);
        }
        entry->size += size;
        return 0;
}

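/*
 * Split a new busy node of 'size' bytes off the start of the free node
 * 'parent'.  'parent' keeps its free-list linkage and shrinks by 'size';
 * the returned child is not on any free list.
 */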
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
                                                 unsigned long size,
                                                 int atomic)
{
        struct drm_mm_node *child;

        child = drm_mm_kmalloc(parent->mm, atomic);
        if (unlikely(child == NULL))
                return NULL;

        INIT_LIST_HEAD(&child->fl_entry);

        child->free = 0;
        child->size = size;
        child->start = parent->start;
        child->mm = parent->mm;

        list_add_tail(&child->ml_entry, &parent->ml_entry);

        parent->size -= size;
        parent->start += size;
        return child;
}

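/*
 * Carve 'size' bytes out of the free node 'node'.  If the node start is
 * misaligned, the gap up to the next aligned address is split off first
 * and immediately returned to the free list.  For example, with
 * node->start == 0x1003 and alignment == 0x1000, tmp is 3, so a
 * 0xffd-byte filler node is split off and node->start becomes 0x2000.
 */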
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
                                             unsigned long size,
                                             unsigned alignment,
                                             int atomic)
{
        struct drm_mm_node *align_splitoff = NULL;
        unsigned tmp = 0;

        if (alignment)
                tmp = node->start % alignment;

        if (tmp) {
                align_splitoff =
                    drm_mm_split_at_start(node, alignment - tmp, atomic);
                if (unlikely(align_splitoff == NULL))
                        return NULL;
        }

        if (node->size == size) {
                list_del_init(&node->fl_entry);
                node->free = 0;
        } else {
                node = drm_mm_split_at_start(node, size, atomic);
        }

        if (align_splitoff)
                drm_mm_put_block(align_splitoff);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

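/*
 * Range-restricted variant: in addition to alignment padding, any space
 * between node->start and 'start' counts as wasted and is split off and
 * freed, so the returned block begins no earlier than 'start'.
 */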
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long start,
                                                unsigned long end,
                                                int atomic)
{
        struct drm_mm_node *align_splitoff = NULL;
        unsigned tmp = 0;
        unsigned wasted = 0;

        if (node->start < start)
                wasted += start - node->start;
        if (alignment)
                tmp = ((node->start + wasted) % alignment);

        if (tmp)
                wasted += alignment - tmp;
        if (wasted) {
                align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
                if (unlikely(align_splitoff == NULL))
                        return NULL;
        }

        if (node->size == size) {
                list_del_init(&node->fl_entry);
                node->free = 0;
        } else {
                node = drm_mm_split_at_start(node, size, atomic);
        }

        if (align_splitoff)
                drm_mm_put_block(align_splitoff);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/*
 * Put a block. Merge with the previous and / or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
        struct drm_mm *mm = cur->mm;
        struct list_head *cur_head = &cur->ml_entry;
        struct list_head *root_head = &mm->ml_entry;
        struct drm_mm_node *prev_node = NULL;
        struct drm_mm_node *next_node;

        int merged = 0;

        if (cur_head->prev != root_head) {
                prev_node =
                    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
                if (prev_node->free) {
                        prev_node->size += cur->size;
                        merged = 1;
                }
        }
        if (cur_head->next != root_head) {
                next_node =
                    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
                if (next_node->free) {
                        if (merged) {
                                prev_node->size += next_node->size;
                                list_del(&next_node->ml_entry);
                                list_del(&next_node->fl_entry);
                                spin_lock(&mm->unused_lock);
                                if (mm->num_unused < MM_UNUSED_TARGET) {
                                        list_add(&next_node->fl_entry,
                                                 &mm->unused_nodes);
                                        ++mm->num_unused;
                                } else
                                        kfree(next_node);
                                spin_unlock(&mm->unused_lock);
                        } else {
                                next_node->size += cur->size;
                                next_node->start = cur->start;
                                merged = 1;
                        }
                }
        }
        if (!merged) {
                cur->free = 1;
                list_add(&cur->fl_entry, &mm->fl_entry);
        } else {
                list_del(&cur->ml_entry);
                spin_lock(&mm->unused_lock);
                if (mm->num_unused < MM_UNUSED_TARGET) {
                        list_add(&cur->fl_entry, &mm->unused_nodes);
                        ++mm->num_unused;
                } else
                        kfree(cur);
                spin_unlock(&mm->unused_lock);
        }
}
EXPORT_SYMBOL(drm_mm_put_block);

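/*
 * Walk the free stack linearly.  With best_match == 0 this is first fit:
 * return the first node that can hold 'size' bytes at the requested
 * alignment.  With best_match != 0 it is best fit: scan everything and
 * return the smallest node that still fits.
 */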
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
                                       unsigned long size,
                                       unsigned alignment, int best_match)
{
        struct list_head *list;
        const struct list_head *free_stack = &mm->fl_entry;
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;
        unsigned wasted;

        best = NULL;
        best_size = ~0UL;

        list_for_each(list, free_stack) {
                entry = list_entry(list, struct drm_mm_node, fl_entry);
                wasted = 0;

                if (entry->size < size)
                        continue;

                if (alignment) {
                        register unsigned tmp = entry->start % alignment;
                        if (tmp)
                                wasted += alignment - tmp;
                }

                if (entry->size >= size + wasted) {
                        if (!best_match)
                                return entry;
                        if (entry->size < best_size) {
                                best = entry;
                                best_size = entry->size;
                        }
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

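/*
 * As drm_mm_search_free(), but only consider free nodes that overlap
 * [start, end] and can hold an aligned block of 'size' bytes ending at
 * or before 'end'.
 */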
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long start,
                                                unsigned long end,
                                                int best_match)
{
        struct list_head *list;
        const struct list_head *free_stack = &mm->fl_entry;
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;
        unsigned wasted;

        best = NULL;
        best_size = ~0UL;

        list_for_each(list, free_stack) {
                entry = list_entry(list, struct drm_mm_node, fl_entry);
                wasted = 0;

                if (entry->size < size)
                        continue;

                if (entry->start > end || (entry->start+entry->size) < start)
                        continue;

                if (entry->start < start)
                        wasted += start - entry->start;

                if (alignment) {
                        register unsigned tmp = (entry->start + wasted) % alignment;
                        if (tmp)
                                wasted += alignment - tmp;
                }

                if (entry->size >= size + wasted &&
                    (entry->start + wasted + size) <= end) {
                        if (!best_match)
                                return entry;
                        if (entry->size < best_size) {
                                best = entry;
                                best_size = entry->size;
                        }
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

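/*
 * Nonzero when the allocation list holds exactly one node, i.e. only the
 * single free block created by drm_mm_init() remains and nothing is
 * allocated.
 */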
int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->ml_entry;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

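/*
 * Initialize the manager to cover [start, start + size) with a single
 * free node spanning the whole range.
 */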
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->ml_entry);
        INIT_LIST_HEAD(&mm->fl_entry);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        spin_lock_init(&mm->unused_lock);

        return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

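/*
 * Tear down the manager and drop the pre-allocated node pool.  All blocks
 * must have been returned first; if anything is still allocated the
 * takedown is refused with an error message.
 */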
void drm_mm_takedown(struct drm_mm *mm)
{
        struct list_head *bnode = mm->fl_entry.next;
        struct drm_mm_node *entry;
        struct drm_mm_node *next;

        entry = list_entry(bnode, struct drm_mm_node, fl_entry);

        if (entry->ml_entry.next != &mm->ml_entry ||
            entry->fl_entry.next != &mm->fl_entry) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        list_del(&entry->fl_entry);
        list_del(&entry->ml_entry);
        kfree(entry);

        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
                list_del(&entry->fl_entry);
                kfree(entry);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

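/*
 * Dump every block, followed by used/free totals, to the kernel log at
 * KERN_DEBUG level.
 */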
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;

        list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: %s\n",
                        prefix, entry->start, entry->start + entry->size,
                        entry->size, entry->free ? "free" : "used");
                total += entry->size;
                if (entry->free)
                        total_free += entry->size;
                else
                        total_used += entry->size;
        }
        printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
                total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;

        list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
                           entry->start, entry->start + entry->size,
                           entry->size, entry->free ? "free" : "used");
                total += entry->size;
                if (entry->free)
                        total_free += entry->size;
                else
                        total_used += entry->size;
        }
        seq_printf(m, "total: %lu, used %lu free %lu\n",
                   total, total_used, total_free);
        return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif