drm/ttm: fix two bugs in new placement routines.
drivers/gpu/drm/ttm/ttm_bo.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* Notes:
 *
 * We store a bo pointer in the drm_mm_node struct so we know which bo
 * owns a specific node. The pointer itself is unprotected, so to keep
 * things consistent you must only access it while holding the global
 * lru lock, and you must reset it to NULL whenever you free a node.
 */
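
/*
 * Example (illustrative sketch only): the convention above means that
 * code releasing a node always clears the back pointer under the lru
 * lock, as the cleanup paths later in this file do:
 *
 *	spin_lock(&glob->lru_lock);
 *	if (bo->mem.mm_node) {
 *		bo->mem.mm_node->private = NULL;
 *		drm_mm_put_block(bo->mem.mm_node);
 *		bo->mem.mm_node = NULL;
 *	}
 *	spin_unlock(&glob->lru_lock);
 */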

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
        .name = "bo_count",
        .mode = S_IRUGO
};

static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
        int i;

        for (i = 0; i <= TTM_PL_PRIV5; i++)
                if (flags & (1 << i)) {
                        *mem_type = i;
                        return 0;
                }
        return -EINVAL;
}

static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
                                        struct ttm_mem_type_manager *man)
{
        printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
        printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
        printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
        printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
        printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
        printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
        printk(KERN_ERR TTM_PFX "    size: %ld\n", (unsigned long)man->size);
        printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
                man->available_caching);
        printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
                man->default_caching);
        spin_lock(&glob->lru_lock);
        drm_mm_debug_table(&man->manager, TTM_PFX);
        spin_unlock(&glob->lru_lock);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                        struct ttm_placement *placement)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_type_manager *man;
        int i, ret, mem_type;

        printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
                bo, bo->mem.num_pages, bo->mem.size >> 10,
                bo->mem.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                                &mem_type);
                if (ret)
                        return;
                man = &bdev->man[mem_type];
                printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
                        i, placement->placement[i], mem_type);
                ttm_mem_type_manager_debug(glob, man);
        }
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
                                  struct attribute *attr,
                                  char *buffer)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        return snprintf(buffer, PAGE_SIZE, "%lu\n",
                        (unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
        &ttm_bo_count,
        NULL
};

static struct sysfs_ops ttm_bo_global_ops = {
        .show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
        .release = &ttm_bo_global_kobj_release,
        .sysfs_ops = &ttm_bo_global_ops,
        .default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
        return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;

        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->sync_obj != NULL);
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));

        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
        if (bo->destroy)
                bo->destroy(bo);
        else {
                ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
                kfree(bo);
        }
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
        if (interruptible) {
                int ret;

                ret = wait_event_interruptible(bo->event_queue,
                                               atomic_read(&bo->reserved) == 0);
                if (unlikely(ret != 0))
                        return ret;
        } else {
                wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);

static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        BUG_ON(!atomic_read(&bo->reserved));

        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

                BUG_ON(!list_empty(&bo->lru));

                man = &bdev->man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
                kref_get(&bo->list_kref);

                if (bo->ttm != NULL) {
                        list_add_tail(&bo->swap, &bo->glob->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        int put_count = 0;

        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                ++put_count;
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                ++put_count;
        }

        /*
         * TODO: Add a driver hook to delete from
         * driver-specific LRUs here.
         */

        return put_count;
}

int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (use_sequence && bo->seq_valid &&
                        (sequence - bo->val_seq < (1 << 31))) {
                        return -EAGAIN;
                }

                if (no_wait)
                        return -EBUSY;

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
                spin_lock(&glob->lru_lock);

                if (unlikely(ret))
                        return ret;
        }

        if (use_sequence) {
                bo->val_seq = sequence;
                bo->seq_valid = true;
        } else {
                bo->seq_valid = false;
        }

        return 0;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
        BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int put_count = 0;
        int ret;

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                    sequence);
        if (likely(ret == 0))
                put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve);

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        struct ttm_bo_global *glob = bo->glob;

        spin_lock(&glob->lru_lock);
        ttm_bo_add_to_lru(bo);
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
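
/*
 * Example (minimal illustrative sketch, not from the original file): a
 * typical caller brackets buffer manipulation with a reserve/unreserve
 * pair, backing off on -EAGAIN when a sequence collision is reported:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... operate on the reserved buffer ...
 *	ttm_bo_unreserve(bo);
 */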

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        uint32_t page_flags = 0;

        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
                /* fall through */
        case ttm_bo_type_kernel:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_user:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags | TTM_PAGE_FLAG_USER,
                                        glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL)) {
                        ret = -ENOMEM;
                        break;
                }

                ret = ttm_tt_set_user(bo->ttm, current,
                                      bo->buffer_start, bo->num_pages);
                if (unlikely(ret != 0))
                        ttm_tt_destroy(bo->ttm);
                break;
        default:
                printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
                ttm_bo_unmap_virtual(bo);

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
                ret = ttm_bo_add_ttm(bo, false);
                if (ret)
                        goto out_err;

                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                        bo->mem = *mem;
                        mem->mm_node = NULL;
                        goto moved;
                }
        }

        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, mem);

        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait, mem);
        else
                ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

        if (ret)
                goto out_err;

moved:
        if (bo->evicted) {
                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
                if (ret)
                        printk(KERN_ERR TTM_PFX "Cannot flush read caches\n");
                bo->evicted = false;
        }

        if (bo->mem.mm_node) {
                spin_lock(&bo->lock);
                bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
                spin_unlock(&bo->lock);
        }

        return 0;

out_err:
        new_man = &bdev->man[bo->mem.mem_type];
        if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}

/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 *   up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;

        spin_lock(&bo->lock);
        (void) ttm_bo_wait(bo, false, false, !remove_all);

        if (!bo->sync_obj) {
                int put_count;

                spin_unlock(&bo->lock);

                spin_lock(&glob->lru_lock);
                ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
                BUG_ON(ret);
                if (bo->ttm)
                        ttm_tt_unbind(bo->ttm);

                if (!list_empty(&bo->ddestroy)) {
                        list_del_init(&bo->ddestroy);
                        kref_put(&bo->list_kref, ttm_bo_ref_bug);
                }
                if (bo->mem.mm_node) {
                        bo->mem.mm_node->private = NULL;
                        drm_mm_put_block(bo->mem.mm_node);
                        bo->mem.mm_node = NULL;
                }
                put_count = ttm_bo_del_from_lru(bo);
                spin_unlock(&glob->lru_lock);

                atomic_set(&bo->reserved, 0);

                while (put_count--)
                        kref_put(&bo->list_kref, ttm_bo_release_list);

                return 0;
        }

        spin_lock(&glob->lru_lock);
        if (list_empty(&bo->ddestroy)) {
                void *sync_obj = bo->sync_obj;
                void *sync_obj_arg = bo->sync_obj_arg;

                kref_get(&bo->list_kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
                spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);

                if (sync_obj)
                        driver->sync_obj_flush(sync_obj, sync_obj_arg);
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
                ret = 0;
        } else {
                spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);
                ret = -EBUSY;
        }

        return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry, *nentry;
        struct list_head *list, *next;
        int ret;

        spin_lock(&glob->lru_lock);
        list_for_each_safe(list, next, &bdev->ddestroy) {
                entry = list_entry(list, struct ttm_buffer_object, ddestroy);
                nentry = NULL;

                /*
                 * Protect the next list entry from destruction while we
                 * unlock the lru_lock.
                 */

                if (next != &bdev->ddestroy) {
                        nentry = list_entry(next, struct ttm_buffer_object,
                                            ddestroy);
                        kref_get(&nentry->list_kref);
                }
                kref_get(&entry->list_kref);

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_cleanup_refs(entry, remove_all);
                kref_put(&entry->list_kref, ttm_bo_release_list);

                spin_lock(&glob->lru_lock);
                if (nentry) {
                        bool next_onlist = !list_empty(next);
                        spin_unlock(&glob->lru_lock);
                        kref_put(&nentry->list_kref, ttm_bo_release_list);
                        spin_lock(&glob->lru_lock);
                        /*
                         * Someone might have raced us and removed the
                         * next entry from the list. We don't bother restarting
                         * list traversal.
                         */

                        if (!next_onlist)
                                break;
                }
                if (ret)
                        break;
        }
        ret = !list_empty(&bdev->ddestroy);
        spin_unlock(&glob->lru_lock);

        return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
}

static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;

        if (likely(bo->vm_node != NULL)) {
                rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
                drm_mm_put_block(bo->vm_node);
                bo->vm_node = NULL;
        }
        write_unlock(&bdev->vm_lock);
        ttm_bo_cleanup_refs(bo, false);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo = *p_bo;
        struct ttm_bo_device *bdev = bo->bdev;

        *p_bo = NULL;
        write_lock(&bdev->vm_lock);
        kref_put(&bo->kref, ttm_bo_release);
        write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                        bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_reg evict_mem;
        struct ttm_placement placement;
        int ret = 0;

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);

        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to expire sync object before "
                               "buffer eviction.\n");
                }
                goto out;
        }

        BUG_ON(!atomic_read(&bo->reserved));

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;

        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
                               no_wait);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to find memory space for "
                               "buffer 0x%p eviction.\n", bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
                spin_lock(&glob->lru_lock);
                if (evict_mem.mm_node) {
                        evict_mem.mm_node->private = NULL;
                        drm_mm_put_block(evict_mem.mm_node);
                        evict_mem.mm_node = NULL;
                }
                spin_unlock(&glob->lru_lock);
                goto out;
        }
        bo->evicted = true;
out:
        return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                                uint32_t mem_type,
                                bool interruptible, bool no_wait)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_buffer_object *bo;
        int ret, put_count = 0;

        spin_lock(&glob->lru_lock);
        bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
        kref_get(&bo->list_kref);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
        if (likely(ret == 0))
                put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);
        if (unlikely(ret != 0))
                return ret;
        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);
        ret = ttm_bo_evict(bo, interruptible, no_wait);
        ttm_bo_unreserve(bo);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}

static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
                                struct ttm_mem_type_manager *man,
                                struct ttm_placement *placement,
                                struct ttm_mem_reg *mem,
                                struct drm_mm_node **node)
{
        struct ttm_bo_global *glob = bo->glob;
        unsigned long lpfn;
        int ret;

        lpfn = placement->lpfn;
        if (!lpfn)
                lpfn = man->size;
        *node = NULL;
        do {
                ret = drm_mm_pre_get(&man->manager);
                if (unlikely(ret))
                        return ret;

                spin_lock(&glob->lru_lock);
                *node = drm_mm_search_free_in_range(&man->manager,
                                        mem->num_pages, mem->page_alignment,
                                        placement->fpfn, lpfn, 1);
                if (unlikely(*node == NULL)) {
                        spin_unlock(&glob->lru_lock);
                        return 0;
                }
                *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
                                                        mem->page_alignment,
                                                        placement->fpfn,
                                                        lpfn);
                spin_unlock(&glob->lru_lock);
        } while (*node == NULL);
        return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        uint32_t mem_type,
                                        struct ttm_placement *placement,
                                        struct ttm_mem_reg *mem,
                                        bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct drm_mm_node *node;
        int ret;

        do {
                ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
                if (unlikely(ret != 0))
                        return ret;
                if (node)
                        break;
                spin_lock(&glob->lru_lock);
                if (list_empty(&man->lru)) {
                        spin_unlock(&glob->lru_lock);
                        break;
                }
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
                                          no_wait);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
        if (node == NULL)
                return -ENOMEM;
        mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
                                      uint32_t cur_placement,
                                      uint32_t proposed_placement)
{
        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

        /*
         * Keep current caching if possible.
         */

        if ((cur_placement & caching) != 0)
                result |= (cur_placement & caching);
        else if ((man->default_caching & caching) != 0)
                result |= man->default_caching;
        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
                result |= TTM_PL_FLAG_CACHED;
        else if ((TTM_PL_FLAG_WC & caching) != 0)
                result |= TTM_PL_FLAG_WC;
        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
                result |= TTM_PL_FLAG_UNCACHED;

        return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
                                 uint32_t proposed_placement,
                                 uint32_t *masked_placement)
{
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
                return false;

        if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;

        if ((proposed_placement & man->available_caching) == 0)
                return false;

        cur_flags |= (proposed_placement & man->available_caching);

        *masked_placement = cur_flags;
        return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_mem_reg *mem,
                        bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_erestartsys = false;
        struct drm_mm_node *node = NULL;
        int i, ret;

        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                                &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];

                type_ok = ttm_bo_mt_compatible(man,
                                                bo->type == ttm_bo_type_user,
                                                mem_type,
                                                placement->placement[i],
                                                &cur_flags);

                if (!type_ok)
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the requested placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, placement->placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM)
                        break;

                if (man->has_type && man->use_type) {
                        type_found = true;
                        ret = ttm_bo_man_get_node(bo, man, placement, mem,
                                                        &node);
                        if (unlikely(ret))
                                return ret;
                }
                if (node)
                        break;
        }

        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
                mem->mm_node = node;
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                if (node)
                        node->private = bo;
                return 0;
        }

        if (!type_found)
                return -EINVAL;

        for (i = 0; i < placement->num_busy_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->busy_placement[i],
                                                &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
                if (!man->has_type)
                        continue;
                if (!ttm_bo_mt_compatible(man,
                                                bo->type == ttm_bo_type_user,
                                                mem_type,
                                                placement->busy_placement[i],
                                                &cur_flags))
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the requested placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, placement->busy_placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
                                                interruptible, no_wait);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        mem->mm_node->private = bo;
                        return 0;
                }
                if (ret == -ERESTARTSYS)
                        has_erestartsys = true;
        }
        ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
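
/*
 * Example (illustrative sketch only; my_bo_vram_gtt_placement and the
 * flag array are hypothetical driver data): callers drive the two-phase
 * search above through a struct ttm_placement, where @placement lists
 * the preferred domains tried first and @busy_placement lists fallbacks
 * that may evict:
 *
 *	static const uint32_t flags[] = {
 *		TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *		TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
 *	};
 *	static struct ttm_placement my_bo_vram_gtt_placement = {
 *		.fpfn = 0,
 *		.lpfn = 0,
 *		.num_placement = 2,
 *		.placement = flags,
 *		.num_busy_placement = 2,
 *		.busy_placement = flags,
 *	};
 */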

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
        if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
                return -EBUSY;

        return wait_event_interruptible(bo->event_queue,
                                        atomic_read(&bo->cpu_writers) == 0);
}
EXPORT_SYMBOL(ttm_bo_wait_cpu);

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        bool interruptible, bool no_wait)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        struct ttm_mem_reg mem;

        BUG_ON(!atomic_read(&bo->reserved));

        /*
         * FIXME: It's possible to pipeline buffer moves.
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);
        if (ret)
                return ret;
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
        /*
         * Determine where to move the buffer.
         */
        ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
        if (ret)
                goto out_unlock;
        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
out_unlock:
        if (ret && mem.mm_node) {
                spin_lock(&glob->lru_lock);
                mem.mm_node->private = NULL;
                drm_mm_put_block(mem.mm_node);
                spin_unlock(&glob->lru_lock);
        }
        return ret;
}

static int ttm_bo_mem_compat(struct ttm_placement *placement,
                             struct ttm_mem_reg *mem)
{
        int i;

        for (i = 0; i < placement->num_placement; i++) {
                if ((placement->placement[i] & mem->placement &
                        TTM_PL_MASK_CACHING) &&
                        (placement->placement[i] & mem->placement &
                        TTM_PL_MASK_MEM))
                        return i;
        }
        return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        bool interruptible, bool no_wait)
{
        int ret;

        BUG_ON(!atomic_read(&bo->reserved));
        /* Check that the range is valid. */
        if (placement->lpfn || placement->fpfn)
                if (placement->fpfn > placement->lpfn ||
                        (placement->lpfn - placement->fpfn) < bo->num_pages)
                        return -EINVAL;
        /*
         * Check whether we need to move the buffer.
         */
        ret = ttm_bo_mem_compat(placement, &bo->mem);
        if (ret < 0) {
                ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
                if (ret)
                        return ret;
        } else {
                /*
                 * Copy the access and other non-mapping-related flag bits
                 * from the compatible memory placement flags into the
                 * active flags.
                 */
                ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
                                ~TTM_PL_MASK_MEMTYPE);
        }
        /*
         * We might need to add a TTM.
         */
        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
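
/*
 * Example (minimal illustrative sketch only): a driver pinning an
 * already-created buffer would reserve it, validate it against a
 * placement such as the hypothetical one sketched above, and
 * unreserve it:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = ttm_bo_validate(bo, &my_bo_vram_gtt_placement, true, false);
 *	ttm_bo_unreserve(bo);
 *	return ret;
 */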

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
{
        int i;

        if (placement->fpfn || placement->lpfn) {
                if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
                        printk(KERN_ERR TTM_PFX "Page number range too small. "
                                "Need %lu pages, range is [%u, %u]\n",
                                bo->mem.num_pages, placement->fpfn,
                                placement->lpfn);
                        return -EINVAL;
                }
        }
        for (i = 0; i < placement->num_placement; i++) {
                if (!capable(CAP_SYS_ADMIN)) {
                        if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
                                printk(KERN_ERR TTM_PFX "Need to be root to "
                                        "modify NO_EVICT status.\n");
                                return -EINVAL;
                        }
                }
        }
        for (i = 0; i < placement->num_busy_placement; i++) {
                if (!capable(CAP_SYS_ADMIN)) {
                        if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
                                printk(KERN_ERR TTM_PFX "Need to be root to "
                                        "modify NO_EVICT status.\n");
                                return -EINVAL;
                        }
                }
        }
        return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                unsigned long size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                unsigned long buffer_start,
                bool interruptible,
                struct file *persistant_swap_storage,
                size_t acc_size,
                void (*destroy) (struct ttm_buffer_object *))
{
        int ret = 0;
        unsigned long num_pages;

        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
                return -EINVAL;
        }
        bo->destroy = destroy;

        spin_lock_init(&bo->lock);
        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
        atomic_set(&bo->reserved, 1);
        init_waitqueue_head(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        bo->bdev = bdev;
        bo->glob = bdev->glob;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->seq_valid = false;
        bo->persistant_swap_storage = persistant_swap_storage;
        bo->acc_size = acc_size;
        atomic_inc(&bo->glob->bo_count);

        ret = ttm_bo_check_placement(bo, placement);
        if (unlikely(ret != 0))
                goto out_err;

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device) {
                ret = ttm_bo_setup_vm(bo);
                if (ret)
                        goto out_err;
        }

        ret = ttm_bo_validate(bo, placement, interruptible, false);
        if (ret)
                goto out_err;

        ttm_bo_unreserve(bo);
        return 0;

out_err:
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
                                 unsigned long num_pages)
{
        size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
            PAGE_MASK;

        return glob->ttm_bo_size + 2 * page_array_size;
}

int ttm_bo_create(struct ttm_bo_device *bdev,
                        unsigned long size,
                        enum ttm_bo_type type,
                        struct ttm_placement *placement,
                        uint32_t page_alignment,
                        unsigned long buffer_start,
                        bool interruptible,
                        struct file *persistant_swap_storage,
                        struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        size_t acc_size;
        int ret;

        acc_size = ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0))
                return ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (unlikely(bo == NULL)) {
                ttm_mem_global_free(mem_glob, acc_size);
                return -ENOMEM;
        }

        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
                                buffer_start, interruptible,
                                persistant_swap_storage, acc_size, NULL);
        if (likely(ret == 0))
                *p_bo = bo;

        return ret;
}
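
/*
 * Example (minimal illustrative sketch only; my_bdev and the placement
 * are hypothetical): creating and later releasing a kernel buffer
 * object with the helper above:
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = ttm_bo_create(my_bdev, 1024 * 1024, ttm_bo_type_kernel,
 *			    &my_bo_vram_gtt_placement, 0, 0, false,
 *			    NULL, &bo);
 *	if (ret == 0)
 *		ttm_bo_unref(&bo);
 */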

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                                        unsigned mem_type, bool allow_errors)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_bo_global *glob = bdev->glob;
        int ret;

        /*
         * Can't use standard list traversal since we're unlocking.
         */

        spin_lock(&glob->lru_lock);
        while (!list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, false, false);
                if (ret) {
                        if (allow_errors) {
                                return ret;
                        } else {
                                printk(KERN_ERR TTM_PFX
                                        "Cleanup eviction failed\n");
                        }
                }
                spin_lock(&glob->lru_lock);
        }
        spin_unlock(&glob->lru_lock);
        return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man;
        int ret = -EINVAL;

        if (mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
                return ret;
        }
        man = &bdev->man[mem_type];

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
                       "memory manager type %u\n", mem_type);
                return ret;
        }

        man->use_type = false;
        man->has_type = false;

        ret = 0;
        if (mem_type > 0) {
                ttm_bo_force_list_clean(bdev, mem_type, false);

                spin_lock(&glob->lru_lock);
                if (drm_mm_clean(&man->manager))
                        drm_mm_takedown(&man->manager);
                else
                        ret = -EBUSY;

                spin_unlock(&glob->lru_lock);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX
                       "Illegal memory manager memory type %u.\n",
                       mem_type);
                return -EINVAL;
        }

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory type %u has not been initialized.\n",
                       mem_type);
                return 0;
        }

        return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                        unsigned long p_size)
{
        int ret = -EINVAL;
        struct ttm_mem_type_manager *man;

        if (type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
                return ret;
        }

        man = &bdev->man[type];
        if (man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory manager already initialized for type %d\n",
                       type);
                return ret;
        }

        ret = bdev->driver->init_mem_type(bdev, type, man);
        if (ret)
                return ret;

        ret = 0;
        if (type != TTM_PL_SYSTEM) {
                if (!p_size) {
                        printk(KERN_ERR TTM_PFX
                               "Zero size memory manager type %d\n",
                               type);
                        return -EINVAL;
                }
                ret = drm_mm_init(&man->manager, 0, p_size);
                if (ret)
                        return ret;
        }
        man->has_type = true;
        man->use_type = true;
        man->size = p_size;

        INIT_LIST_HEAD(&man->lru);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
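
/*
 * Example (illustrative sketch only; the device pointer and sizes are
 * hypothetical): after ttm_bo_device_init() has set up the mandatory
 * system type, a driver registers its discrete memory types, sized in
 * pages:
 *
 *	ret = ttm_bo_init_mm(my_bdev, TTM_PL_VRAM,
 *			     vram_size >> PAGE_SHIFT);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = ttm_bo_init_mm(my_bdev, TTM_PL_TT,
 *			     gtt_size >> PAGE_SHIFT);
 */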

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
        __free_page(glob->dummy_read_page);
        kfree(glob);
}

void ttm_bo_global_release(struct ttm_global_reference *ref)
{
        struct ttm_bo_global *glob = ref->object;

        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct ttm_global_reference *ref)
{
        struct ttm_bo_global_ref *bo_ref =
                container_of(ref, struct ttm_bo_global_ref, ref);
        struct ttm_bo_global *glob = ref->object;
        int ret;

        mutex_init(&glob->device_list_mutex);
        spin_lock_init(&glob->lru_lock);
        glob->mem_glob = bo_ref->mem_glob;
        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

        if (unlikely(glob->dummy_read_page == NULL)) {
                ret = -ENOMEM;
                goto out_no_drp;
        }

        INIT_LIST_HEAD(&glob->swap_lru);
        INIT_LIST_HEAD(&glob->device_list);

        ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
        ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
        if (unlikely(ret != 0)) {
                printk(KERN_ERR TTM_PFX
                       "Could not register buffer object swapout.\n");
                goto out_no_shrink;
        }

        glob->ttm_bo_extra_size =
                ttm_round_pot(sizeof(struct ttm_tt)) +
                ttm_round_pot(sizeof(struct ttm_backend));

        glob->ttm_bo_size = glob->ttm_bo_extra_size +
                ttm_round_pot(sizeof(struct ttm_buffer_object));

        atomic_set(&glob->bo_count, 0);

        kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
        ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
        if (unlikely(ret != 0))
                kobject_put(&glob->kobj);
        return ret;
out_no_shrink:
        __free_page(glob->dummy_read_page);
out_no_drp:
        kfree(glob);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);
1406
1407
1408 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1409 {
1410         int ret = 0;
1411         unsigned i = TTM_NUM_MEM_TYPES;
1412         struct ttm_mem_type_manager *man;
1413         struct ttm_bo_global *glob = bdev->glob;
1414
1415         while (i--) {
1416                 man = &bdev->man[i];
1417                 if (man->has_type) {
1418                         man->use_type = false;
1419                         if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1420                                 ret = -EBUSY;
1421                                 printk(KERN_ERR TTM_PFX
1422                                        "DRM memory manager type %d "
1423                                        "is not clean.\n", i);
1424                         }
1425                         man->has_type = false;
1426                 }
1427         }
1428
1429         mutex_lock(&glob->device_list_mutex);
1430         list_del(&bdev->device_list);
1431         mutex_unlock(&glob->device_list_mutex);
1432
1433         if (!cancel_delayed_work(&bdev->wq))
1434                 flush_scheduled_work();
1435
1436         while (ttm_bo_delayed_delete(bdev, true))
1437                 ;
1438
1439         spin_lock(&glob->lru_lock);
1440         if (list_empty(&bdev->ddestroy))
1441                 TTM_DEBUG("Delayed destroy list was clean\n");
1442
1443         if (list_empty(&bdev->man[0].lru))
1444                 TTM_DEBUG("Swap list was clean\n");
1445         spin_unlock(&glob->lru_lock);
1446
1447         BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1448         write_lock(&bdev->vm_lock);
1449         drm_mm_takedown(&bdev->addr_space_mm);
1450         write_unlock(&bdev->vm_lock);
1451
1452         return ret;
1453 }
1454 EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_global *glob,
                       struct ttm_bo_driver *driver,
                       uint64_t file_page_offset,
                       bool need_dma32)
{
        int ret = -EINVAL;

        rwlock_init(&bdev->vm_lock);
        bdev->driver = driver;

        memset(bdev->man, 0, sizeof(bdev->man));

        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
        ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
        if (unlikely(ret != 0))
                goto out_no_sys;

        bdev->addr_space_rb = RB_ROOT;
        ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
        if (unlikely(ret != 0))
                goto out_no_addr_mm;

        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        bdev->nice_mode = true;
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = NULL;
        bdev->glob = glob;
        bdev->need_dma32 = need_dma32;

        mutex_lock(&glob->device_list_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
        mutex_unlock(&glob->device_list_mutex);

        return 0;
out_no_addr_mm:
        ttm_bo_clean_mm(bdev, TTM_PL_SYSTEM);
out_no_sys:
        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
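
/*
 * Example usage (a sketch; "edev", example_bo_driver and
 * EXAMPLE_FILE_PAGE_OFFSET are hypothetical driver code, only the TTM
 * calls are real). TTM_PL_SYSTEM is set up by ttm_bo_device_init()
 * itself; additional memory types are added afterwards with
 * ttm_bo_init_mm(), sized in pages:
 *
 *      ret = ttm_bo_device_init(&edev->bdev, edev->bo_global,
 *                               &example_bo_driver,
 *                               EXAMPLE_FILE_PAGE_OFFSET,
 *                               edev->need_dma32);
 *      if (ret)
 *              goto out_err;
 *
 *      ret = ttm_bo_init_mm(&edev->bdev, TTM_PL_VRAM,
 *                           edev->vram_size >> PAGE_SHIFT);
 *      if (ret)
 *              goto out_err_release;
 *
 * On teardown the driver calls ttm_bo_device_release(), which expects
 * all non-system memory types to already be clean.
 */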

/*
 * buffer object vm functions.
 */

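/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: the buffer object device.
 * @mem: the memory region to check.
 *
 * Returns true if CPU access to @mem goes through a PCI aperture.
 * Fixed memory types (TTM_MEMTYPE_FLAG_FIXED) are always treated as
 * PCI; for the remaining types, system memory, CMA regions and cached
 * placements are CPU-accessible without an aperture mapping.
 */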
bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (mem->mem_type == TTM_PL_SYSTEM)
                        return false;

                if (man->flags & TTM_MEMTYPE_FLAG_CMA)
                        return false;

                if (mem->placement & TTM_PL_FLAG_CACHED)
                        return false;
        }
        return true;
}

int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
                      struct ttm_mem_reg *mem,
                      unsigned long *bus_base,
                      unsigned long *bus_offset, unsigned long *bus_size)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        *bus_size = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;

        if (ttm_mem_reg_is_pci(bdev, mem)) {
                *bus_offset = mem->mm_node->start << PAGE_SHIFT;
                *bus_size = mem->num_pages << PAGE_SHIFT;
                *bus_base = man->io_offset;
        }

        return 0;
}
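
/*
 * Example usage (a sketch; "map" and the surrounding driver code are
 * hypothetical): a driver that wants an ioremap of a buffer combines
 * the outputs as bus_base + bus_offset:
 *
 *      unsigned long bus_base, bus_offset, bus_size;
 *
 *      ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base,
 *                              &bus_offset, &bus_size);
 *      if (ret == 0 && bus_size != 0)
 *              map = ioremap_nocache(bus_base + bus_offset, bus_size);
 *
 * A bus_size of zero on success means the region is not PCI memory and
 * must be accessed through the ttm pages instead.
 */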
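/**
 * ttm_bo_unmap_virtual
 *
 * @bo: the buffer object.
 *
 * Kill all user-space CPU mappings of the buffer in the device address
 * space, forcing subsequent accesses to fault and re-validate.
 * Typically called before the buffer is moved, so that stale mappings
 * of the old location go away.
 */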
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        loff_t offset = (loff_t) bo->addr_space_offset;
        loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

        if (!bdev->dev_mapping)
                return;

        unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
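/**
 * ttm_bo_vm_insert_rb
 *
 * @bo: the buffer object to insert.
 *
 * Insert @bo into the device address-space red-black tree, keyed on
 * bo->vm_node->start. This tree is what the mmap code walks to
 * translate a file offset back to a buffer object. The caller must
 * hold bdev->vm_lock in write mode; inserting an offset that is
 * already present is a bug.
 */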
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct rb_node **cur = &bdev->addr_space_rb.rb_node;
        struct rb_node *parent = NULL;
        struct ttm_buffer_object *cur_bo;
        unsigned long offset = bo->vm_node->start;
        unsigned long cur_offset;

        while (*cur) {
                parent = *cur;
                cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
                cur_offset = cur_bo->vm_node->start;
                if (offset < cur_offset)
                        cur = &parent->rb_left;
                else if (offset > cur_offset)
                        cur = &parent->rb_right;
                else
                        BUG();
        }

        rb_link_node(&bo->vm_rb, parent, cur);
        rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret;

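        /*
         * drm_mm_pre_get() preallocates node memory so that
         * drm_mm_get_block_atomic() below cannot fail for lack of
         * memory while vm_lock is held. If another thread consumes the
         * preallocated nodes between the search and the get, the get
         * returns NULL and we retry from the top.
         */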
retry_pre_get:
        ret = drm_mm_pre_get(&bdev->addr_space_mm);
        if (unlikely(ret != 0))
                return ret;

        write_lock(&bdev->vm_lock);
        bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
                                         bo->mem.num_pages, 0, 0);

        if (unlikely(bo->vm_node == NULL)) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
                                              bo->mem.num_pages, 0);

        if (unlikely(bo->vm_node == NULL)) {
                write_unlock(&bdev->vm_lock);
                goto retry_pre_get;
        }

        ttm_bo_vm_insert_rb(bo);
        write_unlock(&bdev->vm_lock);
        bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

        return 0;
out_unlock:
        write_unlock(&bdev->vm_lock);
        return ret;
}
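/**
 * ttm_bo_wait
 *
 * @bo: the buffer object.
 * @lazy: passed through to the driver's sync_obj_wait hook.
 * @interruptible: sleep interruptibly while waiting.
 * @no_wait: return -EBUSY instead of sleeping.
 *
 * Wait for the sync object protecting @bo to signal, dropping and
 * re-taking bo->lock around the actual wait and the sync object
 * unrefs. Must be called with bo->lock held; returns with it held.
 */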
int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool lazy, bool interruptible, bool no_wait)
{
        struct ttm_bo_driver *driver = bo->bdev->driver;
        void *sync_obj;
        void *sync_obj_arg;
        int ret = 0;

        if (likely(bo->sync_obj == NULL))
                return 0;

        while (bo->sync_obj) {
                if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                        spin_unlock(&bo->lock);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bo->lock);
                        continue;
                }

                if (no_wait)
                        return -EBUSY;

                sync_obj = driver->sync_obj_ref(bo->sync_obj);
                sync_obj_arg = bo->sync_obj_arg;
                spin_unlock(&bo->lock);
                ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
                                            lazy, interruptible);
                if (unlikely(ret != 0)) {
                        driver->sync_obj_unref(&sync_obj);
                        spin_lock(&bo->lock);
                        return ret;
                }
                spin_lock(&bo->lock);
                if (likely(bo->sync_obj == sync_obj &&
                           bo->sync_obj_arg == sync_obj_arg)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING,
                                  &bo->priv_flags);
                        spin_unlock(&bo->lock);
                        driver->sync_obj_unref(&sync_obj);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bo->lock);
                } else {
                        spin_unlock(&bo->lock);
                        driver->sync_obj_unref(&sync_obj);
                        spin_lock(&bo->lock);
                }
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
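
/*
 * The canonical calling pattern, as used by ttm_bo_synccpu_write_grab()
 * below:
 *
 *      spin_lock(&bo->lock);
 *      ret = ttm_bo_wait(bo, false, true, no_wait);
 *      spin_unlock(&bo->lock);
 */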
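/*
 * ttm_bo_block_reservation() / ttm_bo_unblock_reservation() below take
 * and release the reservation without touching the LRU lists, unlike
 * ttm_bo_reserve() / ttm_bo_unreserve(). See the comment in
 * ttm_bo_synccpu_write_grab() for when the LRU-aware variants are
 * preferred.
 */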
void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
}

int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
                             bool no_wait)
{
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (no_wait)
                        return -EBUSY;
                else if (interruptible) {
                        ret = wait_event_interruptible
                            (bo->event_queue, atomic_read(&bo->reserved) == 0);
                        if (unlikely(ret != 0))
                                return ret;
                } else {
                        wait_event(bo->event_queue,
                                   atomic_read(&bo->reserved) == 0);
                }
        }
        return 0;
}

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
        int ret = 0;

        /*
         * Using ttm_bo_reserve instead of ttm_bo_block_reservation
         * makes sure the lru lists are updated.
         */

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
        if (unlikely(ret != 0))
                return ret;
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, true, no_wait);
        spin_unlock(&bo->lock);
        if (likely(ret == 0))
                atomic_inc(&bo->cpu_writers);
        ttm_bo_unreserve(bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
        if (atomic_dec_and_test(&bo->cpu_writers))
                wake_up_all(&bo->event_queue);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
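
/*
 * Example usage (a sketch; the kmap bounds and error handling are
 * hypothetical driver code): bracketing CPU writes to buffer contents,
 * with ttm_bo_kmap()/ttm_bo_kunmap() as the usual way to get a CPU
 * pointer:
 *
 *      struct ttm_bo_kmap_obj map;
 *
 *      ret = ttm_bo_synccpu_write_grab(bo, false);
 *      if (ret)
 *              return ret;
 *      ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *      if (ret == 0) {
 *              ... write through ttm_kmap_obj_virtual(&map, &is_iomem) ...
 *              ttm_bo_kunmap(&map);
 *      }
 *      ttm_bo_synccpu_write_release(bo);
 */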

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
        struct ttm_bo_global *glob =
            container_of(shrink, struct ttm_bo_global, shrink);
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        int put_count;
        uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

        spin_lock(&glob->lru_lock);
        while (ret == -EBUSY) {
                if (unlikely(list_empty(&glob->swap_lru))) {
                        spin_unlock(&glob->lru_lock);
                        return -EBUSY;
                }

                bo = list_first_entry(&glob->swap_lru,
                                      struct ttm_buffer_object, swap);
                kref_get(&bo->list_kref);

                /*
                 * Reserve buffer. Since we unlock while sleeping, we need
                 * to re-check that nobody removed us from the swap-list
                 * while we slept.
                 */

                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
                if (unlikely(ret == -EBUSY)) {
                        spin_unlock(&glob->lru_lock);
                        ttm_bo_wait_unreserved(bo, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
                        spin_lock(&glob->lru_lock);
                }
        }

        BUG_ON(ret != 0);
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        /*
         * Wait for GPU, then move to system cached.
         */

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bo->lock);

        if (unlikely(ret != 0))
                goto out;

        if ((bo->mem.placement & swap_placement) != swap_placement) {
                struct ttm_mem_reg evict_mem;

                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;
                evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
                evict_mem.mem_type = TTM_PL_SYSTEM;

                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
                                             false, false);
                if (unlikely(ret != 0))
                        goto out;
        }

        ttm_bo_unmap_virtual(bo);

        /*
         * Swap out. The buffer will be swapped in again as soon as
         * anyone tries to access a ttm page.
         */

        ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:
        /*
         * Unreserve without putting back on the LRU, to avoid swapping
         * out an already swapped-out buffer.
         */

        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}
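
/*
 * ttm_bo_swapout() is hooked up as the shrink callback in
 * ttm_bo_global_init() (via ttm_mem_init_shrink() and
 * ttm_mem_register_shrink(); cf. the out_no_shrink error label above),
 * so the memory accounting code can call it under memory pressure.
 */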
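/**
 * ttm_bo_swapout_all
 *
 * @bdev: the buffer object device.
 *
 * Repeatedly swap out buffers until ttm_bo_swapout() reports that
 * nothing swappable is left. Note that the shrink state is global, so
 * this may also swap out buffers belonging to other devices sharing
 * the same ttm_bo_global.
 */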
void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
        while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
                ;
}